[flang-commits] [flang] f98ee40 - [NFC][Py Reformat] Reformat python files in the rest of the dirs

Tobias Hieta via flang-commits flang-commits at lists.llvm.org
Thu May 25 02:17:16 PDT 2023


Author: Tobias Hieta
Date: 2023-05-25T11:17:05+02:00
New Revision: f98ee40f4b5d7474fc67e82824bf6abbaedb7b1c

URL: https://github.com/llvm/llvm-project/commit/f98ee40f4b5d7474fc67e82824bf6abbaedb7b1c
DIFF: https://github.com/llvm/llvm-project/commit/f98ee40f4b5d7474fc67e82824bf6abbaedb7b1c.diff

LOG: [NFC][Py Reformat] Reformat python files in the rest of the dirs

This is an ongoing series of commits that are reformatting our
Python code. This catches the last of the python files to
reformat. Since they were so few I bunched them together.

Reformatting is done with `black`.

If you end up having problems merging this commit because you
have made changes to a python file, the best way to handle that
is to run git checkout --ours <yourfile> and then reformat it
with black.

If you run into any problems, post to discourse about it and
we will try to help.

RFC Thread below:

https://discourse.llvm.org/t/rfc-document-and-standardize-python-code-style

Reviewed By: jhenderson, #libc, Mordante, sivachandra

Differential Revision: https://reviews.llvm.org/D150784

Added: 
    

Modified: 
    bolt/docs/conf.py
    bolt/test/AArch64/lit.local.cfg
    bolt/test/Unit/lit.cfg.py
    bolt/test/X86/lit.local.cfg
    bolt/test/link_fdata.py
    bolt/test/lit.cfg.py
    bolt/test/lit.local.cfg
    bolt/test/runtime/AArch64/lit.local.cfg
    bolt/test/runtime/X86/lit.local.cfg
    bolt/test/runtime/lit.local.cfg
    bolt/utils/dot2html/dot2html.py
    bolt/utils/llvm-bolt-wrapper.py
    bolt/utils/nfc-check-setup.py
    compiler-rt/lib/asan/scripts/asan_symbolize.py
    compiler-rt/lib/dfsan/scripts/build-libc-list.py
    compiler-rt/lib/fuzzer/scripts/unbalanced_allocs.py
    compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py
    compiler-rt/lib/sanitizer_common/scripts/sancov.py
    compiler-rt/test/asan/TestCases/Android/lit.local.cfg.py
    compiler-rt/test/asan/TestCases/Darwin/lit.local.cfg.py
    compiler-rt/test/asan/TestCases/Linux/lit.local.cfg.py
    compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_no_op.py
    compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_wrong_frame_number_bug.py
    compiler-rt/test/asan/TestCases/Posix/lit.local.cfg.py
    compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
    compiler-rt/test/asan/TestCases/Windows/msvc/lit.local.cfg.py
    compiler-rt/test/asan/lit.cfg.py
    compiler-rt/test/builtins/TestCases/Darwin/lit.local.cfg.py
    compiler-rt/test/builtins/Unit/lit.cfg.py
    compiler-rt/test/builtins/lit.cfg.py
    compiler-rt/test/cfi/cross-dso/icall/lit.local.cfg.py
    compiler-rt/test/cfi/cross-dso/lit.local.cfg.py
    compiler-rt/test/cfi/icall/lit.local.cfg.py
    compiler-rt/test/cfi/lit.cfg.py
    compiler-rt/test/crt/lit.cfg.py
    compiler-rt/test/dfsan/lit.cfg.py
    compiler-rt/test/fuzzer/lit.cfg.py
    compiler-rt/test/gwp_asan/lit.cfg.py
    compiler-rt/test/hwasan/TestCases/Linux/lit.local.cfg.py
    compiler-rt/test/hwasan/TestCases/Posix/lit.local.cfg.py
    compiler-rt/test/hwasan/lit.cfg.py
    compiler-rt/test/lit.common.cfg.py
    compiler-rt/test/lsan/TestCases/Darwin/lit.local.cfg.py
    compiler-rt/test/lsan/TestCases/Linux/lit.local.cfg.py
    compiler-rt/test/lsan/TestCases/Posix/lit.local.cfg.py
    compiler-rt/test/lsan/lit.common.cfg.py
    compiler-rt/test/memprof/lit.cfg.py
    compiler-rt/test/metadata/lit.cfg.py
    compiler-rt/test/msan/Linux/lit.local.cfg.py
    compiler-rt/test/msan/lit.cfg.py
    compiler-rt/test/orc/TestCases/Darwin/arm64/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Darwin/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Darwin/x86-64/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/FreeBSD/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/FreeBSD/x86-64/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Generic/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Linux/aarch64/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Linux/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Linux/x86-64/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Windows/lit.local.cfg.py
    compiler-rt/test/orc/TestCases/Windows/x86-64/lit.local.cfg.py
    compiler-rt/test/orc/lit.cfg.py
    compiler-rt/test/profile/AIX/lit.local.cfg.py
    compiler-rt/test/profile/Darwin/lit.local.cfg.py
    compiler-rt/test/profile/Linux/lit.local.cfg.py
    compiler-rt/test/profile/Posix/lit.local.cfg.py
    compiler-rt/test/profile/Windows/lit.local.cfg.py
    compiler-rt/test/profile/lit.cfg.py
    compiler-rt/test/safestack/lit.cfg.py
    compiler-rt/test/sanitizer_common/TestCases/Darwin/lit.local.cfg.py
    compiler-rt/test/sanitizer_common/TestCases/FreeBSD/lit.local.cfg.py
    compiler-rt/test/sanitizer_common/TestCases/Linux/lit.local.cfg.py
    compiler-rt/test/sanitizer_common/TestCases/NetBSD/lit.local.cfg.py
    compiler-rt/test/sanitizer_common/TestCases/Posix/lit.local.cfg.py
    compiler-rt/test/sanitizer_common/android_commands/android_common.py
    compiler-rt/test/sanitizer_common/android_commands/android_compile.py
    compiler-rt/test/sanitizer_common/android_commands/android_run.py
    compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py
    compiler-rt/test/sanitizer_common/ios_commands/iossim_compile.py
    compiler-rt/test/sanitizer_common/ios_commands/iossim_run.py
    compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py
    compiler-rt/test/sanitizer_common/lit.common.cfg.py
    compiler-rt/test/scudo/lit.cfg.py
    compiler-rt/test/shadowcallstack/lit.cfg.py
    compiler-rt/test/tsan/Darwin/lit.local.cfg.py
    compiler-rt/test/tsan/Linux/lit.local.cfg.py
    compiler-rt/test/tsan/libcxx/lit.local.cfg.py
    compiler-rt/test/tsan/libdispatch/lit.local.cfg.py
    compiler-rt/test/tsan/lit.cfg.py
    compiler-rt/test/ubsan/TestCases/Misc/Linux/lit.local.cfg.py
    compiler-rt/test/ubsan/TestCases/TypeCheck/Function/lit.local.cfg.py
    compiler-rt/test/ubsan/TestCases/TypeCheck/Linux/lit.local.cfg.py
    compiler-rt/test/ubsan/lit.common.cfg.py
    compiler-rt/test/ubsan_minimal/lit.common.cfg.py
    compiler-rt/test/xray/lit.cfg.py
    compiler-rt/unittests/lit.common.unit.cfg.py
    cross-project-tests/amdgpu/lit.local.cfg
    cross-project-tests/debuginfo-tests/clang_llvm_roundtrip/lit.local.cfg
    cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg
    cross-project-tests/debuginfo-tests/dexter/dex/__init__.py
    cross-project-tests/debuginfo-tests/dexter/dex/builder/Builder.py
    cross-project-tests/debuginfo-tests/dexter/dex/builder/ParserOptions.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/CommandBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/ParseCommand.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/StepValueInfo.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexCommandLine.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectProgramState.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepKind.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepOrder.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchType.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchValue.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexFinishTest.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLabel.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLimitSteps.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexUnreachable.py
    cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexWatch.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ConditionalController.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ControllerHelpers.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DebuggerControllerBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DefaultController.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/Debuggers.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/__init__.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/__init__.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/breakpoint.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/client.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/control.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/dbgeng.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/probe_process.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/setup.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symbols.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symgroup.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/sysobjs.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/utils.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/lldb/LLDB.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2015.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2017.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2019.py
    cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/windows/ComInterface.py
    cross-project-tests/debuginfo-tests/dexter/dex/dextIR/BuilderIR.py
    cross-project-tests/debuginfo-tests/dexter/dex/dextIR/DextIR.py
    cross-project-tests/debuginfo-tests/dexter/dex/dextIR/LocIR.py
    cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ProgramState.py
    cross-project-tests/debuginfo-tests/dexter/dex/dextIR/StepIR.py
    cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ValueIR.py
    cross-project-tests/debuginfo-tests/dexter/dex/heuristic/Heuristic.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/Main.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/TestToolBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/ToolBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/clang_opt_bisect/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/help/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/list_debuggers/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/no_tool_/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/run_debugger_internal_/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/test/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/tools/view/Tool.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/Environment.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/Exceptions.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/ExtArgParse.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/Logging.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/PrettyOutputBase.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/ReturnCode.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/RootDirectory.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/Timeout.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/Timer.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/UnitTests.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/Version.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/WorkingDirectory.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/posix/PrettyOutput.py
    cross-project-tests/debuginfo-tests/dexter/dex/utils/windows/PrettyOutput.py
    cross-project-tests/debuginfo-tests/dexter/dexter.py
    cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/dex_and_source/lit.local.cfg.py
    cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary/lit.local.cfg.py
    cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary_different_dir/lit.local.cfg.py
    cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/windows_noncanonical_path/lit.local.cfg.py
    cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/limit_steps/lit.local.cfg
    cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg
    cross-project-tests/debuginfo-tests/lit.local.cfg
    cross-project-tests/debuginfo-tests/llgdb-tests/lit.local.cfg
    cross-project-tests/debuginfo-tests/llgdb-tests/llgdb.py
    cross-project-tests/debuginfo-tests/llvm-prettyprinters/gdb/lit.local.cfg
    cross-project-tests/intrinsic-header-tests/lit.local.cfg
    cross-project-tests/lit.cfg.py
    flang/docs/FIR/CreateFIRLangRef.py
    flang/docs/conf.py
    flang/examples/FlangOmpReport/yaml_summarizer.py
    flang/test/Evaluate/test_folding.py
    flang/test/NonGtestUnit/lit.cfg.py
    flang/test/Semantics/common.py
    flang/test/Semantics/test_errors.py
    flang/test/Semantics/test_modfile.py
    flang/test/Semantics/test_symbols.py
    flang/test/Unit/lit.cfg.py
    flang/test/lib/lit.local.cfg
    flang/test/lit.cfg.py
    libc/AOR_v20.02/math/tools/plot.py
    libc/docs/conf.py
    libc/utils/mathtools/GenerateHPDConstants.py
    libclc/generic/lib/gen_convert.py
    lld/docs/conf.py
    lld/test/COFF/lit.local.cfg
    lld/test/ELF/lit.local.cfg
    lld/test/MachO/Inputs/DependencyDump.py
    lld/test/MachO/Inputs/code-signature-check.py
    lld/test/MachO/lit.local.cfg
    lld/test/MachO/tools/generate-cfi-funcs.py
    lld/test/MachO/tools/generate-thunkable-program.py
    lld/test/MachO/tools/validate-unwind-info.py
    lld/test/lit.cfg.py
    lld/test/wasm/lit.local.cfg
    lld/utils/benchmark.py
    llvm-libgcc/generate_version_script.py
    openmp/docs/conf.py
    openmp/libompd/gdb-plugin/ompd/__init__.py
    openmp/libompd/gdb-plugin/ompd/frame_filter.py
    openmp/libompd/gdb-plugin/ompd/ompd.py
    openmp/libompd/gdb-plugin/ompd/ompd_address_space.py
    openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py
    openmp/libompd/gdb-plugin/ompd/ompd_handles.py
    openmp/libomptarget/utils/generate_microtask_cases.py
    openmp/runtime/test/affinity/format/check.py
    polly/docs/conf.py
    polly/lib/External/isl/imath/tests/gmp-compat-test/genctest.py
    polly/lib/External/isl/imath/tests/gmp-compat-test/gendata.py
    polly/lib/External/isl/imath/tests/gmp-compat-test/genpytest.py
    polly/lib/External/isl/imath/tests/gmp-compat-test/gmpapi.py
    polly/lib/External/isl/imath/tests/gmp-compat-test/runtest.py
    polly/lib/External/isl/imath/tools/mkdoc.py
    polly/lib/External/isl/isl_test_python.py
    polly/lib/External/isl/libisl-gdb.py
    polly/test/update_check.py
    polly/utils/argparse.py
    polly/utils/pyscop/isl.py
    polly/utils/pyscop/pyscop.py
    pstl/test/std/lit.local.cfg
    third-party/benchmark/.ycm_extra_conf.py
    third-party/benchmark/bindings/python/google_benchmark/example.py
    third-party/benchmark/setup.py
    third-party/benchmark/tools/compare.py
    third-party/benchmark/tools/gbench/__init__.py
    third-party/benchmark/tools/gbench/report.py
    third-party/benchmark/tools/gbench/util.py
    third-party/benchmark/tools/strip_asm.py
    utils/bazel/overlay_directories.py

Removed: 
    


################################################################################
diff  --git a/bolt/docs/conf.py b/bolt/docs/conf.py
index 6f1f9ee15eaad..ededa1ccbb0be 100644
--- a/bolt/docs/conf.py
+++ b/bolt/docs/conf.py
@@ -16,92 +16,92 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
+extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'BOLT'
-copyright = u'2015-%d, BOLT team' % date.today().year
+project = "BOLT"
+copyright = "2015-%d, BOLT team" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%Y-%m-%d'
+today_fmt = "%Y-%m-%d"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'haiku'
+html_theme = "haiku"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["."]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # If given, this must be the name of an image file (path relative to the
 # configuration directory) that is the favicon of the docs. Modern browsers use
@@ -109,110 +109,104 @@
 # icon file (.ico), which is 16x16 or 32x32 pixels large. Default: None.  The
 # image file will be copied to the _static directory of the output HTML, but
 # only if the file does not already exist there.
-html_favicon = '_static/favicon.ico'
+html_favicon = "_static/favicon.ico"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d'
+html_last_updated_fmt = "%Y-%m-%d"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html']}
+html_sidebars = {"index": ["indexsidebar.html"]}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
 # html_additional_pages = {'index': 'index.html'}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'boltdoc'
+htmlhelp_basename = "boltdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('contents', 'bolt.tex', u'BOLT Documentation',
-   u'LLVM project', 'manual'),
+    ("contents", "bolt.tex", "BOLT Documentation", "LLVM project", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    ('contents', 'bolt', u'BOLT Documentation',
-     [u'LLVM project'], 1)
-]
+man_pages = [("contents", "bolt", "BOLT Documentation", ["LLVM project"], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -221,19 +215,25 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('contents', 'BOLT', u'BOLT Documentation',
-   u'LLVM project', 'BOLT', 'Binary Optimization and Layout Tool',
-   'Miscellaneous'),
+    (
+        "contents",
+        "BOLT",
+        "BOLT Documentation",
+        "LLVM project",
+        "BOLT",
+        "Binary Optimization and Layout Tool",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 
 # FIXME: Define intersphinx configuration.

diff  --git a/bolt/test/AArch64/lit.local.cfg b/bolt/test/AArch64/lit.local.cfg
index 5374d4f9329b9..59fa15a876b50 100644
--- a/bolt/test/AArch64/lit.local.cfg
+++ b/bolt/test/AArch64/lit.local.cfg
@@ -1,7 +1,7 @@
-if 'AArch64' not in config.root.targets:
+if "AArch64" not in config.root.targets:
     config.unsupported = True
 
-flags = '--target=aarch64-pc-linux -nostartfiles -nostdlib -ffreestanding'
+flags = "--target=aarch64-pc-linux -nostartfiles -nostdlib -ffreestanding"
 
-config.substitutions.insert(0, ('%cflags', f'%cflags {flags}'))
-config.substitutions.insert(0, ('%cxxflags', f'%cxxflags {flags}'))
+config.substitutions.insert(0, ("%cflags", f"%cflags {flags}"))
+config.substitutions.insert(0, ("%cxxflags", f"%cxxflags {flags}"))

diff  --git a/bolt/test/Unit/lit.cfg.py b/bolt/test/Unit/lit.cfg.py
index 52fd802dd8c47..e62ffe7f42bed 100644
--- a/bolt/test/Unit/lit.cfg.py
+++ b/bolt/test/Unit/lit.cfg.py
@@ -8,15 +8,15 @@
 import lit.formats
 
 # name: The name of this test suite.
-config.name = 'BOLT-Unit'
+config.name = "BOLT-Unit"
 
 # suffixes: A list of file extensions to treat as test files.
 config.suffixes = []
 
 # test_source_root: The root path where tests are located.
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.bolt_obj_root, 'unittests')
+config.test_exec_root = os.path.join(config.bolt_obj_root, "unittests")
 config.test_source_root = config.test_exec_root
 
 # testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, 'Tests')
+config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, "Tests")

diff  --git a/bolt/test/X86/lit.local.cfg b/bolt/test/X86/lit.local.cfg
index e81f048d7f867..947d25cb6e8c4 100644
--- a/bolt/test/X86/lit.local.cfg
+++ b/bolt/test/X86/lit.local.cfg
@@ -1,7 +1,7 @@
-if not 'X86' in config.root.targets:
+if not "X86" in config.root.targets:
     config.unsupported = True
 
-flags = '--target=x86_64-pc-linux -nostdlib'
+flags = "--target=x86_64-pc-linux -nostdlib"
 
-config.substitutions.insert(0, ('%cflags', f'%cflags {flags}'))
-config.substitutions.insert(0, ('%cxxflags', f'%cxxflags {flags}'))
+config.substitutions.insert(0, ("%cflags", f"%cflags {flags}"))
+config.substitutions.insert(0, ("%cxxflags", f"%cxxflags {flags}"))

diff  --git a/bolt/test/link_fdata.py b/bolt/test/link_fdata.py
index b87804d59d3e7..0232dd3211e9b 100755
--- a/bolt/test/link_fdata.py
+++ b/bolt/test/link_fdata.py
@@ -18,7 +18,7 @@
 parser.add_argument("output")
 parser.add_argument("prefix", nargs="?", default="FDATA", help="Custom FDATA prefix")
 parser.add_argument("--nmtool", default="nm", help="Path to nm tool")
-parser.add_argument("--no-lbr", action='store_true')
+parser.add_argument("--no-lbr", action="store_true")
 
 args = parser.parse_args()
 
@@ -48,7 +48,7 @@
 # as (src_tuple, dst_tuple, mispred_count, exec_count) tuples, where src and dst
 # are represented as (is_sym, anchor, offset) tuples
 exprs = []
-with open(args.input, 'r') as f:
+with open(args.input, "r") as f:
     for line in f.readlines():
         prefix_match = prefix_pat.match(line)
         if not prefix_match:
@@ -60,43 +60,49 @@
         if fdata_match:
             src_dst, execnt, mispred = fdata_match.groups()
             # Split by whitespaces not preceded by a backslash (negative lookbehind)
-            chunks = re.split(r'(?<!\\) +', src_dst)
+            chunks = re.split(r"(?<!\\) +", src_dst)
             # Check if the number of records separated by non-escaped whitespace
             # exactly matches the format.
-            assert len(chunks) == 6, f"ERROR: wrong format/whitespaces must be escaped:\n{line}"
-            exprs.append(('FDATA', (*chunks, execnt, mispred)))
+            assert (
+                len(chunks) == 6
+            ), f"ERROR: wrong format/whitespaces must be escaped:\n{line}"
+            exprs.append(("FDATA", (*chunks, execnt, mispred)))
         elif nolbr_match:
             loc, count = nolbr_match.groups()
             # Split by whitespaces not preceded by a backslash (negative lookbehind)
-            chunks = re.split(r'(?<!\\) +', loc)
+            chunks = re.split(r"(?<!\\) +", loc)
             # Check if the number of records separated by non-escaped whitespace
             # exactly matches the format.
-            assert len(chunks) == 3, f"ERROR: wrong format/whitespaces must be escaped:\n{line}"
-            exprs.append(('NOLBR', (*chunks, count)))
+            assert (
+                len(chunks) == 3
+            ), f"ERROR: wrong format/whitespaces must be escaped:\n{line}"
+            exprs.append(("NOLBR", (*chunks, count)))
         elif preagg_match:
-            exprs.append(('PREAGG', preagg_match.groups()))
+            exprs.append(("PREAGG", preagg_match.groups()))
         else:
             exit("ERROR: unexpected input:\n%s" % line)
 
 # Read nm output: <symbol value> <symbol type> <symbol name>
-nm_output = subprocess.run([args.nmtool, '--defined-only', args.objfile],
-                           text = True, capture_output = True).stdout
+nm_output = subprocess.run(
+    [args.nmtool, "--defined-only", args.objfile], text=True, capture_output=True
+).stdout
 # Populate symbol map
 symbols = {}
 for symline in nm_output.splitlines():
     symval, _, symname = symline.split(maxsplit=2)
     symbols[symname] = symval
 
+
 def evaluate_symbol(issym, anchor, offsym):
     sym_match = replace_pat.match(offsym)
     if not sym_match:
         # No need to evaluate symbol value, return as is
-        return f'{issym} {anchor} {offsym}'
-    symname = sym_match.group('symname')
+        return f"{issym} {anchor} {offsym}"
+    symname = sym_match.group("symname")
     assert symname in symbols, f"ERROR: symbol {symname} is not defined in binary"
     # Evaluate to an absolute offset if issym is false
-    if issym == '0':
-        return f'{issym} {anchor} {symbols[symname]}'
+    if issym == "0":
+        return f"{issym} {anchor} {symbols[symname]}"
     # Evaluate symbol against its anchor if issym is true
     assert anchor in symbols, f"ERROR: symbol {anchor} is not defined in binary"
     anchor_value = int(symbols[anchor], 16)
@@ -104,29 +110,34 @@ def evaluate_symbol(issym, anchor, offsym):
     sym_offset = symbol_value - anchor_value
     return f'{issym} {anchor} {format(sym_offset, "x")}'
 
+
 def replace_symbol(matchobj):
-    '''
+    """
     Expects matchobj to only capture one group which contains the symbol name.
-    '''
-    symname = matchobj.group('symname')
+    """
+    symname = matchobj.group("symname")
     assert symname in symbols, f"ERROR: symbol {symname} is not defined in binary"
     return symbols[symname]
 
-with open(args.output, 'w', newline='\n') as f:
+
+with open(args.output, "w", newline="\n") as f:
     if args.no_lbr:
-        print('no_lbr', file = f)
+        print("no_lbr", file=f)
     for etype, expr in exprs:
-        if etype == 'FDATA':
+        if etype == "FDATA":
             issym1, anchor1, offsym1, issym2, anchor2, offsym2, execnt, mispred = expr
-            print(evaluate_symbol(issym1, anchor1, offsym1),
-                  evaluate_symbol(issym2, anchor2, offsym2),
-                  execnt, mispred, file = f)
-        elif etype == 'NOLBR':
+            print(
+                evaluate_symbol(issym1, anchor1, offsym1),
+                evaluate_symbol(issym2, anchor2, offsym2),
+                execnt,
+                mispred,
+                file=f,
+            )
+        elif etype == "NOLBR":
             issym, anchor, offsym, count = expr
-            print(evaluate_symbol(issym, anchor, offsym), count, file = f)
-        elif etype == 'PREAGG':
+            print(evaluate_symbol(issym, anchor, offsym), count, file=f)
+        elif etype == "PREAGG":
             # Replace all symbols enclosed in ##
-            print(expr[0], re.sub(replace_pat, replace_symbol, expr[1]),
-                  file = f)
+            print(expr[0], re.sub(replace_pat, replace_symbol, expr[1]), file=f)
         else:
             exit("ERROR: unhandled expression type:\n%s" % etype)

diff --git a/bolt/test/lit.cfg.py b/bolt/test/lit.cfg.py
index f28afd5912279..fe27af87f9106 100644
--- a/bolt/test/lit.cfg.py
+++ b/bolt/test/lit.cfg.py
@@ -16,7 +16,7 @@
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'BOLT'
+config.name = "BOLT"
 
 # testFormat: The test format to use to interpret tests.
 #
@@ -25,19 +25,32 @@
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.cppm', '.m', '.mm', '.cu',
-                   '.ll', '.cl', '.s', '.S', '.modulemap', '.test', '.rs']
+config.suffixes = [
+    ".c",
+    ".cpp",
+    ".cppm",
+    ".m",
+    ".mm",
+    ".cu",
+    ".ll",
+    ".cl",
+    ".s",
+    ".S",
+    ".modulemap",
+    ".test",
+    ".rs",
+]
 
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt']
+config.excludes = ["Inputs", "CMakeLists.txt", "README.txt", "LICENSE.txt"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.bolt_obj_root, 'test')
+config.test_exec_root = os.path.join(config.bolt_obj_root, "test")
 
 # checking if maxIndividualTestTime is available on the platform and sets
 # it to 60sec if so, declares lit-max-individual-test-time feature for
@@ -47,8 +60,11 @@
     config.available_features.add("lit-max-individual-test-time")
     lit_config.maxIndividualTestTime = 60
 else:
-    lit_config.warning('Setting a timeout per test not supported. ' + errormsg
-                       + ' Some tests will be skipped.')
+    lit_config.warning(
+        "Setting a timeout per test not supported. "
+        + errormsg
+        + " Some tests will be skipped."
+    )
 
 if config.bolt_enable_runtime:
     config.available_features.add("bolt-runtime")
@@ -58,57 +74,64 @@
 
 llvm_config.use_default_substitutions()
 
-llvm_config.config.environment['CLANG'] = config.bolt_clang
+llvm_config.config.environment["CLANG"] = config.bolt_clang
 llvm_config.use_clang()
 
-llvm_config.config.environment['LD_LLD'] = config.bolt_lld
-ld_lld = llvm_config.use_llvm_tool('ld.lld', required=True, search_env='LD_LLD')
-llvm_config.config.available_features.add('ld.lld')
-llvm_config.add_tool_substitutions([ToolSubst(r'ld\.lld', command=ld_lld)])
+llvm_config.config.environment["LD_LLD"] = config.bolt_lld
+ld_lld = llvm_config.use_llvm_tool("ld.lld", required=True, search_env="LD_LLD")
+llvm_config.config.available_features.add("ld.lld")
+llvm_config.add_tool_substitutions([ToolSubst(r"ld\.lld", command=ld_lld)])
 
-config.substitutions.append(('%cflags', ''))
-config.substitutions.append(('%cxxflags', ''))
+config.substitutions.append(("%cflags", ""))
+config.substitutions.append(("%cxxflags", ""))
 
-link_fdata_cmd = os.path.join(config.test_source_root, 'link_fdata.py')
+link_fdata_cmd = os.path.join(config.test_source_root, "link_fdata.py")
 
-tool_dirs = [config.llvm_tools_dir,
-             config.test_source_root]
+tool_dirs = [config.llvm_tools_dir, config.test_source_root]
 
 tools = [
-    ToolSubst('llc', unresolved='fatal'),
-    ToolSubst('llvm-dwarfdump', unresolved='fatal'),
-    ToolSubst('llvm-bolt', unresolved='fatal'),
-    ToolSubst('llvm-boltdiff', unresolved='fatal'),
-    ToolSubst('llvm-bolt-heatmap', unresolved='fatal'),
-    ToolSubst('llvm-bat-dump', unresolved='fatal'),
-    ToolSubst('perf2bolt', unresolved='fatal'),
-    ToolSubst('yaml2obj', unresolved='fatal'),
-    ToolSubst('llvm-mc', unresolved='fatal'),
-    ToolSubst('llvm-nm', unresolved='fatal'),
-    ToolSubst('llvm-objdump', unresolved='fatal'),
-    ToolSubst('llvm-objcopy', unresolved='fatal'),
-    ToolSubst('llvm-strings', unresolved='fatal'),
-    ToolSubst('llvm-strip', unresolved='fatal'),
-    ToolSubst('llvm-readelf', unresolved='fatal'),
-    ToolSubst('link_fdata', command=sys.executable, unresolved='fatal', extra_args=[link_fdata_cmd]),
-    ToolSubst('merge-fdata', unresolved='fatal'),
-    ToolSubst('llvm-readobj', unresolved='fatal'),
-    ToolSubst('llvm-dwp', unresolved='fatal'),
-    ToolSubst('split-file', unresolved='fatal'),
+    ToolSubst("llc", unresolved="fatal"),
+    ToolSubst("llvm-dwarfdump", unresolved="fatal"),
+    ToolSubst("llvm-bolt", unresolved="fatal"),
+    ToolSubst("llvm-boltdiff", unresolved="fatal"),
+    ToolSubst("llvm-bolt-heatmap", unresolved="fatal"),
+    ToolSubst("llvm-bat-dump", unresolved="fatal"),
+    ToolSubst("perf2bolt", unresolved="fatal"),
+    ToolSubst("yaml2obj", unresolved="fatal"),
+    ToolSubst("llvm-mc", unresolved="fatal"),
+    ToolSubst("llvm-nm", unresolved="fatal"),
+    ToolSubst("llvm-objdump", unresolved="fatal"),
+    ToolSubst("llvm-objcopy", unresolved="fatal"),
+    ToolSubst("llvm-strings", unresolved="fatal"),
+    ToolSubst("llvm-strip", unresolved="fatal"),
+    ToolSubst("llvm-readelf", unresolved="fatal"),
+    ToolSubst(
+        "link_fdata",
+        command=sys.executable,
+        unresolved="fatal",
+        extra_args=[link_fdata_cmd],
+    ),
+    ToolSubst("merge-fdata", unresolved="fatal"),
+    ToolSubst("llvm-readobj", unresolved="fatal"),
+    ToolSubst("llvm-dwp", unresolved="fatal"),
+    ToolSubst("split-file", unresolved="fatal"),
 ]
 llvm_config.add_tool_substitutions(tools, tool_dirs)
 
+
 def calculate_arch_features(arch_string):
     features = []
     for arch in arch_string.split():
-        features.append(arch.lower() + '-registered-target')
+        features.append(arch.lower() + "-registered-target")
     return features
 
 
 llvm_config.feature_config(
-    [('--assertion-mode', {'ON': 'asserts'}),
-     ('--cxxflags', {r'-D_GLIBCXX_DEBUG\b': 'libstdcxx-safe-mode'}),
-        ('--targets-built', calculate_arch_features)
-     ])
-
-config.targets = frozenset(config.targets_to_build.split(';'))
+    [
+        ("--assertion-mode", {"ON": "asserts"}),
+        ("--cxxflags", {r"-D_GLIBCXX_DEBUG\b": "libstdcxx-safe-mode"}),
+        ("--targets-built", calculate_arch_features),
+    ]
+)
+
+config.targets = frozenset(config.targets_to_build.split(";"))

diff --git a/bolt/test/lit.local.cfg b/bolt/test/lit.local.cfg
index 122bedd558338..4f4d84e49b133 100644
--- a/bolt/test/lit.local.cfg
+++ b/bolt/test/lit.local.cfg
@@ -1,6 +1,6 @@
-host_linux_triple = config.target_triple.split('-')[0]+'-linux'
-common_linker_flags = '-fuse-ld=lld -Wl,--unresolved-symbols=ignore-all'
-flags = f'--target={host_linux_triple} {common_linker_flags}'
+host_linux_triple = config.target_triple.split("-")[0] + "-linux"
+common_linker_flags = "-fuse-ld=lld -Wl,--unresolved-symbols=ignore-all"
+flags = f"--target={host_linux_triple} {common_linker_flags}"
 
-config.substitutions.insert(0, ('%cflags', f'%cflags {flags}'))
-config.substitutions.insert(0, ('%cxxflags', f'%cxxflags {flags}'))
+config.substitutions.insert(0, ("%cflags", f"%cflags {flags}"))
+config.substitutions.insert(0, ("%cxxflags", f"%cxxflags {flags}"))

diff --git a/bolt/test/runtime/AArch64/lit.local.cfg b/bolt/test/runtime/AArch64/lit.local.cfg
index b565c79d50454..682f390bf38e1 100644
--- a/bolt/test/runtime/AArch64/lit.local.cfg
+++ b/bolt/test/runtime/AArch64/lit.local.cfg
@@ -1,2 +1,2 @@
-if config.host_arch not in ['aarch64']:
+if config.host_arch not in ["aarch64"]:
     config.unsupported = True

diff --git a/bolt/test/runtime/X86/lit.local.cfg b/bolt/test/runtime/X86/lit.local.cfg
index 7d0b9ea5a8b5d..b5dffc135ce6b 100644
--- a/bolt/test/runtime/X86/lit.local.cfg
+++ b/bolt/test/runtime/X86/lit.local.cfg
@@ -1,2 +1,2 @@
-if config.host_arch not in ['x86', 'X86', 'x86_64']:
+if config.host_arch not in ["x86", "X86", "x86_64"]:
     config.unsupported = True

diff --git a/bolt/test/runtime/lit.local.cfg b/bolt/test/runtime/lit.local.cfg
index 0b3754f8261ed..26b1435658228 100644
--- a/bolt/test/runtime/lit.local.cfg
+++ b/bolt/test/runtime/lit.local.cfg
@@ -1,3 +1,3 @@
 # Tests are not expected to pass in a cross-compilation setup.
-if not {'native', 'system-linux'}.issubset(config.available_features):
-  config.unsupported = True
+if not {"native", "system-linux"}.issubset(config.available_features):
+    config.unsupported = True

diff --git a/bolt/utils/dot2html/dot2html.py b/bolt/utils/dot2html/dot2html.py
index 07a1faad3050c..0b70fd18585ca 100755
--- a/bolt/utils/dot2html/dot2html.py
+++ b/bolt/utils/dot2html/dot2html.py
@@ -4,26 +4,36 @@
 import sys
 
 BASE_PATH = os.path.dirname(os.path.abspath(__file__))
-HTML_TEMPLATE_NAME = 'd3-graphviz-template.html'
+HTML_TEMPLATE_NAME = "d3-graphviz-template.html"
 HTML_TEMPLATE_PATH = os.path.join(BASE_PATH, HTML_TEMPLATE_NAME)
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('dotfile', nargs='?', type=argparse.FileType('r'),
-                        default=sys.stdin,
-                        help='Input .dot file, reads from stdin if not set')
-    parser.add_argument('htmlfile', nargs='?', type=argparse.FileType('w'),
-                        default=sys.stdout,
-                        help='Output .html file, writes to stdout if not set')
+    parser.add_argument(
+        "dotfile",
+        nargs="?",
+        type=argparse.FileType("r"),
+        default=sys.stdin,
+        help="Input .dot file, reads from stdin if not set",
+    )
+    parser.add_argument(
+        "htmlfile",
+        nargs="?",
+        type=argparse.FileType("w"),
+        default=sys.stdout,
+        help="Output .html file, writes to stdout if not set",
+    )
     args = parser.parse_args()
 
-    template = open(HTML_TEMPLATE_PATH, 'r')
+    template = open(HTML_TEMPLATE_PATH, "r")
 
     for line in template:
         if "<INSERT_DOT>" in line:
-            print(args.dotfile.read(), file=args.htmlfile, end='')
+            print(args.dotfile.read(), file=args.htmlfile, end="")
         else:
-            print(line, file=args.htmlfile, end='')
+            print(line, file=args.htmlfile, end="")
+
 
 if __name__ == "__main__":
     main()

diff --git a/bolt/utils/llvm-bolt-wrapper.py b/bolt/utils/llvm-bolt-wrapper.py
index 901304d134ee5..cd48204026fb7 100755
--- a/bolt/utils/llvm-bolt-wrapper.py
+++ b/bolt/utils/llvm-bolt-wrapper.py
@@ -41,9 +41,10 @@
 # # optional, defaults to timing.log in CWD
 # timing_file = timing1.log
 
+
 def read_cfg():
     src_dir = os.path.dirname(os.path.abspath(__file__))
-    cfg = configparser.ConfigParser(allow_no_value = True)
+    cfg = configparser.ConfigParser(allow_no_value=True)
     cfgs = cfg.read("llvm-bolt-wrapper.ini")
     if not cfgs:
         cfgs = cfg.read(os.path.join(src_dir, "llvm-bolt-wrapper.ini"))
@@ -51,69 +52,72 @@ def read_cfg():
 
     def get_cfg(key):
         # if key is not present in config, assume False
-        if key not in cfg['config']:
+        if key not in cfg["config"]:
             return False
         # if key is present, but has no value, assume True
-        if not cfg['config'][key]:
+        if not cfg["config"][key]:
             return True
         # if key has associated value, interpret the value
-        return cfg['config'].getboolean(key)
+        return cfg["config"].getboolean(key)
 
     d = {
         # BOLT binary locations
-        'BASE_BOLT': cfg['config']['base_bolt'],
-        'CMP_BOLT': cfg['config']['cmp_bolt'],
+        "BASE_BOLT": cfg["config"]["base_bolt"],
+        "CMP_BOLT": cfg["config"]["cmp_bolt"],
         # optional
-        'VERBOSE': get_cfg('verbose'),
-        'KEEP_TMP': get_cfg('keep_tmp'),
-        'NO_MINIMIZE': get_cfg('no_minimize'),
-        'RUN_SEQUENTIALLY': get_cfg('run_sequentially'),
-        'COMPARE_OUTPUT': get_cfg('compare_output'),
-        'SKIP_BINARY_CMP': get_cfg('skip_binary_cmp'),
-        'TIMING_FILE': cfg['config'].get('timing_file', 'timing.log'),
+        "VERBOSE": get_cfg("verbose"),
+        "KEEP_TMP": get_cfg("keep_tmp"),
+        "NO_MINIMIZE": get_cfg("no_minimize"),
+        "RUN_SEQUENTIALLY": get_cfg("run_sequentially"),
+        "COMPARE_OUTPUT": get_cfg("compare_output"),
+        "SKIP_BINARY_CMP": get_cfg("skip_binary_cmp"),
+        "TIMING_FILE": cfg["config"].get("timing_file", "timing.log"),
     }
-    if d['VERBOSE']:
+    if d["VERBOSE"]:
         print(f"Using config {os.path.abspath(cfgs[0])}")
     return SimpleNamespace(**d)
 
+
 # perf2bolt mode
-PERF2BOLT_MODE = ['-aggregate-only', '-ignore-build-id']
+PERF2BOLT_MODE = ["-aggregate-only", "-ignore-build-id"]
 
 # boltdiff mode
-BOLTDIFF_MODE = ['-diff-only', '-o', '/dev/null']
+BOLTDIFF_MODE = ["-diff-only", "-o", "/dev/null"]
 
 # options to suppress binary differences as much as possible
-MINIMIZE_DIFFS = ['-bolt-info=0']
+MINIMIZE_DIFFS = ["-bolt-info=0"]
 
 # bolt output options that need to be intercepted
 BOLT_OUTPUT_OPTS = {
-    '-o': 'BOLT output binary',
-    '-w': 'BOLT recorded profile',
+    "-o": "BOLT output binary",
+    "-w": "BOLT recorded profile",
 }
 
 # regex patterns to exclude the line from log comparison
 SKIP_MATCH = [
-    'BOLT-INFO: BOLT version',
-    r'^Args: ',
-    r'^BOLT-DEBUG:',
-    r'BOLT-INFO:.*data.*output data',
-    'WARNING: reading perf data directly',
+    "BOLT-INFO: BOLT version",
+    r"^Args: ",
+    r"^BOLT-DEBUG:",
+    r"BOLT-INFO:.*data.*output data",
+    "WARNING: reading perf data directly",
 ]
 
+
 def run_cmd(cmd, out_f, cfg):
     if cfg.VERBOSE:
-        print(' '.join(cmd))
+        print(" ".join(cmd))
     return subprocess.Popen(cmd, stdout=out_f, stderr=subprocess.STDOUT)
 
+
 def run_bolt(bolt_path, bolt_args, out_f, cfg):
-    p2b = os.path.basename(sys.argv[0]) == 'perf2bolt' # perf2bolt mode
-    bd = os.path.basename(sys.argv[0]) == 'llvm-boltdiff' # boltdiff mode
-    hm = sys.argv[1] == 'heatmap' # heatmap mode
-    cmd = ['/usr/bin/time', '-f', '%e %M', bolt_path] + bolt_args
+    p2b = os.path.basename(sys.argv[0]) == "perf2bolt"  # perf2bolt mode
+    bd = os.path.basename(sys.argv[0]) == "llvm-boltdiff"  # boltdiff mode
+    hm = sys.argv[1] == "heatmap"  # heatmap mode
+    cmd = ["/usr/bin/time", "-f", "%e %M", bolt_path] + bolt_args
     if p2b:
         # -ignore-build-id can occur at most once, hence remove it from cmd
-        if '-ignore-build-id' in cmd:
-            cmd.remove('-ignore-build-id')
+        if "-ignore-build-id" in cmd:
+            cmd.remove("-ignore-build-id")
         cmd += PERF2BOLT_MODE
     elif bd:
         cmd += BOLTDIFF_MODE
@@ -121,55 +125,65 @@ def run_bolt(bolt_path, bolt_args, out_f, cfg):
         cmd += MINIMIZE_DIFFS
     return run_cmd(cmd, out_f, cfg)
 
+
 def prepend_dash(args: Mapping[AnyStr, AnyStr]) -> Sequence[AnyStr]:
-    '''
+    """
     Accepts parsed arguments and returns flat list with dash prepended to
     the option.
     Example: Namespace(o='test.tmp') -> ['-o', 'test.tmp']
-    '''
-    dashed = [('-'+key,value) for (key,value) in args.items()]
+    """
+    dashed = [("-" + key, value) for (key, value) in args.items()]
     flattened = list(sum(dashed, ()))
     return flattened
 
+
 def replace_cmp_path(tmp: AnyStr, args: Mapping[AnyStr, AnyStr]) -> Sequence[AnyStr]:
-    '''
+    """
     Keeps file names, but replaces the path to a temp folder.
     Example: Namespace(o='abc/test.tmp') -> Namespace(o='/tmp/tmpf9un/test.tmp')
     Except preserve /dev/null.
-    '''
-    replace_path = lambda x: os.path.join(tmp, os.path.basename(x)) if x != '/dev/null' else '/dev/null'
+    """
+    replace_path = (
+        lambda x: os.path.join(tmp, os.path.basename(x))
+        if x != "/dev/null"
+        else "/dev/null"
+    )
     new_args = {key: replace_path(value) for key, value in args.items()}
     return prepend_dash(new_args)
 
+
 def preprocess_args(args: argparse.Namespace) -> Mapping[AnyStr, AnyStr]:
-    '''
+    """
     Drop options that weren't parsed (e.g. -w), convert to a dict
-    '''
+    """
     return {key: value for key, value in vars(args).items() if value}
 
-def write_to(txt, filename, mode='w'):
+
+def write_to(txt, filename, mode="w"):
     with open(filename, mode) as f:
         f.write(txt)
 
+
 def wait(proc, fdesc):
     proc.wait()
     fdesc.close()
     return open(fdesc.name)
 
+
 def compare_logs(main, cmp, skip_begin=0, skip_end=0, str_input=True):
-    '''
+    """
     Compares logs but allows for certain lines to be excluded from comparison.
     If str_input is True (default), the input it assumed to be a string,
     which is split into lines. Otherwise the input is assumed to be a file.
     Returns None on success, mismatch otherwise.
-    '''
+    """
     main_inp = main.splitlines() if str_input else main.readlines()
     cmp_inp = cmp.splitlines() if str_input else cmp.readlines()
     # rewind logs after consumption
     if not str_input:
         main.seek(0)
         cmp.seek(0)
-    for lhs, rhs in list(zip(main_inp, cmp_inp))[skip_begin:-skip_end or None]:
+    for lhs, rhs in list(zip(main_inp, cmp_inp))[skip_begin : -skip_end or None]:
         if lhs != rhs:
             # check skip patterns
             for skip in SKIP_MATCH:
@@ -181,52 +195,59 @@ def compare_logs(main, cmp, skip_begin=0, skip_end=0, str_input=True):
                 return (lhs, rhs)
     return None
 
+
 def fmt_cmp(cmp_tuple):
     if not cmp_tuple:
-        return ''
-    return f'main:\n{cmp_tuple[0]}\ncmp:\n{cmp_tuple[1]}\n'
+        return ""
+    return f"main:\n{cmp_tuple[0]}\ncmp:\n{cmp_tuple[1]}\n"
+
 
 def compare_with(lhs, rhs, cmd, skip_begin=0, skip_end=0):
-    '''
+    """
     Runs cmd on both lhs and rhs and compares stdout.
     Returns tuple (mismatch, lhs_stdout):
         - if stdout matches between two files, mismatch is None,
         - otherwise mismatch is a tuple of mismatching lines.
-    '''
-    run = lambda binary: subprocess.run(cmd.split() + [binary],
-                                        text=True, check=True,
-                                        capture_output=True).stdout
+    """
+    run = lambda binary: subprocess.run(
+        cmd.split() + [binary], text=True, check=True, capture_output=True
+    ).stdout
     run_lhs = run(lhs)
     run_rhs = run(rhs)
     cmp = compare_logs(run_lhs, run_rhs, skip_begin, skip_end)
     return cmp, run_lhs
 
+
 def parse_cmp_offset(cmp_out):
-    '''
+    """
     Extracts byte number from cmp output:
     file1 file2 differ: byte X, line Y
-    '''
+    """
     # NOTE: cmp counts bytes starting from 1!
-    return int(re.search(r'byte (\d+),', cmp_out).groups()[0]) - 1
+    return int(re.search(r"byte (\d+),", cmp_out).groups()[0]) - 1
+
 
 def report_real_time(binary, main_err, cmp_err, cfg):
-    '''
+    """
     Extracts real time from stderr and appends it to TIMING FILE it as csv:
     "output binary; base bolt; cmp bolt"
-    '''
+    """
+
     def get_real_from_stderr(logline):
-        return '; '.join(logline.split())
+        return "; ".join(logline.split())
+
     for line in main_err:
         pass
     main = get_real_from_stderr(line)
     for line in cmp_err:
         pass
     cmp = get_real_from_stderr(line)
-    write_to(f"{binary}; {main}; {cmp}\n", cfg.TIMING_FILE, 'a')
+    write_to(f"{binary}; {main}; {cmp}\n", cfg.TIMING_FILE, "a")
     # rewind logs after consumption
     main_err.seek(0)
     cmp_err.seek(0)
 
+
 def clean_exit(tmp, out, exitcode, cfg):
     # temp files are only cleaned on success
     if not cfg.KEEP_TMP:
@@ -236,8 +257,9 @@ def clean_exit(tmp, out, exitcode, cfg):
     shutil.copyfileobj(out, sys.stdout)
     sys.exit(exitcode)
 
+
 def find_section(offset, readelf_hdr):
-    hdr = readelf_hdr.split('\n')
+    hdr = readelf_hdr.split("\n")
     section = None
     # extract sections table (parse objdump -hw output)
     for line in hdr[5:-1]:
@@ -247,7 +269,7 @@ def find_section(offset, readelf_hdr):
         # section size
         size = int(cols[2], 16)
         if offset >= file_offset and offset < file_offset + size:
-            if sys.stdout.isatty(): # terminal supports colors
+            if sys.stdout.isatty():  # terminal supports colors
                 print(f"\033[1m{line}\033[0m")
             else:
                 print(f">{line}")
@@ -256,34 +278,57 @@ def find_section(offset, readelf_hdr):
             print(line)
     return section
 
+
 def main_config_generator():
     parser = argparse.ArgumentParser()
-    parser.add_argument('base_bolt', help='Full path to base llvm-bolt binary')
-    parser.add_argument('cmp_bolt', help='Full path to cmp llvm-bolt binary')
-    parser.add_argument('--verbose', action='store_true',
-                        help='Print subprocess invocation cmdline (default False)')
-    parser.add_argument('--keep_tmp', action='store_true',
-                        help = 'Preserve tmp folder on a clean exit '
-                        '(tmp directory is preserved on crash by default)')
-    parser.add_argument('--no_minimize', action='store_true',
-                        help=f'Do not add `{MINIMIZE_DIFFS}` that is used '
-                        'by default to reduce binary differences')
-    parser.add_argument('--run_sequentially', action='store_true',
-                        help='Run both binaries sequentially (default '
-                        'in parallel). Use for timing comparison')
-    parser.add_argument('--compare_output', action='store_true',
-                        help = 'Compare bolt stdout/stderr (disabled by default)')
-    parser.add_argument('--skip_binary_cmp', action='store_true',
-                        help = 'Disable output comparison')
-    parser.add_argument('--timing_file', help = 'Override path to timing log '
-                        'file (default `timing.log` in CWD)')
+    parser.add_argument("base_bolt", help="Full path to base llvm-bolt binary")
+    parser.add_argument("cmp_bolt", help="Full path to cmp llvm-bolt binary")
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Print subprocess invocation cmdline (default False)",
+    )
+    parser.add_argument(
+        "--keep_tmp",
+        action="store_true",
+        help="Preserve tmp folder on a clean exit "
+        "(tmp directory is preserved on crash by default)",
+    )
+    parser.add_argument(
+        "--no_minimize",
+        action="store_true",
+        help=f"Do not add `{MINIMIZE_DIFFS}` that is used "
+        "by default to reduce binary differences",
+    )
+    parser.add_argument(
+        "--run_sequentially",
+        action="store_true",
+        help="Run both binaries sequentially (default "
+        "in parallel). Use for timing comparison",
+    )
+    parser.add_argument(
+        "--compare_output",
+        action="store_true",
+        help="Compare bolt stdout/stderr (disabled by default)",
+    )
+    parser.add_argument(
+        "--skip_binary_cmp", action="store_true", help="Disable output comparison"
+    )
+    parser.add_argument(
+        "--timing_file",
+        help="Override path to timing log " "file (default `timing.log` in CWD)",
+    )
     args = parser.parse_args()
 
-    print(dedent(f'''\
+    print(
+        dedent(
+            f"""\
     [config]
     # mandatory
     base_bolt = {args.base_bolt}
-    cmp_bolt = {args.cmp_bolt}'''))
+    cmp_bolt = {args.cmp_bolt}"""
+        )
+    )
     del args.base_bolt
     del args.cmp_bolt
     d = vars(args)
@@ -293,6 +338,7 @@ def main_config_generator():
             if value:
                 print(key)
 
+
 def main():
     cfg = read_cfg()
     # intercept output arguments
@@ -309,8 +355,8 @@ def main():
     args = prepend_dash(args)
 
     # run both BOLT binaries
-    main_f = open(os.path.join(tmp, 'main_bolt.stdout'), 'w')
-    cmp_f = open(os.path.join(tmp, 'cmp_bolt.stdout'), 'w')
+    main_f = open(os.path.join(tmp, "main_bolt.stdout"), "w")
+    cmp_f = open(os.path.join(tmp, "cmp_bolt.stdout"), "w")
     main_bolt = run_bolt(cfg.BASE_BOLT, unknownargs + args, main_f, cfg)
     if cfg.RUN_SEQUENTIALLY:
         main_out = wait(main_bolt, main_f)
@@ -330,22 +376,26 @@ def main():
         cfg.SKIP_BINARY_CMP = True
 
     # compare logs, skip_end=1 skips the line with time
-    out = compare_logs(main_out, cmp_out, skip_end=1, str_input=False) if cfg.COMPARE_OUTPUT else None
+    out = (
+        compare_logs(main_out, cmp_out, skip_end=1, str_input=False)
+        if cfg.COMPARE_OUTPUT
+        else None
+    )
     if out:
         print(tmp)
         print(fmt_cmp(out))
-        write_to(fmt_cmp(out), os.path.join(tmp, 'summary.txt'))
+        write_to(fmt_cmp(out), os.path.join(tmp, "summary.txt"))
         exit("logs mismatch")
 
-    if os.path.basename(sys.argv[0]) == 'llvm-boltdiff': # boltdiff mode
+    if os.path.basename(sys.argv[0]) == "llvm-boltdiff":  # boltdiff mode
         # no output binary to compare, so just exit
         clean_exit(tmp, main_out, main_bolt.returncode, cfg)
 
     # compare binaries (using cmp)
-    main_binary = args[args.index('-o')+1]
-    cmp_binary = cmp_args[cmp_args.index('-o')+1]
-    if main_binary == '/dev/null':
-        assert cmp_binary == '/dev/null'
+    main_binary = args[args.index("-o") + 1]
+    cmp_binary = cmp_args[cmp_args.index("-o") + 1]
+    if main_binary == "/dev/null":
+        assert cmp_binary == "/dev/null"
         cfg.SKIP_BINARY_CMP = True
 
     # report binary timing as csv: output binary; base bolt real; cmp bolt real
@@ -368,23 +418,25 @@ def main():
             assert not main_exists
             exit(f"{main_binary} doesn't exist")
 
-        cmp_proc = subprocess.run(['cmp', '-b', main_binary, cmp_binary],
-                                  capture_output=True, text=True)
+        cmp_proc = subprocess.run(
+            ["cmp", "-b", main_binary, cmp_binary], capture_output=True, text=True
+        )
         if cmp_proc.returncode:
             # check if output is an ELF file (magic bytes)
-            with open(main_binary, 'rb') as f:
+            with open(main_binary, "rb") as f:
                 magic = f.read(4)
-                if magic != b'\x7fELF':
+                if magic != b"\x7fELF":
                     exit("output mismatch")
             # check if ELF headers match
-            mismatch, _ = compare_with(main_binary, cmp_binary, 'readelf -We')
+            mismatch, _ = compare_with(main_binary, cmp_binary, "readelf -We")
             if mismatch:
                 print(fmt_cmp(mismatch))
-                write_to(fmt_cmp(mismatch), os.path.join(tmp, 'headers.txt'))
+                write_to(fmt_cmp(mismatch), os.path.join(tmp, "headers.txt"))
                 exit("headers mismatch")
             # if headers match, compare sections (skip line with filename)
-            mismatch, hdr = compare_with(main_binary, cmp_binary, 'objdump -hw',
-                                         skip_begin=2)
+            mismatch, hdr = compare_with(
+                main_binary, cmp_binary, "objdump -hw", skip_begin=2
+            )
             assert not mismatch
             # check which section has the first mismatch
             mismatch_offset = parse_cmp_offset(cmp_proc.stdout)
@@ -393,6 +445,7 @@ def main():
 
     clean_exit(tmp, main_out, main_bolt.returncode, cfg)
 
+
 if __name__ == "__main__":
     # config generator mode if the script is launched as is
     if os.path.basename(__file__) == "llvm-bolt-wrapper.py":

diff --git a/bolt/utils/nfc-check-setup.py b/bolt/utils/nfc-check-setup.py
index dba82acfbc6ac..b7b30a9296cb0 100755
--- a/bolt/utils/nfc-check-setup.py
+++ b/bolt/utils/nfc-check-setup.py
@@ -7,56 +7,73 @@
 import sys
 import textwrap
 
+
 def get_git_ref_or_rev(dir: str) -> str:
     # Run 'git symbolic-ref -q --short HEAD || git rev-parse --short HEAD'
-    cmd_ref = 'git symbolic-ref -q --short HEAD'
-    ref = subprocess.run(shlex.split(cmd_ref), cwd=dir, text=True,
-                         stdout=subprocess.PIPE)
+    cmd_ref = "git symbolic-ref -q --short HEAD"
+    ref = subprocess.run(
+        shlex.split(cmd_ref), cwd=dir, text=True, stdout=subprocess.PIPE
+    )
     if not ref.returncode:
         return ref.stdout.strip()
-    cmd_rev = 'git rev-parse --short HEAD'
-    return subprocess.check_output(shlex.split(cmd_rev), cwd=dir,
-                                   text=True).strip()
+    cmd_rev = "git rev-parse --short HEAD"
+    return subprocess.check_output(shlex.split(cmd_rev), cwd=dir, text=True).strip()
 
 
 def main():
-    parser = argparse.ArgumentParser(description=textwrap.dedent('''
+    parser = argparse.ArgumentParser(
+        description=textwrap.dedent(
+            """
             This script builds two versions of BOLT (with the current and
             previous revision) and sets up symlink for llvm-bolt-wrapper.
             Passes the options through to llvm-bolt-wrapper.
-            '''))
-    parser.add_argument('build_dir', nargs='?', default=os.getcwd(),
-                        help='Path to BOLT build directory, default is current '
-                             'directory')
-    parser.add_argument('--switch-back', default=False, action='store_true',
-                        help='Checkout back to the starting revision')
+            """
+        )
+    )
+    parser.add_argument(
+        "build_dir",
+        nargs="?",
+        default=os.getcwd(),
+        help="Path to BOLT build directory, default is current " "directory",
+    )
+    parser.add_argument(
+        "--switch-back",
+        default=False,
+        action="store_true",
+        help="Checkout back to the starting revision",
+    )
     args, wrapper_args = parser.parse_known_args()
-    bolt_path = f'{args.build_dir}/bin/llvm-bolt'
+    bolt_path = f"{args.build_dir}/bin/llvm-bolt"
 
     source_dir = None
     # find the repo directory
-    with open(f'{args.build_dir}/CMakeCache.txt') as f:
+    with open(f"{args.build_dir}/CMakeCache.txt") as f:
         for line in f:
-            m = re.match(r'LLVM_SOURCE_DIR:STATIC=(.*)', line)
+            m = re.match(r"LLVM_SOURCE_DIR:STATIC=(.*)", line)
             if m:
                 source_dir = m.groups()[0]
     if not source_dir:
         sys.exit("Source directory is not found")
 
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    wrapper_path = f'{script_dir}/llvm-bolt-wrapper.py'
+    wrapper_path = f"{script_dir}/llvm-bolt-wrapper.py"
     # build the current commit
-    subprocess.run(shlex.split("cmake --build . --target llvm-bolt"),
-                   cwd=args.build_dir)
+    subprocess.run(
+        shlex.split("cmake --build . --target llvm-bolt"), cwd=args.build_dir
+    )
     # rename llvm-bolt
-    os.replace(bolt_path, f'{bolt_path}.new')
+    os.replace(bolt_path, f"{bolt_path}.new")
     # memorize the old hash for logging
     old_ref = get_git_ref_or_rev(source_dir)
 
     # determine whether a stash is needed
-    stash = subprocess.run(shlex.split("git status --porcelain"), cwd=source_dir,
-                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
-                           text=True).stdout
+    stash = subprocess.run(
+        shlex.split("git status --porcelain"),
+        cwd=source_dir,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        text=True,
+    ).stdout
     if stash:
         # save local changes before checkout
         subprocess.run(shlex.split("git stash push -u"), cwd=source_dir)
@@ -65,16 +82,17 @@ def main():
     # get the parent commit hash for logging
     new_ref = get_git_ref_or_rev(source_dir)
     # build the previous commit
-    subprocess.run(shlex.split("cmake --build . --target llvm-bolt"),
-                   cwd=args.build_dir)
+    subprocess.run(
+        shlex.split("cmake --build . --target llvm-bolt"), cwd=args.build_dir
+    )
     # rename llvm-bolt
-    os.replace(bolt_path, f'{bolt_path}.old')
+    os.replace(bolt_path, f"{bolt_path}.old")
     # set up llvm-bolt-wrapper.ini
     ini = subprocess.check_output(
-        shlex.split(
-            f"{wrapper_path} {bolt_path}.old {bolt_path}.new") + wrapper_args,
-            text=True)
-    with open(f'{args.build_dir}/bin/llvm-bolt-wrapper.ini', 'w') as f:
+        shlex.split(f"{wrapper_path} {bolt_path}.old {bolt_path}.new") + wrapper_args,
+        text=True,
+    )
+    with open(f"{args.build_dir}/bin/llvm-bolt-wrapper.ini", "w") as f:
         f.write(ini)
     # symlink llvm-bolt-wrapper
     os.symlink(wrapper_path, bolt_path)
@@ -83,12 +101,16 @@ def main():
             subprocess.run(shlex.split("git stash pop"), cwd=source_dir)
         subprocess.run(shlex.split(f"git checkout {old_ref}"), cwd=source_dir)
     else:
-        print(f"The repository {source_dir} has been switched from {old_ref} "
-              f"to {new_ref}. Local changes were stashed. Switch back using\n\t"
-              f"git checkout {old_ref}\n")
-    print(f"Build directory {args.build_dir} is ready to run BOLT tests, e.g.\n"
-          "\tbin/llvm-lit -sv tools/bolt/test\nor\n"
-          "\tbin/llvm-lit -sv tools/bolttests")
+        print(
+            f"The repository {source_dir} has been switched from {old_ref} "
+            f"to {new_ref}. Local changes were stashed. Switch back using\n\t"
+            f"git checkout {old_ref}\n"
+        )
+    print(
+        f"Build directory {args.build_dir} is ready to run BOLT tests, e.g.\n"
+        "\tbin/llvm-lit -sv tools/bolt/test\nor\n"
+        "\tbin/llvm-lit -sv tools/bolttests"
+    )
 
 
 if __name__ == "__main__":

diff  --git a/compiler-rt/lib/asan/scripts/asan_symbolize.py b/compiler-rt/lib/asan/scripts/asan_symbolize.py
index 1e8540f0bf95d..b08769614aeb1 100755
--- a/compiler-rt/lib/asan/scripts/asan_symbolize.py
+++ b/compiler-rt/lib/asan/scripts/asan_symbolize.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
-#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
+# ===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 """
 Example of use:
   asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" -s "$HOME/SymbolFiles" < asan.log
@@ -40,1027 +40,1133 @@
 
 # FIXME: merge the code that calls fix_filename().
 def fix_filename(file_name):
-  if fix_filename_patterns:
-    for path_to_cut in fix_filename_patterns:
-      file_name = re.sub('.*' + path_to_cut, '', file_name)
-  file_name = re.sub('.*asan_[a-z_]*.(cc|cpp):[0-9]*', '_asan_rtl_', file_name)
-  file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
-  return file_name
+    if fix_filename_patterns:
+        for path_to_cut in fix_filename_patterns:
+            file_name = re.sub(".*" + path_to_cut, "", file_name)
+    file_name = re.sub(".*asan_[a-z_]*.(cc|cpp):[0-9]*", "_asan_rtl_", file_name)
+    file_name = re.sub(".*crtstuff.c:0", "???:0", file_name)
+    return file_name
+
 
 def is_valid_arch(s):
-  return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
-               "armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390",
-               "riscv64", "loongarch64"]
+    return s in [
+        "i386",
+        "x86_64",
+        "x86_64h",
+        "arm",
+        "armv6",
+        "armv7",
+        "armv7s",
+        "armv7k",
+        "arm64",
+        "powerpc64",
+        "powerpc64le",
+        "s390x",
+        "s390",
+        "riscv64",
+        "loongarch64",
+    ]
+
 
 def guess_arch(addr):
-  # Guess which arch we're running. 10 = len('0x') + 8 hex digits.
-  if len(addr) > 10:
-    return 'x86_64'
-  else:
-    return 'i386'
+    # Guess which arch we're running. 10 = len('0x') + 8 hex digits.
+    if len(addr) > 10:
+        return "x86_64"
+    else:
+        return "i386"
+
 
 class Symbolizer(object):
-  def __init__(self):
-    pass
-
-  def symbolize(self, addr, binary, offset):
-    """Symbolize the given address (pair of binary and offset).
-
-    Overriden in subclasses.
-    Args:
-        addr: virtual address of an instruction.
-        binary: path to executable/shared object containing this instruction.
-        offset: instruction offset in the @binary.
-    Returns:
-        list of strings (one string for each inlined frame) describing
-        the code locations for this instruction (that is, function name, file
-        name, line and column numbers).
-    """
-    return None
+    def __init__(self):
+        pass
+
+    def symbolize(self, addr, binary, offset):
+        """Symbolize the given address (pair of binary and offset).
+
+        Overriden in subclasses.
+        Args:
+            addr: virtual address of an instruction.
+            binary: path to executable/shared object containing this instruction.
+            offset: instruction offset in the @binary.
+        Returns:
+            list of strings (one string for each inlined frame) describing
+            the code locations for this instruction (that is, function name, file
+            name, line and column numbers).
+        """
+        return None
 
 
 class LLVMSymbolizer(Symbolizer):
-  def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
-    super(LLVMSymbolizer, self).__init__()
-    self.symbolizer_path = symbolizer_path
-    self.default_arch = default_arch
-    self.system = system
-    self.dsym_hints = dsym_hints
-    self.pipe = self.open_llvm_symbolizer()
-
-  def open_llvm_symbolizer(self):
-    cmd = [self.symbolizer_path,
-           ('--demangle' if demangle else '--no-demangle'),
-           '--functions=linkage',
-           '--inlines',
-           '--default-arch=%s' % self.default_arch]
-    if self.system == 'Darwin':
-      for hint in self.dsym_hints:
-        cmd.append('--dsym-hint=%s' % hint)
-    logging.debug(' '.join(cmd))
-    try:
-      result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
-                                stdout=subprocess.PIPE,
-                                bufsize=0,
-                                universal_newlines=True)
-    except OSError:
-      result = None
-    return result
-
-  def symbolize(self, addr, binary, offset):
-    """Overrides Symbolizer.symbolize."""
-    if not self.pipe:
-      return None
-    result = []
-    try:
-      symbolizer_input = '"%s" %s' % (binary, offset)
-      logging.debug(symbolizer_input)
-      self.pipe.stdin.write("%s\n" % symbolizer_input)
-      while True:
-        function_name = self.pipe.stdout.readline().rstrip()
-        if not function_name:
-          break
-        file_name = self.pipe.stdout.readline().rstrip()
-        file_name = fix_filename(file_name)
-        if (not function_name.startswith('??') or
-            not file_name.startswith('??')):
-          # Append only non-trivial frames.
-          result.append('%s in %s %s' % (addr, function_name,
-                                         file_name))
-    except Exception:
-      result = []
-    if not result:
-      result = None
-    return result
+    def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
+        super(LLVMSymbolizer, self).__init__()
+        self.symbolizer_path = symbolizer_path
+        self.default_arch = default_arch
+        self.system = system
+        self.dsym_hints = dsym_hints
+        self.pipe = self.open_llvm_symbolizer()
+
+    def open_llvm_symbolizer(self):
+        cmd = [
+            self.symbolizer_path,
+            ("--demangle" if demangle else "--no-demangle"),
+            "--functions=linkage",
+            "--inlines",
+            "--default-arch=%s" % self.default_arch,
+        ]
+        if self.system == "Darwin":
+            for hint in self.dsym_hints:
+                cmd.append("--dsym-hint=%s" % hint)
+        logging.debug(" ".join(cmd))
+        try:
+            result = subprocess.Popen(
+                cmd,
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                bufsize=0,
+                universal_newlines=True,
+            )
+        except OSError:
+            result = None
+        return result
+
+    def symbolize(self, addr, binary, offset):
+        """Overrides Symbolizer.symbolize."""
+        if not self.pipe:
+            return None
+        result = []
+        try:
+            symbolizer_input = '"%s" %s' % (binary, offset)
+            logging.debug(symbolizer_input)
+            self.pipe.stdin.write("%s\n" % symbolizer_input)
+            while True:
+                function_name = self.pipe.stdout.readline().rstrip()
+                if not function_name:
+                    break
+                file_name = self.pipe.stdout.readline().rstrip()
+                file_name = fix_filename(file_name)
+                if not function_name.startswith("??") or not file_name.startswith("??"):
+                    # Append only non-trivial frames.
+                    result.append("%s in %s %s" % (addr, function_name, file_name))
+        except Exception:
+            result = []
+        if not result:
+            result = None
+        return result
 
 
 def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
-  symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
-  if not symbolizer_path:
-    symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
+    symbolizer_path = os.getenv("LLVM_SYMBOLIZER_PATH")
     if not symbolizer_path:
-      # Assume llvm-symbolizer is in PATH.
-      symbolizer_path = 'llvm-symbolizer'
-  return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
+        symbolizer_path = os.getenv("ASAN_SYMBOLIZER_PATH")
+        if not symbolizer_path:
+            # Assume llvm-symbolizer is in PATH.
+            symbolizer_path = "llvm-symbolizer"
+    return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
 
 
 class Addr2LineSymbolizer(Symbolizer):
-  def __init__(self, binary):
-    super(Addr2LineSymbolizer, self).__init__()
-    self.binary = binary
-    self.pipe = self.open_addr2line()
-    self.output_terminator = -1
-
-  def open_addr2line(self):
-    addr2line_tool = 'addr2line'
-    if binutils_prefix:
-      addr2line_tool = binutils_prefix + addr2line_tool
-    logging.debug('addr2line binary is %s' % shutil.which(addr2line_tool))
-    cmd = [addr2line_tool, '-fi']
-    if demangle:
-      cmd += ['--demangle']
-    cmd += ['-e', self.binary]
-    logging.debug(' '.join(cmd))
-    return subprocess.Popen(cmd,
-                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                            bufsize=0,
-                            universal_newlines=True)
-
-  def symbolize(self, addr, binary, offset):
-    """Overrides Symbolizer.symbolize."""
-    if self.binary != binary:
-      return None
-    lines = []
-    try:
-      self.pipe.stdin.write("%s\n" % offset)
-      self.pipe.stdin.write("%s\n" % self.output_terminator)
-      is_first_frame = True
-      while True:
-        function_name = self.pipe.stdout.readline().rstrip()
-        logging.debug("read function_name='%s' from addr2line" % function_name)
-        # If llvm-symbolizer is installed as addr2line, older versions of
-        # llvm-symbolizer will print -1 when presented with -1 and not print
-        # a second line. In that case we will block for ever trying to read the
-        # file name. This also happens for non-existent files, in which case GNU
-        # addr2line exits immediate, but llvm-symbolizer does not (see
-        # https://llvm.org/PR42754).
-        if function_name == '-1':
-          logging.debug("got function '-1' -> no more input")
-          break
-        file_name = self.pipe.stdout.readline().rstrip()
-        logging.debug("read file_name='%s' from addr2line" % file_name)
-        if is_first_frame:
-          is_first_frame = False
-        elif function_name == '??':
-          assert file_name == '??:0', file_name
-          logging.debug("got function '??' -> no more input")
-          break
-        elif not function_name:
-          assert not file_name, file_name
-          logging.debug("got empty function name -> no more input")
-          break
-        if not function_name and not file_name:
-          logging.debug("got empty function and file name -> unknown function")
-          function_name = '??'
-          file_name = '??:0'
-        lines.append((function_name, file_name))
-    except IOError as e:
-      # EPIPE happens if addr2line exits early (which some implementations do
-      # if an invalid file is passed).
-      if e.errno == errno.EPIPE:
-        logging.debug(f"addr2line exited early (broken pipe) returncode={self.pipe.poll()}")
-      else:
-        logging.debug("unexpected I/O exception communicating with addr2line", exc_info=e)
-      lines.append(('??', '??:0'))
-    except Exception as e:
-      logging.debug("got unknown exception communicating with addr2line", exc_info=e)
-      lines.append(('??', '??:0'))
-    return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
+    def __init__(self, binary):
+        super(Addr2LineSymbolizer, self).__init__()
+        self.binary = binary
+        self.pipe = self.open_addr2line()
+        self.output_terminator = -1
+
+    def open_addr2line(self):
+        addr2line_tool = "addr2line"
+        if binutils_prefix:
+            addr2line_tool = binutils_prefix + addr2line_tool
+        logging.debug("addr2line binary is %s" % shutil.which(addr2line_tool))
+        cmd = [addr2line_tool, "-fi"]
+        if demangle:
+            cmd += ["--demangle"]
+        cmd += ["-e", self.binary]
+        logging.debug(" ".join(cmd))
+        return subprocess.Popen(
+            cmd,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            bufsize=0,
+            universal_newlines=True,
+        )
+
+    def symbolize(self, addr, binary, offset):
+        """Overrides Symbolizer.symbolize."""
+        if self.binary != binary:
+            return None
+        lines = []
+        try:
+            self.pipe.stdin.write("%s\n" % offset)
+            self.pipe.stdin.write("%s\n" % self.output_terminator)
+            is_first_frame = True
+            while True:
+                function_name = self.pipe.stdout.readline().rstrip()
+                logging.debug("read function_name='%s' from addr2line" % function_name)
+                # If llvm-symbolizer is installed as addr2line, older versions of
+                # llvm-symbolizer will print -1 when presented with -1 and not print
+                # a second line. In that case we will block for ever trying to read the
+                # file name. This also happens for non-existent files, in which case GNU
+                # addr2line exits immediate, but llvm-symbolizer does not (see
+                # https://llvm.org/PR42754).
+                if function_name == "-1":
+                    logging.debug("got function '-1' -> no more input")
+                    break
+                file_name = self.pipe.stdout.readline().rstrip()
+                logging.debug("read file_name='%s' from addr2line" % file_name)
+                if is_first_frame:
+                    is_first_frame = False
+                elif function_name == "??":
+                    assert file_name == "??:0", file_name
+                    logging.debug("got function '??' -> no more input")
+                    break
+                elif not function_name:
+                    assert not file_name, file_name
+                    logging.debug("got empty function name -> no more input")
+                    break
+                if not function_name and not file_name:
+                    logging.debug(
+                        "got empty function and file name -> unknown function"
+                    )
+                    function_name = "??"
+                    file_name = "??:0"
+                lines.append((function_name, file_name))
+        except IOError as e:
+            # EPIPE happens if addr2line exits early (which some implementations do
+            # if an invalid file is passed).
+            if e.errno == errno.EPIPE:
+                logging.debug(
+                    f"addr2line exited early (broken pipe) returncode={self.pipe.poll()}"
+                )
+            else:
+                logging.debug(
+                    "unexpected I/O exception communicating with addr2line", exc_info=e
+                )
+            lines.append(("??", "??:0"))
+        except Exception as e:
+            logging.debug(
+                "got unknown exception communicating with addr2line", exc_info=e
+            )
+            lines.append(("??", "??:0"))
+        return [
+            "%s in %s %s" % (addr, function, fix_filename(file))
+            for (function, file) in lines
+        ]
+
 
 class UnbufferedLineConverter(object):
-  """
-  Wrap a child process that responds to each line of input with one line of
-  output.  Uses pty to trick the child into providing unbuffered output.
-  """
-  def __init__(self, args, close_stderr=False):
-    # Local imports so that the script can start on Windows.
-    import pty
-    import termios
-    pid, fd = pty.fork()
-    if pid == 0:
-      # We're the child. Transfer control to command.
-      if close_stderr:
-        dev_null = os.open('/dev/null', 0)
-        os.dup2(dev_null, 2)
-      os.execvp(args[0], args)
-    else:
-      # Disable echoing.
-      attr = termios.tcgetattr(fd)
-      attr[3] = attr[3] & ~termios.ECHO
-      termios.tcsetattr(fd, termios.TCSANOW, attr)
-      # Set up a file()-like interface to the child process
-      self.r = os.fdopen(fd, "r", 1)
-      self.w = os.fdopen(os.dup(fd), "w", 1)
+    """
+    Wrap a child process that responds to each line of input with one line of
+    output.  Uses pty to trick the child into providing unbuffered output.
+    """
 
-  def convert(self, line):
-    self.w.write(line + "\n")
-    return self.readline()
+    def __init__(self, args, close_stderr=False):
+        # Local imports so that the script can start on Windows.
+        import pty
+        import termios
+
+        pid, fd = pty.fork()
+        if pid == 0:
+            # We're the child. Transfer control to command.
+            if close_stderr:
+                dev_null = os.open("/dev/null", 0)
+                os.dup2(dev_null, 2)
+            os.execvp(args[0], args)
+        else:
+            # Disable echoing.
+            attr = termios.tcgetattr(fd)
+            attr[3] = attr[3] & ~termios.ECHO
+            termios.tcsetattr(fd, termios.TCSANOW, attr)
+            # Set up a file()-like interface to the child process
+            self.r = os.fdopen(fd, "r", 1)
+            self.w = os.fdopen(os.dup(fd), "w", 1)
+
+    def convert(self, line):
+        self.w.write(line + "\n")
+        return self.readline()
 
-  def readline(self):
-    return self.r.readline().rstrip()
+    def readline(self):
+        return self.r.readline().rstrip()
 
 
 class DarwinSymbolizer(Symbolizer):
-  def __init__(self, addr, binary, arch):
-    super(DarwinSymbolizer, self).__init__()
-    self.binary = binary
-    self.arch = arch
-    self.open_atos()
-
-  def open_atos(self):
-    logging.debug('atos -o %s -arch %s', self.binary, self.arch)
-    cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
-    self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
-
-  def symbolize(self, addr, binary, offset):
-    """Overrides Symbolizer.symbolize."""
-    if self.binary != binary:
-      return None
-    if not os.path.exists(binary):
-      # If the binary doesn't exist atos will exit which will lead to IOError
-      # exceptions being raised later on so just don't try to symbolize.
-      return ['{} ({}:{}+{})'.format(addr, binary, self.arch, offset)]
-    atos_line = self.atos.convert('0x%x' % int(offset, 16))
-    while "got symbolicator for" in atos_line:
-      atos_line = self.atos.readline()
-    # A well-formed atos response looks like this:
-    #   foo(type1, type2) (in object.name) (filename.cc:80)
-    # NOTE:
-    #   * For C functions atos omits parentheses and argument types.
-    #   * For C++ functions the function name (i.e., `foo` above) may contain
-    #     templates which may contain parentheses.
-    match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
-    logging.debug('atos_line: %s', atos_line)
-    if match:
-      function_name = match.group(1)
-      file_name = fix_filename(match.group(3))
-      return ['%s in %s %s' % (addr, function_name, file_name)]
-    else:
-      return ['%s in %s' % (addr, atos_line)]
+    def __init__(self, addr, binary, arch):
+        super(DarwinSymbolizer, self).__init__()
+        self.binary = binary
+        self.arch = arch
+        self.open_atos()
+
+    def open_atos(self):
+        logging.debug("atos -o %s -arch %s", self.binary, self.arch)
+        cmdline = ["atos", "-o", self.binary, "-arch", self.arch]
+        self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
+
+    def symbolize(self, addr, binary, offset):
+        """Overrides Symbolizer.symbolize."""
+        if self.binary != binary:
+            return None
+        if not os.path.exists(binary):
+            # If the binary doesn't exist atos will exit which will lead to IOError
+            # exceptions being raised later on so just don't try to symbolize.
+            return ["{} ({}:{}+{})".format(addr, binary, self.arch, offset)]
+        atos_line = self.atos.convert("0x%x" % int(offset, 16))
+        while "got symbolicator for" in atos_line:
+            atos_line = self.atos.readline()
+        # A well-formed atos response looks like this:
+        #   foo(type1, type2) (in object.name) (filename.cc:80)
+        # NOTE:
+        #   * For C functions atos omits parentheses and argument types.
+        #   * For C++ functions the function name (i.e., `foo` above) may contain
+        #     templates which may contain parentheses.
+        match = re.match("^(.*) \(in (.*)\) \((.*:\d*)\)$", atos_line)
+        logging.debug("atos_line: %s", atos_line)
+        if match:
+            function_name = match.group(1)
+            file_name = fix_filename(match.group(3))
+            return ["%s in %s %s" % (addr, function_name, file_name)]
+        else:
+            return ["%s in %s" % (addr, atos_line)]
 
 
 # Chain several symbolizers so that if one symbolizer fails, we fall back
 # to the next symbolizer in chain.
 class ChainSymbolizer(Symbolizer):
-  def __init__(self, symbolizer_list):
-    super(ChainSymbolizer, self).__init__()
-    self.symbolizer_list = symbolizer_list
-
-  def symbolize(self, addr, binary, offset):
-    """Overrides Symbolizer.symbolize."""
-    for symbolizer in self.symbolizer_list:
-      if symbolizer:
-        result = symbolizer.symbolize(addr, binary, offset)
-        if result:
-          return result
-    return None
+    def __init__(self, symbolizer_list):
+        super(ChainSymbolizer, self).__init__()
+        self.symbolizer_list = symbolizer_list
+
+    def symbolize(self, addr, binary, offset):
+        """Overrides Symbolizer.symbolize."""
+        for symbolizer in self.symbolizer_list:
+            if symbolizer:
+                result = symbolizer.symbolize(addr, binary, offset)
+                if result:
+                    return result
+        return None
 
-  def append_symbolizer(self, symbolizer):
-    self.symbolizer_list.append(symbolizer)
+    def append_symbolizer(self, symbolizer):
+        self.symbolizer_list.append(symbolizer)
 
 
 def BreakpadSymbolizerFactory(binary):
-  suffix = os.getenv('BREAKPAD_SUFFIX')
-  if suffix:
-    filename = binary + suffix
-    if os.access(filename, os.F_OK):
-      return BreakpadSymbolizer(filename)
-  return None
+    suffix = os.getenv("BREAKPAD_SUFFIX")
+    if suffix:
+        filename = binary + suffix
+        if os.access(filename, os.F_OK):
+            return BreakpadSymbolizer(filename)
+    return None
 
 
 def SystemSymbolizerFactory(system, addr, binary, arch):
-  if system == 'Darwin':
-    return DarwinSymbolizer(addr, binary, arch)
-  elif system in ['Linux', 'FreeBSD', 'NetBSD', 'SunOS']:
-    return Addr2LineSymbolizer(binary)
+    if system == "Darwin":
+        return DarwinSymbolizer(addr, binary, arch)
+    elif system in ["Linux", "FreeBSD", "NetBSD", "SunOS"]:
+        return Addr2LineSymbolizer(binary)
 
 
 class BreakpadSymbolizer(Symbolizer):
-  def __init__(self, filename):
-    super(BreakpadSymbolizer, self).__init__()
-    self.filename = filename
-    lines = file(filename).readlines()
-    self.files = []
-    self.symbols = {}
-    self.address_list = []
-    self.addresses = {}
-    # MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
-    fragments = lines[0].rstrip().split()
-    self.arch = fragments[2]
-    self.debug_id = fragments[3]
-    self.binary = ' '.join(fragments[4:])
-    self.parse_lines(lines[1:])
-
-  def parse_lines(self, lines):
-    cur_function_addr = ''
-    for line in lines:
-      fragments = line.split()
-      if fragments[0] == 'FILE':
-        assert int(fragments[1]) == len(self.files)
-        self.files.append(' '.join(fragments[2:]))
-      elif fragments[0] == 'PUBLIC':
-        self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
-      elif fragments[0] in ['CFI', 'STACK']:
-        pass
-      elif fragments[0] == 'FUNC':
-        cur_function_addr = int(fragments[1], 16)
-        if not cur_function_addr in self.symbols.keys():
-          self.symbols[cur_function_addr] = ' '.join(fragments[4:])
-      else:
-        # Line starting with an address.
-        addr = int(fragments[0], 16)
-        self.address_list.append(addr)
-        # Tuple of symbol address, size, line, file number.
-        self.addresses[addr] = (cur_function_addr,
-                                int(fragments[1], 16),
-                                int(fragments[2]),
-                                int(fragments[3]))
-    self.address_list.sort()
-
-  def get_sym_file_line(self, addr):
-    key = None
-    if addr in self.addresses.keys():
-      key = addr
-    else:
-      index = bisect.bisect_left(self.address_list, addr)
-      if index == 0:
-        return None
-      else:
-        key = self.address_list[index - 1]
-    sym_id, size, line_no, file_no = self.addresses[key]
-    symbol = self.symbols[sym_id]
-    filename = self.files[file_no]
-    if addr < key + size:
-      return symbol, filename, line_no
-    else:
-      return None
-
-  def symbolize(self, addr, binary, offset):
-    if self.binary != binary:
-      return None
-    res = self.get_sym_file_line(int(offset, 16))
-    if res:
-      function_name, file_name, line_no = res
-      result = ['%s in %s %s:%d' % (
-          addr, function_name, file_name, line_no)]
-      print(result)
-      return result
-    else:
-      return None
+    def __init__(self, filename):
+        super(BreakpadSymbolizer, self).__init__()
+        self.filename = filename
+        lines = file(filename).readlines()
+        self.files = []
+        self.symbols = {}
+        self.address_list = []
+        self.addresses = {}
+        # MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
+        fragments = lines[0].rstrip().split()
+        self.arch = fragments[2]
+        self.debug_id = fragments[3]
+        self.binary = " ".join(fragments[4:])
+        self.parse_lines(lines[1:])
+
+    def parse_lines(self, lines):
+        cur_function_addr = ""
+        for line in lines:
+            fragments = line.split()
+            if fragments[0] == "FILE":
+                assert int(fragments[1]) == len(self.files)
+                self.files.append(" ".join(fragments[2:]))
+            elif fragments[0] == "PUBLIC":
+                self.symbols[int(fragments[1], 16)] = " ".join(fragments[3:])
+            elif fragments[0] in ["CFI", "STACK"]:
+                pass
+            elif fragments[0] == "FUNC":
+                cur_function_addr = int(fragments[1], 16)
+                if not cur_function_addr in self.symbols.keys():
+                    self.symbols[cur_function_addr] = " ".join(fragments[4:])
+            else:
+                # Line starting with an address.
+                addr = int(fragments[0], 16)
+                self.address_list.append(addr)
+                # Tuple of symbol address, size, line, file number.
+                self.addresses[addr] = (
+                    cur_function_addr,
+                    int(fragments[1], 16),
+                    int(fragments[2]),
+                    int(fragments[3]),
+                )
+        self.address_list.sort()
+
+    def get_sym_file_line(self, addr):
+        key = None
+        if addr in self.addresses.keys():
+            key = addr
+        else:
+            index = bisect.bisect_left(self.address_list, addr)
+            if index == 0:
+                return None
+            else:
+                key = self.address_list[index - 1]
+        sym_id, size, line_no, file_no = self.addresses[key]
+        symbol = self.symbols[sym_id]
+        filename = self.files[file_no]
+        if addr < key + size:
+            return symbol, filename, line_no
+        else:
+            return None
+
+    def symbolize(self, addr, binary, offset):
+        if self.binary != binary:
+            return None
+        res = self.get_sym_file_line(int(offset, 16))
+        if res:
+            function_name, file_name, line_no = res
+            result = ["%s in %s %s:%d" % (addr, function_name, file_name, line_no)]
+            print(result)
+            return result
+        else:
+            return None
 
 
 class SymbolizationLoop(object):
-  def __init__(self, plugin_proxy=None, dsym_hint_producer=None):
-    self.plugin_proxy = plugin_proxy
-    if sys.platform == 'win32':
-      # ASan on Windows uses dbghelp.dll to symbolize in-process, which works
-      # even in sandboxed processes.  Nothing needs to be done here.
-      self.process_line = self.process_line_echo
-    else:
-      # Used by clients who may want to supply a 
diff erent binary name.
-      # E.g. in Chrome several binaries may share a single .dSYM.
-      self.dsym_hint_producer = dsym_hint_producer
-      self.system = os.uname()[0]
-      if self.system not in ['Linux', 'Darwin', 'FreeBSD', 'NetBSD','SunOS']:
-        raise Exception('Unknown system')
-      self.llvm_symbolizers = {}
-      self.last_llvm_symbolizer = None
-      self.dsym_hints = set([])
-      self.frame_no = 0
-      self.process_line = self.process_line_posix
-      self.using_module_map = plugin_proxy.has_plugin(ModuleMapPlugIn.get_name())
-
-  def symbolize_address(self, addr, binary, offset, arch):
-    # On non-Darwin (i.e. on platforms without .dSYM debug info) always use
-    # a single symbolizer binary.
-    # On Darwin, if the dsym hint producer is present:
-    #  1. check whether we've seen this binary already; if so,
-    #     use |llvm_symbolizers[binary]|, which has already loaded the debug
-    #     info for this binary (might not be the case for
-    #     |last_llvm_symbolizer|);
-    #  2. otherwise check if we've seen all the hints for this binary already;
-    #     if so, reuse |last_llvm_symbolizer| which has the full set of hints;
-    #  3. otherwise create a new symbolizer and pass all currently known
-    #     .dSYM hints to it.
-    result = None
-    if not force_system_symbolizer:
-      if not binary in self.llvm_symbolizers:
-        use_new_symbolizer = True
-        if self.system == 'Darwin' and self.dsym_hint_producer:
-          dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
-          use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
-          self.dsym_hints |= dsym_hints_for_binary
-        if self.last_llvm_symbolizer and not use_new_symbolizer:
-            self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
+    def __init__(self, plugin_proxy=None, dsym_hint_producer=None):
+        self.plugin_proxy = plugin_proxy
+        if sys.platform == "win32":
+            # ASan on Windows uses dbghelp.dll to symbolize in-process, which works
+            # even in sandboxed processes.  Nothing needs to be done here.
+            self.process_line = self.process_line_echo
         else:
-          self.last_llvm_symbolizer = LLVMSymbolizerFactory(
-              self.system, arch, self.dsym_hints)
-          self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
-      # Use the chain of symbolizers:
-      # Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
-      # (fall back to next symbolizer if the previous one fails).
-      if not binary in symbolizers:
-        symbolizers[binary] = ChainSymbolizer(
-            [BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
-      result = symbolizers[binary].symbolize(addr, binary, offset)
-    else:
-      symbolizers[binary] = ChainSymbolizer([])
-    if result is None:
-      if not allow_system_symbolizer:
-        raise Exception('Failed to launch or use llvm-symbolizer.')
-      # Initialize system symbolizer only if other symbolizers failed.
-      symbolizers[binary].append_symbolizer(
-          SystemSymbolizerFactory(self.system, addr, binary, arch))
-      result = symbolizers[binary].symbolize(addr, binary, offset)
-    # The system symbolizer must produce some result.
-    assert result
-    return result
-
-  def get_symbolized_lines(self, symbolized_lines, inc_frame_counter=True):
-    if not symbolized_lines:
-      if inc_frame_counter:
-        self.frame_no += 1
-      return [self.current_line]
-    else:
-      assert inc_frame_counter
-      result = []
-      for symbolized_frame in symbolized_lines:
-        result.append('    #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
-        self.frame_no += 1
-      return result
-
-  def process_logfile(self):
-    self.frame_no = 0
-    for line in logfile:
-      processed = self.process_line(line)
-      print('\n'.join(processed))
-
-  def process_line_echo(self, line):
-    return [line.rstrip()]
-
-  def process_line_posix(self, line):
-    self.current_line = line.rstrip()
-    # Unsymbolicated:
-    # #0 0x7f6e35cf2e45  (/blah/foo.so+0x11fe45)
-    # Partially symbolicated:
-    # #0 0x7f6e35cf2e45 in foo (foo.so+0x11fe45)
-    # NOTE: We have to very liberal with symbol
-    # names in the regex because it could be an
-    # Objective-C or C++ demangled name.
-    stack_trace_line_format = (
-        '^( *#([0-9]+) *)(0x[0-9a-f]+) *(?:in *.+)? *\((.*)\+(0x[0-9a-f]+)\)')
-    match = re.match(stack_trace_line_format, line)
-    if not match:
-      logging.debug('Line "{}" does not match regex'.format(line))
-      # Not a frame line so don't increment the frame counter.
-      return self.get_symbolized_lines(None, inc_frame_counter=False)
-    logging.debug(line)
-    _, frameno_str, addr, binary, offset = match.groups()
-
-    if not self.using_module_map and not os.path.isabs(binary):
-      # Do not try to symbolicate if the binary is just the module file name
-      # and a module map is unavailable.
-      # FIXME(dliew): This is currently necessary for reports on Darwin that are
-      # partially symbolicated by `atos`.
-      return self.get_symbolized_lines(None)
-    arch = ""
-    # Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
-    colon_pos = binary.rfind(":")
-    if colon_pos != -1:
-      maybe_arch = binary[colon_pos+1:]
-      if is_valid_arch(maybe_arch):
-        arch = maybe_arch
-        binary = binary[0:colon_pos]
-    if arch == "":
-      arch = guess_arch(addr)
-    if frameno_str == '0':
-      # Assume that frame #0 is the first frame of new stack trace.
-      self.frame_no = 0
-    original_binary = binary
-    binary = self.plugin_proxy.filter_binary_path(binary)
-    if binary is None:
-      # The binary filter has told us this binary can't be symbolized.
-      logging.debug('Skipping symbolication of binary "%s"', original_binary)
-      return self.get_symbolized_lines(None)
-    symbolized_line = self.symbolize_address(addr, binary, offset, arch)
-    if not symbolized_line:
-      if original_binary != binary:
-        symbolized_line = self.symbolize_address(addr, original_binary, offset, arch)
-    return self.get_symbolized_lines(symbolized_line)
+            # Used by clients who may want to supply a different binary name.
+            # E.g. in Chrome several binaries may share a single .dSYM.
+            self.dsym_hint_producer = dsym_hint_producer
+            self.system = os.uname()[0]
+            if self.system not in ["Linux", "Darwin", "FreeBSD", "NetBSD", "SunOS"]:
+                raise Exception("Unknown system")
+            self.llvm_symbolizers = {}
+            self.last_llvm_symbolizer = None
+            self.dsym_hints = set([])
+            self.frame_no = 0
+            self.process_line = self.process_line_posix
+            self.using_module_map = plugin_proxy.has_plugin(ModuleMapPlugIn.get_name())
+
+    def symbolize_address(self, addr, binary, offset, arch):
+        # On non-Darwin (i.e. on platforms without .dSYM debug info) always use
+        # a single symbolizer binary.
+        # On Darwin, if the dsym hint producer is present:
+        #  1. check whether we've seen this binary already; if so,
+        #     use |llvm_symbolizers[binary]|, which has already loaded the debug
+        #     info for this binary (might not be the case for
+        #     |last_llvm_symbolizer|);
+        #  2. otherwise check if we've seen all the hints for this binary already;
+        #     if so, reuse |last_llvm_symbolizer| which has the full set of hints;
+        #  3. otherwise create a new symbolizer and pass all currently known
+        #     .dSYM hints to it.
+        result = None
+        if not force_system_symbolizer:
+            if not binary in self.llvm_symbolizers:
+                use_new_symbolizer = True
+                if self.system == "Darwin" and self.dsym_hint_producer:
+                    dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
+                    use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
+                    self.dsym_hints |= dsym_hints_for_binary
+                if self.last_llvm_symbolizer and not use_new_symbolizer:
+                    self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
+                else:
+                    self.last_llvm_symbolizer = LLVMSymbolizerFactory(
+                        self.system, arch, self.dsym_hints
+                    )
+                    self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
+            # Use the chain of symbolizers:
+            # Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
+            # (fall back to next symbolizer if the previous one fails).
+            if not binary in symbolizers:
+                symbolizers[binary] = ChainSymbolizer(
+                    [BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]]
+                )
+            result = symbolizers[binary].symbolize(addr, binary, offset)
+        else:
+            symbolizers[binary] = ChainSymbolizer([])
+        if result is None:
+            if not allow_system_symbolizer:
+                raise Exception("Failed to launch or use llvm-symbolizer.")
+            # Initialize system symbolizer only if other symbolizers failed.
+            symbolizers[binary].append_symbolizer(
+                SystemSymbolizerFactory(self.system, addr, binary, arch)
+            )
+            result = symbolizers[binary].symbolize(addr, binary, offset)
+        # The system symbolizer must produce some result.
+        assert result
+        return result
+
+    def get_symbolized_lines(self, symbolized_lines, inc_frame_counter=True):
+        if not symbolized_lines:
+            if inc_frame_counter:
+                self.frame_no += 1
+            return [self.current_line]
+        else:
+            assert inc_frame_counter
+            result = []
+            for symbolized_frame in symbolized_lines:
+                result.append(
+                    "    #%s %s" % (str(self.frame_no), symbolized_frame.rstrip())
+                )
+                self.frame_no += 1
+            return result
+
+    def process_logfile(self):
+        self.frame_no = 0
+        for line in logfile:
+            processed = self.process_line(line)
+            print("\n".join(processed))
+
+    def process_line_echo(self, line):
+        return [line.rstrip()]
+
+    def process_line_posix(self, line):
+        self.current_line = line.rstrip()
+        # Unsymbolicated:
+        # #0 0x7f6e35cf2e45  (/blah/foo.so+0x11fe45)
+        # Partially symbolicated:
+        # #0 0x7f6e35cf2e45 in foo (foo.so+0x11fe45)
+        # NOTE: We have to very liberal with symbol
+        # names in the regex because it could be an
+        # Objective-C or C++ demangled name.
+        stack_trace_line_format = (
+            "^( *#([0-9]+) *)(0x[0-9a-f]+) *(?:in *.+)? *\((.*)\+(0x[0-9a-f]+)\)"
+        )
+        match = re.match(stack_trace_line_format, line)
+        if not match:
+            logging.debug('Line "{}" does not match regex'.format(line))
+            # Not a frame line so don't increment the frame counter.
+            return self.get_symbolized_lines(None, inc_frame_counter=False)
+        logging.debug(line)
+        _, frameno_str, addr, binary, offset = match.groups()
+
+        if not self.using_module_map and not os.path.isabs(binary):
+            # Do not try to symbolicate if the binary is just the module file name
+            # and a module map is unavailable.
+            # FIXME(dliew): This is currently necessary for reports on Darwin that are
+            # partially symbolicated by `atos`.
+            return self.get_symbolized_lines(None)
+        arch = ""
+        # Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
+        colon_pos = binary.rfind(":")
+        if colon_pos != -1:
+            maybe_arch = binary[colon_pos + 1 :]
+            if is_valid_arch(maybe_arch):
+                arch = maybe_arch
+                binary = binary[0:colon_pos]
+        if arch == "":
+            arch = guess_arch(addr)
+        if frameno_str == "0":
+            # Assume that frame #0 is the first frame of new stack trace.
+            self.frame_no = 0
+        original_binary = binary
+        binary = self.plugin_proxy.filter_binary_path(binary)
+        if binary is None:
+            # The binary filter has told us this binary can't be symbolized.
+            logging.debug('Skipping symbolication of binary "%s"', original_binary)
+            return self.get_symbolized_lines(None)
+        symbolized_line = self.symbolize_address(addr, binary, offset, arch)
+        if not symbolized_line:
+            if original_binary != binary:
+                symbolized_line = self.symbolize_address(
+                    addr, original_binary, offset, arch
+                )
+        return self.get_symbolized_lines(symbolized_line)
+
 
 class AsanSymbolizerPlugInProxy(object):
-  """
+    """
     Serves several purposes:
     - Manages the lifetime of plugins (must be used a `with` statement).
     - Provides interface for calling into plugins from within this script.
-  """
-  def __init__(self):
-    self._plugins = [ ]
-    self._plugin_names = set()
-
-  def _load_plugin_from_file_impl_py_gt_2(self, file_path, globals_space):
-      with open(file_path, 'r') as f:
-        exec(f.read(), globals_space, None)
-
-  def load_plugin_from_file(self, file_path):
-    logging.info('Loading plugins from "{}"'.format(file_path))
-    globals_space = dict(globals())
-    # Provide function to register plugins
-    def register_plugin(plugin):
-      logging.info('Registering plugin %s', plugin.get_name())
-      self.add_plugin(plugin)
-    globals_space['register_plugin'] = register_plugin
-    if sys.version_info.major < 3:
-      execfile(file_path, globals_space, None)
-    else:
-      # Indirection here is to avoid a bug in older Python 2 versions:
-      # `SyntaxError: unqualified exec is not allowed in function ...`
-      self._load_plugin_from_file_impl_py_gt_2(file_path, globals_space)
-
-  def add_plugin(self, plugin):
-    assert isinstance(plugin, AsanSymbolizerPlugIn)
-    self._plugins.append(plugin)
-    self._plugin_names.add(plugin.get_name())
-    plugin._receive_proxy(self)
-
-  def remove_plugin(self, plugin):
-    assert isinstance(plugin, AsanSymbolizerPlugIn)
-    self._plugins.remove(plugin)
-    self._plugin_names.remove(plugin.get_name())
-    logging.debug('Removing plugin %s', plugin.get_name())
-    plugin.destroy()
-
-  def has_plugin(self, name):
-    """
-      Returns true iff the plugin name is currently
-      being managed by AsanSymbolizerPlugInProxy.
-    """
-    return name in self._plugin_names
-
-  def register_cmdline_args(self, parser):
-    plugins = list(self._plugins)
-    for plugin in plugins:
-      plugin.register_cmdline_args(parser)
-
-  def process_cmdline_args(self, pargs):
-    # Use copy so we can remove items as we iterate.
-    plugins = list(self._plugins)
-    for plugin in plugins:
-      keep = plugin.process_cmdline_args(pargs)
-      assert isinstance(keep, bool)
-      if not keep:
-        self.remove_plugin(plugin)
-
-  def __enter__(self):
-    return self
-
-  def __exit__(self, exc_type, exc_val, exc_tb):
-    for plugin in self._plugins:
-      plugin.destroy()
-    # Don't suppress raised exceptions
-    return False
-
-  def _filter_single_value(self, function_name, input_value):
     """
-      Helper for filter style plugin functions.
-    """
-    new_value = input_value
-    for plugin in self._plugins:
-      result = getattr(plugin, function_name)(new_value)
-      if result is None:
-        return None
-      new_value = result
-    return new_value
 
-  def filter_binary_path(self, binary_path):
-    """
-      Consult available plugins to filter the path to a binary
-      to make it suitable for symbolication.
-
-      Returns `None` if symbolication should not be attempted for this
-      binary.
-    """
-    return self._filter_single_value('filter_binary_path', binary_path)
-
-  def filter_module_desc(self, module_desc):
-    """
-      Consult available plugins to determine the module
-      description suitable for symbolication.
+    def __init__(self):
+        self._plugins = []
+        self._plugin_names = set()
+
+    def _load_plugin_from_file_impl_py_gt_2(self, file_path, globals_space):
+        with open(file_path, "r") as f:
+            exec(f.read(), globals_space, None)
+
+    def load_plugin_from_file(self, file_path):
+        logging.info('Loading plugins from "{}"'.format(file_path))
+        globals_space = dict(globals())
+        # Provide function to register plugins
+        def register_plugin(plugin):
+            logging.info("Registering plugin %s", plugin.get_name())
+            self.add_plugin(plugin)
+
+        globals_space["register_plugin"] = register_plugin
+        if sys.version_info.major < 3:
+            execfile(file_path, globals_space, None)
+        else:
+            # Indirection here is to avoid a bug in older Python 2 versions:
+            # `SyntaxError: unqualified exec is not allowed in function ...`
+            self._load_plugin_from_file_impl_py_gt_2(file_path, globals_space)
+
+    def add_plugin(self, plugin):
+        assert isinstance(plugin, AsanSymbolizerPlugIn)
+        self._plugins.append(plugin)
+        self._plugin_names.add(plugin.get_name())
+        plugin._receive_proxy(self)
+
+    def remove_plugin(self, plugin):
+        assert isinstance(plugin, AsanSymbolizerPlugIn)
+        self._plugins.remove(plugin)
+        self._plugin_names.remove(plugin.get_name())
+        logging.debug("Removing plugin %s", plugin.get_name())
+        plugin.destroy()
+
+    def has_plugin(self, name):
+        """
+        Returns true iff the plugin name is currently
+        being managed by AsanSymbolizerPlugInProxy.
+        """
+        return name in self._plugin_names
+
+    def register_cmdline_args(self, parser):
+        plugins = list(self._plugins)
+        for plugin in plugins:
+            plugin.register_cmdline_args(parser)
+
+    def process_cmdline_args(self, pargs):
+        # Use copy so we can remove items as we iterate.
+        plugins = list(self._plugins)
+        for plugin in plugins:
+            keep = plugin.process_cmdline_args(pargs)
+            assert isinstance(keep, bool)
+            if not keep:
+                self.remove_plugin(plugin)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        for plugin in self._plugins:
+            plugin.destroy()
+        # Don't suppress raised exceptions
+        return False
+
+    def _filter_single_value(self, function_name, input_value):
+        """
+        Helper for filter style plugin functions.
+        """
+        new_value = input_value
+        for plugin in self._plugins:
+            result = getattr(plugin, function_name)(new_value)
+            if result is None:
+                return None
+            new_value = result
+        return new_value
+
+    def filter_binary_path(self, binary_path):
+        """
+        Consult available plugins to filter the path to a binary
+        to make it suitable for symbolication.
+
+        Returns `None` if symbolication should not be attempted for this
+        binary.
+        """
+        return self._filter_single_value("filter_binary_path", binary_path)
+
+    def filter_module_desc(self, module_desc):
+        """
+        Consult available plugins to determine the module
+        description suitable for symbolication.
+
+        Returns `None` if symbolication should not be attempted for this module.
+        """
+        assert isinstance(module_desc, ModuleDesc)
+        return self._filter_single_value("filter_module_desc", module_desc)
 
-      Returns `None` if symbolication should not be attempted for this module.
-    """
-    assert isinstance(module_desc, ModuleDesc)
-    return self._filter_single_value('filter_module_desc', module_desc)
 
 class AsanSymbolizerPlugIn(object):
-  """
+    """
     This is the interface the `asan_symbolize.py` code uses to talk
     to plugins.
-  """
-  @classmethod
-  def get_name(cls):
     """
-      Returns the name of the plugin.
-    """
-    return cls.__name__
 
-  def _receive_proxy(self, proxy):
-    assert isinstance(proxy, AsanSymbolizerPlugInProxy)
-    self.proxy = proxy
+    @classmethod
+    def get_name(cls):
+        """
+        Returns the name of the plugin.
+        """
+        return cls.__name__
 
-  def register_cmdline_args(self, parser):
-    """
-      Hook for registering command line arguments to be
-      consumed in `process_cmdline_args()`.
+    def _receive_proxy(self, proxy):
+        assert isinstance(proxy, AsanSymbolizerPlugInProxy)
+        self.proxy = proxy
 
-      `parser` - Instance of `argparse.ArgumentParser`.
-    """
-    pass
+    def register_cmdline_args(self, parser):
+        """
+        Hook for registering command line arguments to be
+        consumed in `process_cmdline_args()`.
 
-  def process_cmdline_args(self, pargs):
-    """
-      Hook for handling parsed arguments. Implementations
-      should not modify `pargs`.
+        `parser` - Instance of `argparse.ArgumentParser`.
+        """
+        pass
 
-      `pargs` - Instance of `argparse.Namespace` containing
-      parsed command line arguments.
+    def process_cmdline_args(self, pargs):
+        """
+        Hook for handling parsed arguments. Implementations
+        should not modify `pargs`.
 
-      Return `True` if plug-in should be used, otherwise
-      return `False`.
-    """
-    return True
+        `pargs` - Instance of `argparse.Namespace` containing
+        parsed command line arguments.
 
-  def destroy(self):
-    """
-      Hook called when a plugin is about to be destroyed.
-      Implementations should free any allocated resources here.
-    """
-    pass
+        Return `True` if plug-in should be used, otherwise
+        return `False`.
+        """
+        return True
 
-  # Symbolization hooks
-  def filter_binary_path(self, binary_path):
-    """
-      Given a binary path return a binary path suitable for symbolication.
+    def destroy(self):
+        """
+        Hook called when a plugin is about to be destroyed.
+        Implementations should free any allocated resources here.
+        """
+        pass
 
-      Implementations should return `None` if symbolication of this binary
-      should be skipped.
-    """
-    return binary_path
+    # Symbolization hooks
+    def filter_binary_path(self, binary_path):
+        """
+        Given a binary path return a binary path suitable for symbolication.
 
-  def filter_module_desc(self, module_desc):
-    """
-      Given a ModuleDesc object (`module_desc`) return
-      a ModuleDesc suitable for symbolication.
+        Implementations should return `None` if symbolication of this binary
+        should be skipped.
+        """
+        return binary_path
+
+    def filter_module_desc(self, module_desc):
+        """
+        Given a ModuleDesc object (`module_desc`) return
+        a ModuleDesc suitable for symbolication.
+
+        Implementations should return `None` if symbolication of this binary
+        should be skipped.
+        """
+        return module_desc
 
-      Implementations should return `None` if symbolication of this binary
-      should be skipped.
-    """
-    return module_desc
 
 class ModuleDesc(object):
-  def __init__(self, name, arch, start_addr, end_addr, module_path, uuid):
-    self.name = name
-    self.arch = arch
-    self.start_addr = start_addr
-    self.end_addr = end_addr
-    # Module path from an ASan report.
-    self.module_path = module_path
-    # Module for performing symbolization, by default same as above.
-    self.module_path_for_symbolization = module_path
-    self.uuid = uuid
-    assert self.is_valid()
-
-  def __str__(self):
-    assert self.is_valid()
-    return "{name} {arch} {start_addr:#016x}-{end_addr:#016x} {module_path} {uuid}".format(
-      name=self.name,
-      arch=self.arch,
-      start_addr=self.start_addr,
-      end_addr=self.end_addr,
-      module_path=self.module_path if self.module_path == self.module_path_for_symbolization else '{} ({})'.format(self.module_path_for_symbolization, self.module_path),
-      uuid=self.uuid
-    )
+    def __init__(self, name, arch, start_addr, end_addr, module_path, uuid):
+        self.name = name
+        self.arch = arch
+        self.start_addr = start_addr
+        self.end_addr = end_addr
+        # Module path from an ASan report.
+        self.module_path = module_path
+        # Module for performing symbolization, by default same as above.
+        self.module_path_for_symbolization = module_path
+        self.uuid = uuid
+        assert self.is_valid()
+
+    def __str__(self):
+        assert self.is_valid()
+        return "{name} {arch} {start_addr:#016x}-{end_addr:#016x} {module_path} {uuid}".format(
+            name=self.name,
+            arch=self.arch,
+            start_addr=self.start_addr,
+            end_addr=self.end_addr,
+            module_path=self.module_path
+            if self.module_path == self.module_path_for_symbolization
+            else "{} ({})".format(self.module_path_for_symbolization, self.module_path),
+            uuid=self.uuid,
+        )
+
+    def is_valid(self):
+        if not isinstance(self.name, str):
+            return False
+        if not isinstance(self.arch, str):
+            return False
+        if not isinstance(self.start_addr, int):
+            return False
+        if self.start_addr < 0:
+            return False
+        if not isinstance(self.end_addr, int):
+            return False
+        if self.end_addr <= self.start_addr:
+            return False
+        if not isinstance(self.module_path, str):
+            return False
+        if not os.path.isabs(self.module_path):
+            return False
+        if not isinstance(self.module_path_for_symbolization, str):
+            return False
+        if not os.path.isabs(self.module_path_for_symbolization):
+            return False
+        if not isinstance(self.uuid, str):
+            return False
+        return True
 
-  def is_valid(self):
-    if not isinstance(self.name, str):
-      return False
-    if not isinstance(self.arch, str):
-      return False
-    if not isinstance(self.start_addr, int):
-      return False
-    if self.start_addr < 0:
-      return False
-    if not isinstance(self.end_addr, int):
-      return False
-    if self.end_addr <= self.start_addr:
-      return False
-    if not isinstance(self.module_path, str):
-      return False
-    if not os.path.isabs(self.module_path):
-      return False
-    if not isinstance(self.module_path_for_symbolization, str):
-      return False
-    if not os.path.isabs(self.module_path_for_symbolization):
-      return False
-    if not isinstance(self.uuid, str):
-      return False
-    return True
 
 class GetUUIDFromBinaryException(Exception):
-  def __init__(self, msg):
-    super(GetUUIDFromBinaryException, self).__init__(msg)
+    def __init__(self, msg):
+        super(GetUUIDFromBinaryException, self).__init__(msg)
+
 
 _get_uuid_from_binary_cache = dict()
 
-def get_uuid_from_binary(path_to_binary, arch=None):
-  cache_key = (path_to_binary, arch)
-  cached_value = _get_uuid_from_binary_cache.get(cache_key)
-  if cached_value:
-    return cached_value
-  if not os.path.exists(path_to_binary):
-    raise GetUUIDFromBinaryException('Binary "{}" does not exist'.format(path_to_binary))
-  cmd = [ '/usr/bin/otool', '-l']
-  if arch:
-    cmd.extend(['-arch', arch])
-  cmd.append(path_to_binary)
-  output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-  # Look for this output:
-  # cmd LC_UUID
-  # cmdsize 24
-  # uuid 4CA778FE-5BF9-3C45-AE59-7DF01B2BE83F
-  if isinstance(output, str):
-    output_str = output
-  else:
-    assert isinstance(output, bytes)
-    output_str = output.decode()
-  assert isinstance(output_str, str)
-  lines = output_str.split('\n')
-  uuid = None
-  for index, line in enumerate(lines):
-    stripped_line = line.strip()
-    if not stripped_line.startswith('cmd LC_UUID'):
-      continue
-    uuid_line = lines[index+2].strip()
-    if not uuid_line.startswith('uuid'):
-      raise GetUUIDFromBinaryException('Malformed output: "{}"'.format(uuid_line))
-    split_uuid_line = uuid_line.split()
-    uuid = split_uuid_line[1]
-    break
-  if uuid is None:
-    logging.error('Failed to retrieve UUID from binary {}'.format(path_to_binary))
-    logging.error('otool output was:\n{}'.format(output_str))
-    raise GetUUIDFromBinaryException('Failed to retrieve UUID from binary "{}"'.format(path_to_binary))
-  else:
-    # Update cache
-    _get_uuid_from_binary_cache[cache_key] = uuid
-  return uuid
 
-class ModuleMap(object):
-  def __init__(self):
-    self._module_name_to_description_map = dict()
-
-  def add_module(self, desc):
-    assert isinstance(desc, ModuleDesc)
-    assert desc.name not in self._module_name_to_description_map
-    self._module_name_to_description_map[desc.name] = desc
-
-  def find_module_by_name(self, name):
-    return self._module_name_to_description_map.get(name, None)
-
-  def __str__(self):
-    s = '{} modules:\n'.format(self.num_modules)
-    for module_desc in sorted(self._module_name_to_description_map.values(), key=lambda v: v.start_addr):
-      s += str(module_desc) + '\n'
-    return s
-
-  @property
-  def num_modules(self):
-    return len(self._module_name_to_description_map)
-
-  @property
-  def modules(self):
-    return set(self._module_name_to_description_map.values())
-
-  def get_module_path_for_symbolication(self, module_name, proxy, validate_uuid):
-    module_desc = self.find_module_by_name(module_name)
-    if module_desc is None:
-      return None
-    # Allow a plug-in to change the module description to make it
-    # suitable for symbolication or avoid symbolication altogether.
-    module_desc = proxy.filter_module_desc(module_desc)
-    if module_desc is None:
-      return None
-    if validate_uuid:
-      logging.debug('Validating UUID of {}'.format(module_desc.module_path_for_symbolization))
-      try:
-        uuid = get_uuid_from_binary(module_desc.module_path_for_symbolization, arch = module_desc.arch)
-        if uuid != module_desc.uuid:
-          logging.warning("Detected UUID mismatch {} != {}".format(uuid, module_desc.uuid))
-          # UUIDs don't match. Tell client to not symbolize this.
-          return None
-      except GetUUIDFromBinaryException as e:
-        logging.error('Failed to get binary from UUID: %s', str(e))
-        return None
-    else:
-      logging.warning('Skipping validation of UUID of {}'.format(module_desc.module_path_for_symbolization))
-    return module_desc.module_path_for_symbolization
-
-  @staticmethod
-  def parse_from_file(module_map_path):
-    if not os.path.exists(module_map_path):
-      raise Exception('module map "{}" does not exist'.format(module_map_path))
-    with open(module_map_path, 'r') as f:
-      mm = None
-      # E.g.
-      # 0x2db4000-0x102ddc000 /path/to (arm64) <0D6BBDE0-FF90-3680-899D-8E6F9528E04C>
-      hex_regex = lambda name: r'0x(?P<' + name + r'>[0-9a-f]+)'
-      module_path_regex = r'(?P<path>.+)'
-      arch_regex = r'\((?P<arch>.+)\)'
-      uuid_regex = r'<(?P<uuid>[0-9A-Z-]+)>'
-      line_regex = r'^{}-{}\s+{}\s+{}\s+{}'.format(
-        hex_regex('start_addr'),
-        hex_regex('end_addr'),
-        module_path_regex,
-        arch_regex,
-        uuid_regex
-      )
-      matcher = re.compile(line_regex)
-      line_num = 0
-      line = 'dummy'
-      while line != '':
-        line = f.readline()
-        line_num += 1
-        if mm is None:
-          if line.startswith('Process module map:'):
-            mm = ModuleMap()
-          continue
-        if line.startswith('End of module map'):
-          break
-        m_obj = matcher.match(line)
-        if not m_obj:
-          raise Exception('Failed to parse line {} "{}"'.format(line_num, line))
-        arch = m_obj.group('arch')
-        start_addr = int(m_obj.group('start_addr'), base=16)
-        end_addr = int(m_obj.group('end_addr'), base=16)
-        module_path = m_obj.group('path')
-        uuid = m_obj.group('uuid')
-        module_desc = ModuleDesc(
-          name=os.path.basename(module_path),
-          arch=arch,
-          start_addr=start_addr,
-          end_addr=end_addr,
-          module_path=module_path,
-          uuid=uuid
+def get_uuid_from_binary(path_to_binary, arch=None):
+    cache_key = (path_to_binary, arch)
+    cached_value = _get_uuid_from_binary_cache.get(cache_key)
+    if cached_value:
+        return cached_value
+    if not os.path.exists(path_to_binary):
+        raise GetUUIDFromBinaryException(
+            'Binary "{}" does not exist'.format(path_to_binary)
         )
-        mm.add_module(module_desc)
-      if mm is not None:
-        logging.debug('Loaded Module map from "{}":\n{}'.format(
-          f.name,
-          str(mm))
+    cmd = ["/usr/bin/otool", "-l"]
+    if arch:
+        cmd.extend(["-arch", arch])
+    cmd.append(path_to_binary)
+    output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+    # Look for this output:
+    # cmd LC_UUID
+    # cmdsize 24
+    # uuid 4CA778FE-5BF9-3C45-AE59-7DF01B2BE83F
+    if isinstance(output, str):
+        output_str = output
+    else:
+        assert isinstance(output, bytes)
+        output_str = output.decode()
+    assert isinstance(output_str, str)
+    lines = output_str.split("\n")
+    uuid = None
+    for index, line in enumerate(lines):
+        stripped_line = line.strip()
+        if not stripped_line.startswith("cmd LC_UUID"):
+            continue
+        uuid_line = lines[index + 2].strip()
+        if not uuid_line.startswith("uuid"):
+            raise GetUUIDFromBinaryException('Malformed output: "{}"'.format(uuid_line))
+        split_uuid_line = uuid_line.split()
+        uuid = split_uuid_line[1]
+        break
+    if uuid is None:
+        logging.error("Failed to retrieve UUID from binary {}".format(path_to_binary))
+        logging.error("otool output was:\n{}".format(output_str))
+        raise GetUUIDFromBinaryException(
+            'Failed to retrieve UUID from binary "{}"'.format(path_to_binary)
         )
-      return mm
+    else:
+        # Update cache
+        _get_uuid_from_binary_cache[cache_key] = uuid
+    return uuid
+
+
+class ModuleMap(object):
+    def __init__(self):
+        self._module_name_to_description_map = dict()
+
+    def add_module(self, desc):
+        assert isinstance(desc, ModuleDesc)
+        assert desc.name not in self._module_name_to_description_map
+        self._module_name_to_description_map[desc.name] = desc
+
+    def find_module_by_name(self, name):
+        return self._module_name_to_description_map.get(name, None)
+
+    def __str__(self):
+        s = "{} modules:\n".format(self.num_modules)
+        for module_desc in sorted(
+            self._module_name_to_description_map.values(), key=lambda v: v.start_addr
+        ):
+            s += str(module_desc) + "\n"
+        return s
+
+    @property
+    def num_modules(self):
+        return len(self._module_name_to_description_map)
+
+    @property
+    def modules(self):
+        return set(self._module_name_to_description_map.values())
+
+    def get_module_path_for_symbolication(self, module_name, proxy, validate_uuid):
+        module_desc = self.find_module_by_name(module_name)
+        if module_desc is None:
+            return None
+        # Allow a plug-in to change the module description to make it
+        # suitable for symbolication or avoid symbolication altogether.
+        module_desc = proxy.filter_module_desc(module_desc)
+        if module_desc is None:
+            return None
+        if validate_uuid:
+            logging.debug(
+                "Validating UUID of {}".format(
+                    module_desc.module_path_for_symbolization
+                )
+            )
+            try:
+                uuid = get_uuid_from_binary(
+                    module_desc.module_path_for_symbolization, arch=module_desc.arch
+                )
+                if uuid != module_desc.uuid:
+                    logging.warning(
+                        "Detected UUID mismatch {} != {}".format(uuid, module_desc.uuid)
+                    )
+                    # UUIDs don't match. Tell client to not symbolize this.
+                    return None
+            except GetUUIDFromBinaryException as e:
+                logging.error("Failed to get binary from UUID: %s", str(e))
+                return None
+        else:
+            logging.warning(
+                "Skipping validation of UUID of {}".format(
+                    module_desc.module_path_for_symbolization
+                )
+            )
+        return module_desc.module_path_for_symbolization
+
+    @staticmethod
+    def parse_from_file(module_map_path):
+        if not os.path.exists(module_map_path):
+            raise Exception('module map "{}" does not exist'.format(module_map_path))
+        with open(module_map_path, "r") as f:
+            mm = None
+            # E.g.
+            # 0x2db4000-0x102ddc000 /path/to (arm64) <0D6BBDE0-FF90-3680-899D-8E6F9528E04C>
+            hex_regex = lambda name: r"0x(?P<" + name + r">[0-9a-f]+)"
+            module_path_regex = r"(?P<path>.+)"
+            arch_regex = r"\((?P<arch>.+)\)"
+            uuid_regex = r"<(?P<uuid>[0-9A-Z-]+)>"
+            line_regex = r"^{}-{}\s+{}\s+{}\s+{}".format(
+                hex_regex("start_addr"),
+                hex_regex("end_addr"),
+                module_path_regex,
+                arch_regex,
+                uuid_regex,
+            )
+            matcher = re.compile(line_regex)
+            line_num = 0
+            line = "dummy"
+            while line != "":
+                line = f.readline()
+                line_num += 1
+                if mm is None:
+                    if line.startswith("Process module map:"):
+                        mm = ModuleMap()
+                    continue
+                if line.startswith("End of module map"):
+                    break
+                m_obj = matcher.match(line)
+                if not m_obj:
+                    raise Exception(
+                        'Failed to parse line {} "{}"'.format(line_num, line)
+                    )
+                arch = m_obj.group("arch")
+                start_addr = int(m_obj.group("start_addr"), base=16)
+                end_addr = int(m_obj.group("end_addr"), base=16)
+                module_path = m_obj.group("path")
+                uuid = m_obj.group("uuid")
+                module_desc = ModuleDesc(
+                    name=os.path.basename(module_path),
+                    arch=arch,
+                    start_addr=start_addr,
+                    end_addr=end_addr,
+                    module_path=module_path,
+                    uuid=uuid,
+                )
+                mm.add_module(module_desc)
+            if mm is not None:
+                logging.debug(
+                    'Loaded Module map from "{}":\n{}'.format(f.name, str(mm))
+                )
+            return mm
+
 
 class SysRootFilterPlugIn(AsanSymbolizerPlugIn):
-  """
+    """
     Simple plug-in to add sys root prefix to all binary paths
     used for symbolication.
-  """
-  def __init__(self):
-    self.sysroot_path = ""
+    """
+
+    def __init__(self):
+        self.sysroot_path = ""
+
+    def register_cmdline_args(self, parser):
+        parser.add_argument(
+            "-s",
+            dest="sys_root",
+            metavar="SYSROOT",
+            help="set path to sysroot for sanitized binaries",
+        )
 
-  def register_cmdline_args(self, parser):
-    parser.add_argument('-s', dest='sys_root', metavar='SYSROOT',
-                      help='set path to sysroot for sanitized binaries')
+    def process_cmdline_args(self, pargs):
+        if pargs.sys_root is None:
+            # Not being used so remove ourselves.
+            return False
+        self.sysroot_path = pargs.sys_root
+        return True
 
-  def process_cmdline_args(self, pargs):
-    if pargs.sys_root is None:
-      # Not being used so remove ourselves.
-      return False
-    self.sysroot_path = pargs.sys_root
-    return True
+    def filter_binary_path(self, path):
+        return self.sysroot_path + path
 
-  def filter_binary_path(self, path):
-    return self.sysroot_path + path
 
 class ModuleMapPlugIn(AsanSymbolizerPlugIn):
-  def __init__(self):
-    self._module_map = None
-    self._uuid_validation = True
-  def register_cmdline_args(self, parser):
-    parser.add_argument('--module-map',
-                        help='Path to text file containing module map'
-                        'output. See print_module_map ASan option.')
-    parser.add_argument('--skip-uuid-validation',
-                        default=False,
-                        action='store_true',
-                        help='Skips validating UUID of modules using otool.')
-
-  def process_cmdline_args(self, pargs):
-    if not pargs.module_map:
-      return False
-    self._module_map = ModuleMap.parse_from_file(args.module_map)
-    if self._module_map is None:
-      msg = 'Failed to find module map'
-      logging.error(msg)
-      raise Exception(msg)
-    self._uuid_validation = not pargs.skip_uuid_validation
-    return True
-
-  def filter_binary_path(self, binary_path):
-    if os.path.isabs(binary_path):
-      # This is a binary path so transform into
-      # a module name
-      module_name = os.path.basename(binary_path)
-    else:
-      module_name = binary_path
-    return self._module_map.get_module_path_for_symbolication(
-      module_name,
-      self.proxy,
-      self._uuid_validation
-    )
+    def __init__(self):
+        self._module_map = None
+        self._uuid_validation = True
+
+    def register_cmdline_args(self, parser):
+        parser.add_argument(
+            "--module-map",
+            help="Path to text file containing module map"
+            "output. See print_module_map ASan option.",
+        )
+        parser.add_argument(
+            "--skip-uuid-validation",
+            default=False,
+            action="store_true",
+            help="Skips validating UUID of modules using otool.",
+        )
+
+    def process_cmdline_args(self, pargs):
+        if not pargs.module_map:
+            return False
+        self._module_map = ModuleMap.parse_from_file(args.module_map)
+        if self._module_map is None:
+            msg = "Failed to find module map"
+            logging.error(msg)
+            raise Exception(msg)
+        self._uuid_validation = not pargs.skip_uuid_validation
+        return True
+
+    def filter_binary_path(self, binary_path):
+        if os.path.isabs(binary_path):
+            # This is a binary path so transform into
+            # a module name
+            module_name = os.path.basename(binary_path)
+        else:
+            module_name = binary_path
+        return self._module_map.get_module_path_for_symbolication(
+            module_name, self.proxy, self._uuid_validation
+        )
+
 
 def add_logging_args(parser):
-  parser.add_argument('--log-dest',
-    default=None,
-    help='Destination path for script logging (default stderr).',
-  )
-  parser.add_argument('--log-level',
-    choices=['debug', 'info', 'warning', 'error', 'critical'],
-    default='info',
-    help='Log level for script (default: %(default)s).'
-  )
+    parser.add_argument(
+        "--log-dest",
+        default=None,
+        help="Destination path for script logging (default stderr).",
+    )
+    parser.add_argument(
+        "--log-level",
+        choices=["debug", "info", "warning", "error", "critical"],
+        default="info",
+        help="Log level for script (default: %(default)s).",
+    )
+
 
 def setup_logging():
-  # Set up a parser just for parsing the logging arguments.
-  # This is necessary because logging should be configured before we
-  # perform the main argument parsing.
-  parser = argparse.ArgumentParser(add_help=False)
-  add_logging_args(parser)
-  pargs, unparsed_args = parser.parse_known_args()
-
-  log_level = getattr(logging, pargs.log_level.upper())
-  if log_level == logging.DEBUG:
-    log_format = '%(levelname)s: [%(funcName)s() %(filename)s:%(lineno)d] %(message)s'
-  else:
-    log_format = '%(levelname)s: %(message)s'
-  basic_config = {
-    'level': log_level,
-    'format': log_format
-  }
-  log_dest = pargs.log_dest
-  if log_dest:
-    basic_config['filename'] = log_dest
-  logging.basicConfig(**basic_config)
-  logging.debug('Logging level set to "{}" and directing output to "{}"'.format(
-    pargs.log_level,
-    'stderr' if log_dest is None else log_dest)
-  )
-  return unparsed_args
+    # Set up a parser just for parsing the logging arguments.
+    # This is necessary because logging should be configured before we
+    # perform the main argument parsing.
+    parser = argparse.ArgumentParser(add_help=False)
+    add_logging_args(parser)
+    pargs, unparsed_args = parser.parse_known_args()
+
+    log_level = getattr(logging, pargs.log_level.upper())
+    if log_level == logging.DEBUG:
+        log_format = (
+            "%(levelname)s: [%(funcName)s() %(filename)s:%(lineno)d] %(message)s"
+        )
+    else:
+        log_format = "%(levelname)s: %(message)s"
+    basic_config = {"level": log_level, "format": log_format}
+    log_dest = pargs.log_dest
+    if log_dest:
+        basic_config["filename"] = log_dest
+    logging.basicConfig(**basic_config)
+    logging.debug(
+        'Logging level set to "{}" and directing output to "{}"'.format(
+            pargs.log_level, "stderr" if log_dest is None else log_dest
+        )
+    )
+    return unparsed_args
+
 
 def add_load_plugin_args(parser):
-  parser.add_argument('-p', '--plugins',
-    help='Load plug-in', nargs='+', default=[])
+    parser.add_argument("-p", "--plugins", help="Load plug-in", nargs="+", default=[])
+
 
 def setup_plugins(plugin_proxy, args):
-  parser = argparse.ArgumentParser(add_help=False)
-  add_load_plugin_args(parser)
-  pargs , unparsed_args = parser.parse_known_args()
-  for plugin_path in pargs.plugins:
-    plugin_proxy.load_plugin_from_file(plugin_path)
-  # Add built-in plugins.
-  plugin_proxy.add_plugin(ModuleMapPlugIn())
-  plugin_proxy.add_plugin(SysRootFilterPlugIn())
-  return unparsed_args
-
-if __name__ == '__main__':
-  remaining_args = setup_logging()
-  with AsanSymbolizerPlugInProxy() as plugin_proxy:
-    remaining_args = setup_plugins(plugin_proxy, remaining_args)
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        description='ASan symbolization script',
-        epilog=__doc__)
-    parser.add_argument('path_to_cut', nargs='*',
-                        help='pattern to be cut from the result file path ')
-    parser.add_argument('-d','--demangle', action='store_true',
-                        help='demangle function names')
-    parser.add_argument('-c', metavar='CROSS_COMPILE',
-                        help='set prefix for binutils')
-    parser.add_argument('-l','--logfile', default=sys.stdin,
-                        type=argparse.FileType('r'),
-                        help='set log file name to parse, default is stdin')
-    parser.add_argument('--force-system-symbolizer', action='store_true',
-                        help='don\'t use llvm-symbolizer')
-    # Add logging arguments so that `--help` shows them.
-    add_logging_args(parser)
-    # Add load plugin arguments so that `--help` shows them.
+    parser = argparse.ArgumentParser(add_help=False)
     add_load_plugin_args(parser)
-    plugin_proxy.register_cmdline_args(parser)
-    args = parser.parse_args(remaining_args)
-    plugin_proxy.process_cmdline_args(args)
-    if args.path_to_cut:
-      fix_filename_patterns = args.path_to_cut
-    if args.demangle:
-      demangle = True
-    if args.c:
-      binutils_prefix = args.c
-    if args.logfile:
-      logfile = args.logfile
-    else:
-      logfile = sys.stdin
-    if args.force_system_symbolizer:
-      force_system_symbolizer = True
-    if force_system_symbolizer:
-      assert(allow_system_symbolizer)
-    loop = SymbolizationLoop(plugin_proxy)
-    loop.process_logfile()
+    pargs, unparsed_args = parser.parse_known_args()
+    for plugin_path in pargs.plugins:
+        plugin_proxy.load_plugin_from_file(plugin_path)
+    # Add built-in plugins.
+    plugin_proxy.add_plugin(ModuleMapPlugIn())
+    plugin_proxy.add_plugin(SysRootFilterPlugIn())
+    return unparsed_args
+
+
+if __name__ == "__main__":
+    remaining_args = setup_logging()
+    with AsanSymbolizerPlugInProxy() as plugin_proxy:
+        remaining_args = setup_plugins(plugin_proxy, remaining_args)
+        parser = argparse.ArgumentParser(
+            formatter_class=argparse.RawDescriptionHelpFormatter,
+            description="ASan symbolization script",
+            epilog=__doc__,
+        )
+        parser.add_argument(
+            "path_to_cut",
+            nargs="*",
+            help="pattern to be cut from the result file path ",
+        )
+        parser.add_argument(
+            "-d", "--demangle", action="store_true", help="demangle function names"
+        )
+        parser.add_argument(
+            "-c", metavar="CROSS_COMPILE", help="set prefix for binutils"
+        )
+        parser.add_argument(
+            "-l",
+            "--logfile",
+            default=sys.stdin,
+            type=argparse.FileType("r"),
+            help="set log file name to parse, default is stdin",
+        )
+        parser.add_argument(
+            "--force-system-symbolizer",
+            action="store_true",
+            help="don't use llvm-symbolizer",
+        )
+        # Add logging arguments so that `--help` shows them.
+        add_logging_args(parser)
+        # Add load plugin arguments so that `--help` shows them.
+        add_load_plugin_args(parser)
+        plugin_proxy.register_cmdline_args(parser)
+        args = parser.parse_args(remaining_args)
+        plugin_proxy.process_cmdline_args(args)
+        if args.path_to_cut:
+            fix_filename_patterns = args.path_to_cut
+        if args.demangle:
+            demangle = True
+        if args.c:
+            binutils_prefix = args.c
+        if args.logfile:
+            logfile = args.logfile
+        else:
+            logfile = sys.stdin
+        if args.force_system_symbolizer:
+            force_system_symbolizer = True
+        if force_system_symbolizer:
+            assert allow_system_symbolizer
+        loop = SymbolizationLoop(plugin_proxy)
+        loop.process_logfile()

diff  --git a/compiler-rt/lib/dfsan/scripts/build-libc-list.py b/compiler-rt/lib/dfsan/scripts/build-libc-list.py
index aa155b24cb4a2..9ec17cbdc9e1f 100755
--- a/compiler-rt/lib/dfsan/scripts/build-libc-list.py
+++ b/compiler-rt/lib/dfsan/scripts/build-libc-list.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===- lib/dfsan/scripts/build-libc-list.py ---------------------------------===#
+# ===- lib/dfsan/scripts/build-libc-list.py ---------------------------------===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 # The purpose of this script is to identify every function symbol in a set of
 # libraries (in this case, libc and libgcc) so that they can be marked as
 # uninstrumented, thus allowing the instrumentation pass to treat calls to those
@@ -42,50 +42,62 @@
 
 
 def defined_function_list(lib):
-  """Get non-local function symbols from lib."""
-  functions = []
-  readelf_proc = subprocess.Popen(['readelf', '-s', '-W', lib],
-                                  stdout=subprocess.PIPE)
-  readelf = readelf_proc.communicate()[0].decode().split('\n')
-  if readelf_proc.returncode != 0:
-    raise subprocess.CalledProcessError(readelf_proc.returncode, 'readelf')
-  for line in readelf:
-    if (line[31:35] == 'FUNC' or line[31:36] == 'IFUNC') and \
-       line[39:44] != 'LOCAL' and \
-       line[55:58] != 'UND':
-      function_name = line[59:].split('@')[0]
-      functions.append(function_name)
-  return functions
+    """Get non-local function symbols from lib."""
+    functions = []
+    readelf_proc = subprocess.Popen(
+        ["readelf", "-s", "-W", lib], stdout=subprocess.PIPE
+    )
+    readelf = readelf_proc.communicate()[0].decode().split("\n")
+    if readelf_proc.returncode != 0:
+        raise subprocess.CalledProcessError(readelf_proc.returncode, "readelf")
+    for line in readelf:
+        if (
+            (line[31:35] == "FUNC" or line[31:36] == "IFUNC")
+            and line[39:44] != "LOCAL"
+            and line[55:58] != "UND"
+        ):
+            function_name = line[59:].split("@")[0]
+            functions.append(function_name)
+    return functions
+
 
 p = OptionParser()
 
-p.add_option('--lib-file', action='append', metavar='PATH',
-             help='Specific library files to add.',
-             default=[])
+p.add_option(
+    "--lib-file",
+    action="append",
+    metavar="PATH",
+    help="Specific library files to add.",
+    default=[],
+)
 
-p.add_option('--error-missing-lib', action='store_true',
-             help='Make this script exit with an error code if any library is missing.',
-             dest='error_missing_lib', default=False)
+p.add_option(
+    "--error-missing-lib",
+    action="store_true",
+    help="Make this script exit with an error code if any library is missing.",
+    dest="error_missing_lib",
+    default=False,
+)
 
 (options, args) = p.parse_args()
 
 libs = options.lib_file
 if not libs:
-    print('No libraries provided.', file=sys.stderr)
+    print("No libraries provided.", file=sys.stderr)
     exit(1)
 
 missing_lib = False
 functions = []
 for l in libs:
-  if os.path.exists(l):
-    functions += defined_function_list(l)
-  else:
-    missing_lib = True
-    print('warning: library %s not found' % l, file=sys.stderr)
+    if os.path.exists(l):
+        functions += defined_function_list(l)
+    else:
+        missing_lib = True
+        print("warning: library %s not found" % l, file=sys.stderr)
 
 if options.error_missing_lib and missing_lib:
-    print('Exiting with failure code due to missing library.', file=sys.stderr)
+    print("Exiting with failure code due to missing library.", file=sys.stderr)
     exit(1)
 
 for f in sorted(set(functions)):
-  print('fun:%s=uninstrumented' % f)
+    print("fun:%s=uninstrumented" % f)

diff  --git a/compiler-rt/lib/fuzzer/scripts/unbalanced_allocs.py b/compiler-rt/lib/fuzzer/scripts/unbalanced_allocs.py
index 579e481a237ac..7ba7e09cb4e50 100755
--- a/compiler-rt/lib/fuzzer/scripts/unbalanced_allocs.py
+++ b/compiler-rt/lib/fuzzer/scripts/unbalanced_allocs.py
@@ -1,92 +1,100 @@
 #!/usr/bin/env python
-#===- lib/fuzzer/scripts/unbalanced_allocs.py ------------------------------===#
+# ===- lib/fuzzer/scripts/unbalanced_allocs.py ------------------------------===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 #
 # Post-process -trace_malloc=2 output and printout only allocations and frees
 # unbalanced inside of fuzzer runs.
 # Usage:
 #   my_fuzzer -trace_malloc=2 -runs=10 2>&1 | unbalanced_allocs.py -skip=5
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 import argparse
 import sys
 
 _skip = 0
 
+
 def PrintStack(line, stack):
-  global _skip
-  if _skip > 0:
-    return
-  print('Unbalanced ' + line.rstrip());
-  for l in stack:
-    print(l.rstrip())
+    global _skip
+    if _skip > 0:
+        return
+    print("Unbalanced " + line.rstrip())
+    for l in stack:
+        print(l.rstrip())
+
 
 def ProcessStack(line, f):
-  stack = []
-  while line and line.startswith('    #'):
-    stack += [line]
-    line = f.readline()
-  return line, stack
+    stack = []
+    while line and line.startswith("    #"):
+        stack += [line]
+        line = f.readline()
+    return line, stack
+
 
 def ProcessFree(line, f, allocs):
-  if not line.startswith('FREE['):
-    return f.readline()
+    if not line.startswith("FREE["):
+        return f.readline()
+
+    addr = int(line.split()[1], 16)
+    next_line, stack = ProcessStack(f.readline(), f)
+    if addr in allocs:
+        del allocs[addr]
+    else:
+        PrintStack(line, stack)
+    return next_line
 
-  addr = int(line.split()[1], 16)
-  next_line, stack = ProcessStack(f.readline(), f)
-  if addr in allocs:
-    del allocs[addr]
-  else:
-    PrintStack(line, stack)
-  return next_line
 
 def ProcessMalloc(line, f, allocs):
-  if not line.startswith('MALLOC['):
-    return ProcessFree(line, f, allocs)
+    if not line.startswith("MALLOC["):
+        return ProcessFree(line, f, allocs)
+
+    addr = int(line.split()[1], 16)
+    assert not addr in allocs
 
-  addr = int(line.split()[1], 16)
-  assert not addr in allocs
+    next_line, stack = ProcessStack(f.readline(), f)
+    allocs[addr] = (line, stack)
+    return next_line
 
-  next_line, stack = ProcessStack(f.readline(), f)
-  allocs[addr] = (line, stack)
-  return next_line
 
 def ProcessRun(line, f):
-  if not line.startswith('MallocFreeTracer: START'):
-    return ProcessMalloc(line, f, {})
-
-  allocs = {}
-  print(line.rstrip())
-  line = f.readline()
-  while line:
-    if line.startswith('MallocFreeTracer: STOP'):
-      global _skip
-      _skip = _skip - 1
-      for _, (l, s) in allocs.items():
-        PrintStack(l, s)
-      print(line.rstrip())
-      return f.readline()
-    line = ProcessMalloc(line, f, allocs)
-  return line
+    if not line.startswith("MallocFreeTracer: START"):
+        return ProcessMalloc(line, f, {})
+
+    allocs = {}
+    print(line.rstrip())
+    line = f.readline()
+    while line:
+        if line.startswith("MallocFreeTracer: STOP"):
+            global _skip
+            _skip = _skip - 1
+            for _, (l, s) in allocs.items():
+                PrintStack(l, s)
+            print(line.rstrip())
+            return f.readline()
+        line = ProcessMalloc(line, f, allocs)
+    return line
+
 
 def ProcessFile(f):
-  line = f.readline()
-  while line:
-    line = ProcessRun(line, f);
+    line = f.readline()
+    while line:
+        line = ProcessRun(line, f)
+
 
 def main(argv):
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--skip', default=0, help='number of runs to ignore')
-  args = parser.parse_args()
-  global _skip
-  _skip = int(args.skip) + 1
-  ProcessFile(sys.stdin)
-
-if __name__ == '__main__':
-  main(sys.argv)
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--skip", default=0, help="number of runs to ignore")
+    args = parser.parse_args()
+    global _skip
+    _skip = int(args.skip) + 1
+    ProcessFile(sys.stdin)
+
+
+if __name__ == "__main__":
+    main(sys.argv)

diff  --git a/compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py b/compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py
index 6585a42dc81b8..8c0a1ea84a8d5 100755
--- a/compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py
+++ b/compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py
@@ -1,18 +1,18 @@
 #!/usr/bin/env python
-#===- lib/sanitizer_common/scripts/gen_dynamic_list.py ---------------------===#
+# ===- lib/sanitizer_common/scripts/gen_dynamic_list.py ---------------------===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 #
 # Generates the list of functions that should be exported from sanitizer
 # runtimes. The output format is recognized by --dynamic-list linker option.
 # Usage:
 #   gen_dynamic_list.py libclang_rt.*san*.a [ files ... ]
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 from __future__ import print_function
 import argparse
 import os
@@ -21,115 +21,143 @@
 import sys
 import platform
 
-new_delete = set([
-                  '_Znam', '_ZnamRKSt9nothrow_t',    # operator new[](unsigned long)
-                  '_Znwm', '_ZnwmRKSt9nothrow_t',    # operator new(unsigned long)
-                  '_Znaj', '_ZnajRKSt9nothrow_t',    # operator new[](unsigned int)
-                  '_Znwj', '_ZnwjRKSt9nothrow_t',    # operator new(unsigned int)
-                  # operator new(unsigned long, std::align_val_t)
-                  '_ZnwmSt11align_val_t', '_ZnwmSt11align_val_tRKSt9nothrow_t',
-                  # operator new(unsigned int, std::align_val_t)
-                  '_ZnwjSt11align_val_t', '_ZnwjSt11align_val_tRKSt9nothrow_t',
-                  # operator new[](unsigned long, std::align_val_t)
-                  '_ZnamSt11align_val_t', '_ZnamSt11align_val_tRKSt9nothrow_t',
-                  # operator new[](unsigned int, std::align_val_t)
-                  '_ZnajSt11align_val_t', '_ZnajSt11align_val_tRKSt9nothrow_t',
-                  '_ZdaPv', '_ZdaPvRKSt9nothrow_t',  # operator delete[](void *)
-                  '_ZdlPv', '_ZdlPvRKSt9nothrow_t',  # operator delete(void *)
-                  '_ZdaPvm',                         # operator delete[](void*, unsigned long)
-                  '_ZdlPvm',                         # operator delete(void*, unsigned long)
-                  '_ZdaPvj',                         # operator delete[](void*, unsigned int)
-                  '_ZdlPvj',                         # operator delete(void*, unsigned int)
-                  # operator delete(void*, std::align_val_t)
-                  '_ZdlPvSt11align_val_t', '_ZdlPvSt11align_val_tRKSt9nothrow_t',
-                  # operator delete[](void*, std::align_val_t)
-                  '_ZdaPvSt11align_val_t', '_ZdaPvSt11align_val_tRKSt9nothrow_t',
-                  # operator delete(void*, unsigned long,  std::align_val_t)
-                  '_ZdlPvmSt11align_val_t',
-                  # operator delete[](void*, unsigned long, std::align_val_t)
-                  '_ZdaPvmSt11align_val_t',
-                  # operator delete(void*, unsigned int,  std::align_val_t)
-                  '_ZdlPvjSt11align_val_t',
-                  # operator delete[](void*, unsigned int, std::align_val_t)
-                  '_ZdaPvjSt11align_val_t',
-                  ])
+new_delete = set(
+    [
+        "_Znam",
+        "_ZnamRKSt9nothrow_t",  # operator new[](unsigned long)
+        "_Znwm",
+        "_ZnwmRKSt9nothrow_t",  # operator new(unsigned long)
+        "_Znaj",
+        "_ZnajRKSt9nothrow_t",  # operator new[](unsigned int)
+        "_Znwj",
+        "_ZnwjRKSt9nothrow_t",  # operator new(unsigned int)
+        # operator new(unsigned long, std::align_val_t)
+        "_ZnwmSt11align_val_t",
+        "_ZnwmSt11align_val_tRKSt9nothrow_t",
+        # operator new(unsigned int, std::align_val_t)
+        "_ZnwjSt11align_val_t",
+        "_ZnwjSt11align_val_tRKSt9nothrow_t",
+        # operator new[](unsigned long, std::align_val_t)
+        "_ZnamSt11align_val_t",
+        "_ZnamSt11align_val_tRKSt9nothrow_t",
+        # operator new[](unsigned int, std::align_val_t)
+        "_ZnajSt11align_val_t",
+        "_ZnajSt11align_val_tRKSt9nothrow_t",
+        "_ZdaPv",
+        "_ZdaPvRKSt9nothrow_t",  # operator delete[](void *)
+        "_ZdlPv",
+        "_ZdlPvRKSt9nothrow_t",  # operator delete(void *)
+        "_ZdaPvm",  # operator delete[](void*, unsigned long)
+        "_ZdlPvm",  # operator delete(void*, unsigned long)
+        "_ZdaPvj",  # operator delete[](void*, unsigned int)
+        "_ZdlPvj",  # operator delete(void*, unsigned int)
+        # operator delete(void*, std::align_val_t)
+        "_ZdlPvSt11align_val_t",
+        "_ZdlPvSt11align_val_tRKSt9nothrow_t",
+        # operator delete[](void*, std::align_val_t)
+        "_ZdaPvSt11align_val_t",
+        "_ZdaPvSt11align_val_tRKSt9nothrow_t",
+        # operator delete(void*, unsigned long,  std::align_val_t)
+        "_ZdlPvmSt11align_val_t",
+        # operator delete[](void*, unsigned long, std::align_val_t)
+        "_ZdaPvmSt11align_val_t",
+        # operator delete(void*, unsigned int,  std::align_val_t)
+        "_ZdlPvjSt11align_val_t",
+        # operator delete[](void*, unsigned int, std::align_val_t)
+        "_ZdaPvjSt11align_val_t",
+    ]
+)
+
+versioned_functions = set(
+    [
+        "memcpy",
+        "pthread_attr_getaffinity_np",
+        "pthread_cond_broadcast",
+        "pthread_cond_destroy",
+        "pthread_cond_init",
+        "pthread_cond_signal",
+        "pthread_cond_timedwait",
+        "pthread_cond_wait",
+        "realpath",
+        "sched_getaffinity",
+    ]
+)
 
-versioned_functions = set(['memcpy', 'pthread_attr_getaffinity_np',
-                           'pthread_cond_broadcast',
-                           'pthread_cond_destroy', 'pthread_cond_init',
-                           'pthread_cond_signal', 'pthread_cond_timedwait',
-                           'pthread_cond_wait', 'realpath',
-                           'sched_getaffinity'])
 
 def get_global_functions(nm_executable, library):
-  functions = []
-  nm = os.environ.get('NM', nm_executable)
-  nm_proc = subprocess.Popen([nm, library], stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-  nm_out = nm_proc.communicate()[0].decode().split('\n')
-  if nm_proc.returncode != 0:
-    raise subprocess.CalledProcessError(nm_proc.returncode, nm)
-  func_symbols = ['T', 'W']
-  # On PowerPC, nm prints function descriptors from .data section.
-  if platform.uname()[4] in ["powerpc", "ppc64"]:
-    func_symbols += ['D']
-  for line in nm_out:
-    cols = line.split(' ')
-    if len(cols) == 3 and cols[1] in func_symbols :
-      functions.append(cols[2])
-  return functions
+    functions = []
+    nm = os.environ.get("NM", nm_executable)
+    nm_proc = subprocess.Popen(
+        [nm, library], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+    )
+    nm_out = nm_proc.communicate()[0].decode().split("\n")
+    if nm_proc.returncode != 0:
+        raise subprocess.CalledProcessError(nm_proc.returncode, nm)
+    func_symbols = ["T", "W"]
+    # On PowerPC, nm prints function descriptors from .data section.
+    if platform.uname()[4] in ["powerpc", "ppc64"]:
+        func_symbols += ["D"]
+    for line in nm_out:
+        cols = line.split(" ")
+        if len(cols) == 3 and cols[1] in func_symbols:
+            functions.append(cols[2])
+    return functions
+
 
 def main(argv):
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--version-list', action='store_true')
-  parser.add_argument('--extra', default=[], action='append')
-  parser.add_argument('libraries', default=[], nargs='+')
-  parser.add_argument('--nm-executable', required=True)
-  parser.add_argument('-o', '--output', required=True)
-  args = parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--version-list", action="store_true")
+    parser.add_argument("--extra", default=[], action="append")
+    parser.add_argument("libraries", default=[], nargs="+")
+    parser.add_argument("--nm-executable", required=True)
+    parser.add_argument("-o", "--output", required=True)
+    args = parser.parse_args()
+
+    result = []
 
-  result = []
+    all_functions = []
+    for library in args.libraries:
+        all_functions.extend(get_global_functions(args.nm_executable, library))
+    function_set = set(all_functions)
+    for func in all_functions:
+        # Export new/delete operators.
+        if func in new_delete:
+            result.append(func)
+            continue
+        # Export interceptors.
+        match = re.match("__interceptor_(.*)", func)
+        if match:
+            result.append(func)
+            # We have to avoid exporting the interceptors for versioned library
+            # functions due to gold internal error.
+            orig_name = match.group(1)
+            if orig_name in function_set and (
+                args.version_list or orig_name not in versioned_functions
+            ):
+                result.append(orig_name)
+            continue
+        # Export sanitizer interface functions.
+        if re.match("__sanitizer_(.*)", func):
+            result.append(func)
 
-  all_functions = []
-  for library in args.libraries:
-    all_functions.extend(get_global_functions(args.nm_executable, library))
-  function_set = set(all_functions)
-  for func in all_functions:
-    # Export new/delete operators.
-    if func in new_delete:
-      result.append(func)
-      continue
-    # Export interceptors.
-    match = re.match('__interceptor_(.*)', func)
-    if match:
-      result.append(func)
-      # We have to avoid exporting the interceptors for versioned library
-      # functions due to gold internal error.
-      orig_name = match.group(1)
-      if orig_name in function_set and (args.version_list or orig_name not in versioned_functions):
-        result.append(orig_name)
-      continue
-    # Export sanitizer interface functions.
-    if re.match('__sanitizer_(.*)', func):
-      result.append(func)
+    # Additional exported functions from files.
+    for fname in args.extra:
+        f = open(fname, "r")
+        for line in f:
+            result.append(line.rstrip())
+    # Print the resulting list in the format recognized by ld.
+    with open(args.output, "w") as f:
+        print("{", file=f)
+        if args.version_list:
+            print("global:", file=f)
+        result.sort()
+        for sym in result:
+            print("  %s;" % sym, file=f)
+        if args.version_list:
+            print("local:", file=f)
+            print("  *;", file=f)
+        print("};", file=f)
 
-  # Additional exported functions from files.
-  for fname in args.extra:
-    f = open(fname, 'r')
-    for line in f:
-      result.append(line.rstrip())
-  # Print the resulting list in the format recognized by ld.
-  with open(args.output, 'w') as f:
-    print('{', file=f)
-    if args.version_list:
-      print('global:', file=f)
-    result.sort()
-    for sym in result:
-      print(u'  %s;' % sym, file=f)
-    if args.version_list:
-      print('local:', file=f)
-      print('  *;', file=f)
-    print('};', file=f)
 
-if __name__ == '__main__':
-  main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

diff --git a/compiler-rt/lib/sanitizer_common/scripts/sancov.py b/compiler-rt/lib/sanitizer_common/scripts/sancov.py
index 759eb0cb8bd5c..31055086350ab 100755
--- a/compiler-rt/lib/sanitizer_common/scripts/sancov.py
+++ b/compiler-rt/lib/sanitizer_common/scripts/sancov.py
@@ -13,237 +13,276 @@
 
 prog_name = ""
 
+
 def Usage():
-  sys.stderr.write(
-    "Usage: \n" + \
-    " " + prog_name + " merge FILE [FILE...] > OUTPUT\n" \
-    " " + prog_name + " print FILE [FILE...]\n" \
-    " " + prog_name + " unpack FILE [FILE...]\n" \
-    " " + prog_name + " rawunpack FILE [FILE ...]\n" \
-    " " + prog_name + " missing BINARY < LIST_OF_PCS\n" \
-    "\n")
-  exit(1)
+    sys.stderr.write(
+        "Usage: \n" + " " + prog_name + " merge FILE [FILE...] > OUTPUT\n"
+        " " + prog_name + " print FILE [FILE...]\n"
+        " " + prog_name + " unpack FILE [FILE...]\n"
+        " " + prog_name + " rawunpack FILE [FILE ...]\n"
+        " " + prog_name + " missing BINARY < LIST_OF_PCS\n"
+        "\n"
+    )
+    exit(1)
+
 
 def CheckBits(bits):
-  if bits != 32 and bits != 64:
-    raise Exception("Wrong bitness: %d" % bits)
+    if bits != 32 and bits != 64:
+        raise Exception("Wrong bitness: %d" % bits)
+
 
 def TypeCodeForBits(bits):
-  CheckBits(bits)
-  return 'L' if bits == 64 else 'I'
+    CheckBits(bits)
+    return "L" if bits == 64 else "I"
+
 
 def TypeCodeForStruct(bits):
-  CheckBits(bits)
-  return 'Q' if bits == 64 else 'I'
+    CheckBits(bits)
+    return "Q" if bits == 64 else "I"
+
+
+kMagic32SecondHalf = 0xFFFFFF32
+kMagic64SecondHalf = 0xFFFFFF64
+kMagicFirstHalf = 0xC0BFFFFF
 
-kMagic32SecondHalf = 0xFFFFFF32;
-kMagic64SecondHalf = 0xFFFFFF64;
-kMagicFirstHalf    = 0xC0BFFFFF;
 
 def MagicForBits(bits):
-  CheckBits(bits)
-  if sys.byteorder == 'little':
-    return [kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf, kMagicFirstHalf]
-  else:
-    return [kMagicFirstHalf, kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf]
+    CheckBits(bits)
+    if sys.byteorder == "little":
+        return [
+            kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf,
+            kMagicFirstHalf,
+        ]
+    else:
+        return [
+            kMagicFirstHalf,
+            kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf,
+        ]
+
 
 def ReadMagicAndReturnBitness(f, path):
-  magic_bytes = f.read(8)
-  magic_words = struct.unpack('II', magic_bytes);
-  bits = 0
-  idx = 1 if sys.byteorder == 'little' else 0
-  if magic_words[idx] == kMagicFirstHalf:
-    if magic_words[1-idx] == kMagic64SecondHalf:
-      bits = 64
-    elif magic_words[1-idx] == kMagic32SecondHalf:
-      bits = 32
-  if bits == 0:
-    raise Exception('Bad magic word in %s' % path)
-  return bits
+    magic_bytes = f.read(8)
+    magic_words = struct.unpack("II", magic_bytes)
+    bits = 0
+    idx = 1 if sys.byteorder == "little" else 0
+    if magic_words[idx] == kMagicFirstHalf:
+        if magic_words[1 - idx] == kMagic64SecondHalf:
+            bits = 64
+        elif magic_words[1 - idx] == kMagic32SecondHalf:
+            bits = 32
+    if bits == 0:
+        raise Exception("Bad magic word in %s" % path)
+    return bits
+
 
 def ReadOneFile(path):
-  with open(path, mode="rb") as f:
-    f.seek(0, 2)
-    size = f.tell()
-    f.seek(0, 0)
-    if size < 8:
-      raise Exception('File %s is short (< 8 bytes)' % path)
-    bits = ReadMagicAndReturnBitness(f, path)
-    size -= 8
-    w = size * 8 // bits
-    s = struct.unpack_from(TypeCodeForStruct(bits) * (w), f.read(size))
-  sys.stderr.write(
-    "%s: read %d %d-bit PCs from %s\n" % (prog_name, w, bits, path))
-  return s
+    with open(path, mode="rb") as f:
+        f.seek(0, 2)
+        size = f.tell()
+        f.seek(0, 0)
+        if size < 8:
+            raise Exception("File %s is short (< 8 bytes)" % path)
+        bits = ReadMagicAndReturnBitness(f, path)
+        size -= 8
+        w = size * 8 // bits
+        s = struct.unpack_from(TypeCodeForStruct(bits) * (w), f.read(size))
+    sys.stderr.write("%s: read %d %d-bit PCs from %s\n" % (prog_name, w, bits, path))
+    return s
+
 
 def Merge(files):
-  s = set()
-  for f in files:
-    s = s.union(set(ReadOneFile(f)))
-  sys.stderr.write(
-    "%s: %d files merged; %d PCs total\n" % (prog_name, len(files), len(s))
-  )
-  return sorted(s)
+    s = set()
+    for f in files:
+        s = s.union(set(ReadOneFile(f)))
+    sys.stderr.write(
+        "%s: %d files merged; %d PCs total\n" % (prog_name, len(files), len(s))
+    )
+    return sorted(s)
+
 
 def PrintFiles(files):
-  if len(files) > 1:
-    s = Merge(files)
-  else:  # If there is just on file, print the PCs in order.
-    s = ReadOneFile(files[0])
-    sys.stderr.write("%s: 1 file merged; %d PCs total\n" % (prog_name, len(s)))
-  for i in s:
-    print("0x%x" % i)
+    if len(files) > 1:
+        s = Merge(files)
+    else:  # If there is just on file, print the PCs in order.
+        s = ReadOneFile(files[0])
+        sys.stderr.write("%s: 1 file merged; %d PCs total\n" % (prog_name, len(s)))
+    for i in s:
+        print("0x%x" % i)
+
 
 def MergeAndPrint(files):
-  if sys.stdout.isatty():
-    Usage()
-  s = Merge(files)
-  bits = 32
-  if max(s) > 0xFFFFFFFF:
-    bits = 64
-  stdout_buf = getattr(sys.stdout, 'buffer', sys.stdout)
-  array.array('I', MagicForBits(bits)).tofile(stdout_buf)
-  a = struct.pack(TypeCodeForStruct(bits) * len(s), *s)
-  stdout_buf.write(a)
+    if sys.stdout.isatty():
+        Usage()
+    s = Merge(files)
+    bits = 32
+    if max(s) > 0xFFFFFFFF:
+        bits = 64
+    stdout_buf = getattr(sys.stdout, "buffer", sys.stdout)
+    array.array("I", MagicForBits(bits)).tofile(stdout_buf)
+    a = struct.pack(TypeCodeForStruct(bits) * len(s), *s)
+    stdout_buf.write(a)
 
 
 def UnpackOneFile(path):
-  with open(path, mode="rb") as f:
-    sys.stderr.write("%s: unpacking %s\n" % (prog_name, path))
-    while True:
-      header = f.read(12)
-      if not header: return
-      if len(header) < 12:
-        break
-      pid, module_length, blob_size = struct.unpack('iII', header)
-      module = f.read(module_length).decode('utf-8')
-      blob = f.read(blob_size)
-      assert(len(module) == module_length)
-      assert(len(blob) == blob_size)
-      extracted_file = "%s.%d.sancov" % (module, pid)
-      sys.stderr.write("%s: extracting %s\n" % (prog_name, extracted_file))
-      # The packed file may contain multiple blobs for the same pid/module
-      # pair. Append to the end of the file instead of overwriting.
-      with open(extracted_file, 'ab') as f2:
-        f2.write(blob)
-    # fail
-    raise Exception('Error reading file %s' % path)
+    with open(path, mode="rb") as f:
+        sys.stderr.write("%s: unpacking %s\n" % (prog_name, path))
+        while True:
+            header = f.read(12)
+            if not header:
+                return
+            if len(header) < 12:
+                break
+            pid, module_length, blob_size = struct.unpack("iII", header)
+            module = f.read(module_length).decode("utf-8")
+            blob = f.read(blob_size)
+            assert len(module) == module_length
+            assert len(blob) == blob_size
+            extracted_file = "%s.%d.sancov" % (module, pid)
+            sys.stderr.write("%s: extracting %s\n" % (prog_name, extracted_file))
+            # The packed file may contain multiple blobs for the same pid/module
+            # pair. Append to the end of the file instead of overwriting.
+            with open(extracted_file, "ab") as f2:
+                f2.write(blob)
+        # fail
+        raise Exception("Error reading file %s" % path)
 
 
 def Unpack(files):
-  for f in files:
-    UnpackOneFile(f)
+    for f in files:
+        UnpackOneFile(f)
+
 
 def UnpackOneRawFile(path, map_path):
-  mem_map = []
-  with open(map_path, mode="rt") as f_map:
-    sys.stderr.write("%s: reading map %s\n" % (prog_name, map_path))
-    bits = int(f_map.readline())
-    if bits != 32 and bits != 64:
-      raise Exception('Wrong bits size in the map')
-    for line in f_map:
-      parts = line.rstrip().split()
-      mem_map.append((int(parts[0], 16),
-                  int(parts[1], 16),
-                  int(parts[2], 16),
-                  ' '.join(parts[3:])))
-  mem_map.sort(key=lambda m : m[0])
-  mem_map_keys = [m[0] for m in mem_map]
-
-  with open(path, mode="rb") as f:
-    sys.stderr.write("%s: unpacking %s\n" % (prog_name, path))
-
-    f.seek(0, 2)
-    size = f.tell()
-    f.seek(0, 0)
-    pcs = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 // bits), f.read(size))
-    mem_map_pcs = [[] for i in range(0, len(mem_map))]
-
-    for pc in pcs:
-      if pc == 0: continue
-      map_idx = bisect.bisect(mem_map_keys, pc) - 1
-      (start, end, base, module_path) = mem_map[map_idx]
-      assert pc >= start
-      if pc >= end:
-        sys.stderr.write("warning: %s: pc %x outside of any known mapping\n" % (prog_name, pc))
-        continue
-      mem_map_pcs[map_idx].append(pc - base)
-
-    for ((start, end, base, module_path), pc_list) in zip(mem_map, mem_map_pcs):
-      if len(pc_list) == 0: continue
-      assert path.endswith('.sancov.raw')
-      dst_path = module_path + '.' + os.path.basename(path)[:-4]
-      sys.stderr.write("%s: writing %d PCs to %s\n" % (prog_name, len(pc_list), dst_path))
-      sorted_pc_list = sorted(pc_list)
-      pc_buffer = struct.pack(TypeCodeForStruct(bits) * len(pc_list), *sorted_pc_list)
-      with open(dst_path, 'ab+') as f2:
-        array.array('I', MagicForBits(bits)).tofile(f2)
-        f2.seek(0, 2)
-        f2.write(pc_buffer)
+    mem_map = []
+    with open(map_path, mode="rt") as f_map:
+        sys.stderr.write("%s: reading map %s\n" % (prog_name, map_path))
+        bits = int(f_map.readline())
+        if bits != 32 and bits != 64:
+            raise Exception("Wrong bits size in the map")
+        for line in f_map:
+            parts = line.rstrip().split()
+            mem_map.append(
+                (
+                    int(parts[0], 16),
+                    int(parts[1], 16),
+                    int(parts[2], 16),
+                    " ".join(parts[3:]),
+                )
+            )
+    mem_map.sort(key=lambda m: m[0])
+    mem_map_keys = [m[0] for m in mem_map]
+
+    with open(path, mode="rb") as f:
+        sys.stderr.write("%s: unpacking %s\n" % (prog_name, path))
+
+        f.seek(0, 2)
+        size = f.tell()
+        f.seek(0, 0)
+        pcs = struct.unpack_from(
+            TypeCodeForStruct(bits) * (size * 8 // bits), f.read(size)
+        )
+        mem_map_pcs = [[] for i in range(0, len(mem_map))]
+
+        for pc in pcs:
+            if pc == 0:
+                continue
+            map_idx = bisect.bisect(mem_map_keys, pc) - 1
+            (start, end, base, module_path) = mem_map[map_idx]
+            assert pc >= start
+            if pc >= end:
+                sys.stderr.write(
+                    "warning: %s: pc %x outside of any known mapping\n"
+                    % (prog_name, pc)
+                )
+                continue
+            mem_map_pcs[map_idx].append(pc - base)
+
+        for ((start, end, base, module_path), pc_list) in zip(mem_map, mem_map_pcs):
+            if len(pc_list) == 0:
+                continue
+            assert path.endswith(".sancov.raw")
+            dst_path = module_path + "." + os.path.basename(path)[:-4]
+            sys.stderr.write(
+                "%s: writing %d PCs to %s\n" % (prog_name, len(pc_list), dst_path)
+            )
+            sorted_pc_list = sorted(pc_list)
+            pc_buffer = struct.pack(
+                TypeCodeForStruct(bits) * len(pc_list), *sorted_pc_list
+            )
+            with open(dst_path, "ab+") as f2:
+                array.array("I", MagicForBits(bits)).tofile(f2)
+                f2.seek(0, 2)
+                f2.write(pc_buffer)
+
 
 def RawUnpack(files):
-  for f in files:
-    if not f.endswith('.sancov.raw'):
-      raise Exception('Unexpected raw file name %s' % f)
-    f_map = f[:-3] + 'map'
-    UnpackOneRawFile(f, f_map)
+    for f in files:
+        if not f.endswith(".sancov.raw"):
+            raise Exception("Unexpected raw file name %s" % f)
+        f_map = f[:-3] + "map"
+        UnpackOneRawFile(f, f_map)
+
 
 def GetInstrumentedPCs(binary):
-  # This looks scary, but all it does is extract all offsets where we call:
-  # - __sanitizer_cov() or __sanitizer_cov_with_check(),
-  # - with call or callq,
-  # - directly or via PLT.
-  cmd = r"objdump --no-show-raw-insn -d %s | " \
-        r"grep '^\s\+[0-9a-f]\+:\s\+call\(q\|\)\s\+\(0x\|\)[0-9a-f]\+ <__sanitizer_cov\(_with_check\|\|_trace_pc_guard\)\(@plt\|\)>' | " \
+    # This looks scary, but all it does is extract all offsets where we call:
+    # - __sanitizer_cov() or __sanitizer_cov_with_check(),
+    # - with call or callq,
+    # - directly or via PLT.
+    cmd = (
+        r"objdump --no-show-raw-insn -d %s | "
+        r"grep '^\s\+[0-9a-f]\+:\s\+call\(q\|\)\s\+\(0x\|\)[0-9a-f]\+ <__sanitizer_cov\(_with_check\|\|_trace_pc_guard\)\(@plt\|\)>' | "
         r"grep -o '^\s\+[0-9a-f]\+'" % binary
-  lines = subprocess.check_output(cmd, stdin=subprocess.PIPE, shell=True).splitlines()
-  # The PCs we get from objdump are off by 4 bytes, as they point to the
-  # beginning of the callq instruction. Empirically this is true on x86 and
-  # x86_64.
-  return set(int(line.strip(), 16) + 4 for line in lines)
+    )
+    lines = subprocess.check_output(cmd, stdin=subprocess.PIPE, shell=True).splitlines()
+    # The PCs we get from objdump are off by 4 bytes, as they point to the
+    # beginning of the callq instruction. Empirically this is true on x86 and
+    # x86_64.
+    return set(int(line.strip(), 16) + 4 for line in lines)
+
 
 def PrintMissing(binary):
-  if not os.path.isfile(binary):
-    raise Exception('File not found: %s' % binary)
-  instrumented = GetInstrumentedPCs(binary)
-  sys.stderr.write("%s: found %d instrumented PCs in %s\n" % (prog_name,
-                                                              len(instrumented),
-                                                              binary))
-  covered = set(int(line, 16) for line in sys.stdin)
-  sys.stderr.write("%s: read %d PCs from stdin\n" % (prog_name, len(covered)))
-  missing = instrumented - covered
-  sys.stderr.write("%s: %d PCs missing from coverage\n" % (prog_name, len(missing)))
-  if (len(missing) > len(instrumented) - len(covered)):
+    if not os.path.isfile(binary):
+        raise Exception("File not found: %s" % binary)
+    instrumented = GetInstrumentedPCs(binary)
     sys.stderr.write(
-      "%s: WARNING: stdin contains PCs not found in binary\n" % prog_name
+        "%s: found %d instrumented PCs in %s\n" % (prog_name, len(instrumented), binary)
     )
-  for pc in sorted(missing):
-    print("0x%x" % pc)
-
-if __name__ == '__main__':
-  prog_name = sys.argv[0]
-  if len(sys.argv) <= 2:
-    Usage();
-
-  if sys.argv[1] == "missing":
-    if len(sys.argv) != 3:
-      Usage()
-    PrintMissing(sys.argv[2])
-    exit(0)
-
-  file_list = []
-  for f in sys.argv[2:]:
-    file_list += glob.glob(f)
-  if not file_list:
-    Usage()
-
-  if sys.argv[1] == "print":
-    PrintFiles(file_list)
-  elif sys.argv[1] == "merge":
-    MergeAndPrint(file_list)
-  elif sys.argv[1] == "unpack":
-    Unpack(file_list)
-  elif sys.argv[1] == "rawunpack":
-    RawUnpack(file_list)
-  else:
-    Usage()
+    covered = set(int(line, 16) for line in sys.stdin)
+    sys.stderr.write("%s: read %d PCs from stdin\n" % (prog_name, len(covered)))
+    missing = instrumented - covered
+    sys.stderr.write("%s: %d PCs missing from coverage\n" % (prog_name, len(missing)))
+    if len(missing) > len(instrumented) - len(covered):
+        sys.stderr.write(
+            "%s: WARNING: stdin contains PCs not found in binary\n" % prog_name
+        )
+    for pc in sorted(missing):
+        print("0x%x" % pc)
+
+
+if __name__ == "__main__":
+    prog_name = sys.argv[0]
+    if len(sys.argv) <= 2:
+        Usage()
+
+    if sys.argv[1] == "missing":
+        if len(sys.argv) != 3:
+            Usage()
+        PrintMissing(sys.argv[2])
+        exit(0)
+
+    file_list = []
+    for f in sys.argv[2:]:
+        file_list += glob.glob(f)
+    if not file_list:
+        Usage()
+
+    if sys.argv[1] == "print":
+        PrintFiles(file_list)
+    elif sys.argv[1] == "merge":
+        MergeAndPrint(file_list)
+    elif sys.argv[1] == "unpack":
+        Unpack(file_list)
+    elif sys.argv[1] == "rawunpack":
+        RawUnpack(file_list)
+    else:
+        Usage()

diff --git a/compiler-rt/test/asan/TestCases/Android/lit.local.cfg.py b/compiler-rt/test/asan/TestCases/Android/lit.local.cfg.py
index 63a6e52826a36..4c6066a65d4f3 100644
--- a/compiler-rt/test/asan/TestCases/Android/lit.local.cfg.py
+++ b/compiler-rt/test/asan/TestCases/Android/lit.local.cfg.py
@@ -1,11 +1,12 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
 if root.android != "1":
-  config.unsupported = True
+    config.unsupported = True
 
-config.substitutions.append( ("%device", "/data/local/tmp/Output") )
+config.substitutions.append(("%device", "/data/local/tmp/Output"))

diff --git a/compiler-rt/test/asan/TestCases/Darwin/lit.local.cfg.py b/compiler-rt/test/asan/TestCases/Darwin/lit.local.cfg.py
index a85dfcd24c08e..520a963d01198 100644
--- a/compiler-rt/test/asan/TestCases/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/asan/TestCases/Darwin/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Darwin']:
-  config.unsupported = True
+if root.host_os not in ["Darwin"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/asan/TestCases/Linux/lit.local.cfg.py b/compiler-rt/test/asan/TestCases/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/asan/TestCases/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/asan/TestCases/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_no_op.py b/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_no_op.py
index c636bdfa24c32..e414c73c8c34a 100644
--- a/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_no_op.py
+++ b/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_no_op.py
@@ -1,17 +1,20 @@
 class NoOpPlugin(AsanSymbolizerPlugIn):
-  def register_cmdline_args(self, parser):
-    logging.info('Adding --unlikely-option-name-XXX option')
-    parser.add_argument('--unlikely-option-name-XXX', type=int, default=0)
+    def register_cmdline_args(self, parser):
+        logging.info("Adding --unlikely-option-name-XXX option")
+        parser.add_argument("--unlikely-option-name-XXX", type=int, default=0)
 
-  def process_cmdline_args(self, pargs):
-    logging.info('GOT --unlikely-option-name-XXX=%d', pargs.unlikely_option_name_XXX)
-    return True
+    def process_cmdline_args(self, pargs):
+        logging.info(
+            "GOT --unlikely-option-name-XXX=%d", pargs.unlikely_option_name_XXX
+        )
+        return True
 
-  def destroy(self):
-    logging.info('destroy() called on NoOpPlugin')
+    def destroy(self):
+        logging.info("destroy() called on NoOpPlugin")
+
+    def filter_binary_path(self, path):
+        logging.info("filter_binary_path called in NoOpPlugin")
+        return path
 
-  def filter_binary_path(self, path):
-    logging.info('filter_binary_path called in NoOpPlugin')
-    return path
 
 register_plugin(NoOpPlugin())

diff --git a/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_wrong_frame_number_bug.py b/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_wrong_frame_number_bug.py
index 4551202817e90..8c6f0fbc83071 100644
--- a/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_wrong_frame_number_bug.py
+++ b/compiler-rt/test/asan/TestCases/Posix/asan_symbolize_script/plugin_wrong_frame_number_bug.py
@@ -1,7 +1,8 @@
 import logging
 
+
 class FailOncePlugin(AsanSymbolizerPlugIn):
-  """
+    """
     This is a simple plug-in that always claims
     that a binary can't be symbolized on the first
     call but succeeds for all subsequent calls.
@@ -14,18 +15,20 @@ class FailOncePlugin(AsanSymbolizerPlugIn):
     that didn't increment the frame counter which
     caused subsequent symbolization attempts to
     print the wrong frame number.
-  """
-  def __init__(self):
-    self.should_fail = True
-    pass
-
-  def filter_binary_path(self, path):
-    logging.info('filter_binary_path called in NoOpPlugin')
-    if self.should_fail:
-      logging.info('Doing first fail')
-      self.should_fail = False
-      return None
-    logging.info('Doing succeed')
-    return path
+    """
+
+    def __init__(self):
+        self.should_fail = True
+        pass
+
+    def filter_binary_path(self, path):
+        logging.info("filter_binary_path called in NoOpPlugin")
+        if self.should_fail:
+            logging.info("Doing first fail")
+            self.should_fail = False
+            return None
+        logging.info("Doing succeed")
+        return path
+
 
 register_plugin(FailOncePlugin())

diff --git a/compiler-rt/test/asan/TestCases/Posix/lit.local.cfg.py b/compiler-rt/test/asan/TestCases/Posix/lit.local.cfg.py
index 60a9460820a62..63240c3962565 100644
--- a/compiler-rt/test/asan/TestCases/Posix/lit.local.cfg.py
+++ b/compiler-rt/test/asan/TestCases/Posix/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os in ['Windows']:
-  config.unsupported = True
+if root.host_os in ["Windows"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py b/compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
index e924d91c44934..57c0979e60962 100644
--- a/compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
+++ b/compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Windows']:
-  config.unsupported = True
+if root.host_os not in ["Windows"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/asan/TestCases/Windows/msvc/lit.local.cfg.py b/compiler-rt/test/asan/TestCases/Windows/msvc/lit.local.cfg.py
index 91979845cb23f..198945aab191f 100644
--- a/compiler-rt/test/asan/TestCases/Windows/msvc/lit.local.cfg.py
+++ b/compiler-rt/test/asan/TestCases/Windows/msvc/lit.local.cfg.py
@@ -1,4 +1,4 @@
 import re
 
-if not re.match(r'.*-windows-msvc$', config.target_triple):
+if not re.match(r".*-windows-msvc$", config.target_triple):
     config.unsupported = True

diff  --git a/compiler-rt/test/asan/lit.cfg.py b/compiler-rt/test/asan/lit.cfg.py
index 03ac2063c3974..e245a61ef918c 100644
--- a/compiler-rt/test/asan/lit.cfg.py
+++ b/compiler-rt/test/asan/lit.cfg.py
@@ -7,257 +7,321 @@
 
 import lit.formats
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 def push_dynamic_library_lookup_path(config, new_path):
-  if platform.system() == 'Windows':
-    dynamic_library_lookup_var = 'PATH'
-  elif platform.system() == 'Darwin':
-    dynamic_library_lookup_var = 'DYLD_LIBRARY_PATH'
-  else:
-    dynamic_library_lookup_var = 'LD_LIBRARY_PATH'
-
-  new_ld_library_path = os.path.pathsep.join(
-    (new_path, config.environment.get(dynamic_library_lookup_var, '')))
-  config.environment[dynamic_library_lookup_var] = new_ld_library_path
-
-  if platform.system() == 'FreeBSD':
-    dynamic_library_lookup_var = 'LD_32_LIBRARY_PATH'
-    new_ld_32_library_path = os.path.pathsep.join(
-      (new_path, config.environment.get(dynamic_library_lookup_var, '')))
-    config.environment[dynamic_library_lookup_var] = new_ld_32_library_path
-
-  if platform.system() == 'SunOS':
-    dynamic_library_lookup_var = 'LD_LIBRARY_PATH_32'
-    new_ld_library_path_32 = os.path.pathsep.join(
-      (new_path, config.environment.get(dynamic_library_lookup_var, '')))
-    config.environment[dynamic_library_lookup_var] = new_ld_library_path_32
-
-    dynamic_library_lookup_var = 'LD_LIBRARY_PATH_64'
-    new_ld_library_path_64 = os.path.pathsep.join(
-      (new_path, config.environment.get(dynamic_library_lookup_var, '')))
-    config.environment[dynamic_library_lookup_var] = new_ld_library_path_64
+    if platform.system() == "Windows":
+        dynamic_library_lookup_var = "PATH"
+    elif platform.system() == "Darwin":
+        dynamic_library_lookup_var = "DYLD_LIBRARY_PATH"
+    else:
+        dynamic_library_lookup_var = "LD_LIBRARY_PATH"
+
+    new_ld_library_path = os.path.pathsep.join(
+        (new_path, config.environment.get(dynamic_library_lookup_var, ""))
+    )
+    config.environment[dynamic_library_lookup_var] = new_ld_library_path
+
+    if platform.system() == "FreeBSD":
+        dynamic_library_lookup_var = "LD_32_LIBRARY_PATH"
+        new_ld_32_library_path = os.path.pathsep.join(
+            (new_path, config.environment.get(dynamic_library_lookup_var, ""))
+        )
+        config.environment[dynamic_library_lookup_var] = new_ld_32_library_path
+
+    if platform.system() == "SunOS":
+        dynamic_library_lookup_var = "LD_LIBRARY_PATH_32"
+        new_ld_library_path_32 = os.path.pathsep.join(
+            (new_path, config.environment.get(dynamic_library_lookup_var, ""))
+        )
+        config.environment[dynamic_library_lookup_var] = new_ld_library_path_32
+
+        dynamic_library_lookup_var = "LD_LIBRARY_PATH_64"
+        new_ld_library_path_64 = os.path.pathsep.join(
+            (new_path, config.environment.get(dynamic_library_lookup_var, ""))
+        )
+        config.environment[dynamic_library_lookup_var] = new_ld_library_path_64
+
 
 # Setup config name.
-config.name = 'AddressSanitizer' + config.name_suffix
+config.name = "AddressSanitizer" + config.name_suffix
 
 # Platform-specific default ASAN_OPTIONS for lit tests.
 default_asan_opts = list(config.default_sanitizer_opts)
 
 # On Darwin, leak checking is not enabled by default. Enable on macOS
 # tests to prevent regressions
-if config.host_os == 'Darwin' and config.apple_platform == 'osx':
-  default_asan_opts += ['detect_leaks=1']
+if config.host_os == "Darwin" and config.apple_platform == "osx":
+    default_asan_opts += ["detect_leaks=1"]
 
-default_asan_opts_str = ':'.join(default_asan_opts)
+default_asan_opts_str = ":".join(default_asan_opts)
 if default_asan_opts_str:
-  config.environment['ASAN_OPTIONS'] = default_asan_opts_str
-  default_asan_opts_str += ':'
-config.substitutions.append(('%env_asan_opts=',
-                             'env ASAN_OPTIONS=' + default_asan_opts_str))
+    config.environment["ASAN_OPTIONS"] = default_asan_opts_str
+    default_asan_opts_str += ":"
+config.substitutions.append(
+    ("%env_asan_opts=", "env ASAN_OPTIONS=" + default_asan_opts_str)
+)
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
-if config.host_os not in ['FreeBSD', 'NetBSD']:
-  libdl_flag = "-ldl"
+if config.host_os not in ["FreeBSD", "NetBSD"]:
+    libdl_flag = "-ldl"
 else:
-  libdl_flag = ""
+    libdl_flag = ""
 
 # GCC-ASan doesn't link in all the necessary libraries automatically, so
 # we have to do it ourselves.
-if config.compiler_id == 'GNU':
-  extra_link_flags = ["-pthread", "-lstdc++", libdl_flag]
+if config.compiler_id == "GNU":
+    extra_link_flags = ["-pthread", "-lstdc++", libdl_flag]
 else:
-  extra_link_flags = []
+    extra_link_flags = []
 
 # Setup default compiler flags used with -fsanitize=address option.
 # FIXME: Review the set of required flags and check if it can be reduced.
 target_cflags = [get_required_attr(config, "target_cflags")] + extra_link_flags
 target_cxxflags = config.cxx_mode_flags + target_cflags
-clang_asan_static_cflags = (["-fsanitize=address",
-                            "-mno-omit-leaf-frame-pointer",
-                            "-fno-omit-frame-pointer",
-                            "-fno-optimize-sibling-calls"] +
-                            config.debug_info_flags + target_cflags)
-if config.target_arch == 's390x':
-  clang_asan_static_cflags.append("-mbackchain")
+clang_asan_static_cflags = (
+    [
+        "-fsanitize=address",
+        "-mno-omit-leaf-frame-pointer",
+        "-fno-omit-frame-pointer",
+        "-fno-optimize-sibling-calls",
+    ]
+    + config.debug_info_flags
+    + target_cflags
+)
+if config.target_arch == "s390x":
+    clang_asan_static_cflags.append("-mbackchain")
 clang_asan_static_cxxflags = config.cxx_mode_flags + clang_asan_static_cflags
 
-target_is_msvc = bool(re.match(r'.*-windows-msvc$', config.target_triple))
+target_is_msvc = bool(re.match(r".*-windows-msvc$", config.target_triple))
 
 asan_dynamic_flags = []
 if config.asan_dynamic:
-  asan_dynamic_flags = ["-shared-libasan"]
-  if platform.system() == 'Windows' and target_is_msvc:
-    # On MSVC target, we need to simulate "clang-cl /MD" on the clang driver side.
-    asan_dynamic_flags += ["-D_MT", "-D_DLL", "-Wl,-nodefaultlib:libcmt,-defaultlib:msvcrt,-defaultlib:oldnames"]
-  elif platform.system() == 'FreeBSD':
-    # On FreeBSD, we need to add -pthread to ensure pthread functions are available.
-    asan_dynamic_flags += ['-pthread']
-  config.available_features.add("asan-dynamic-runtime")
+    asan_dynamic_flags = ["-shared-libasan"]
+    if platform.system() == "Windows" and target_is_msvc:
+        # On MSVC target, we need to simulate "clang-cl /MD" on the clang driver side.
+        asan_dynamic_flags += [
+            "-D_MT",
+            "-D_DLL",
+            "-Wl,-nodefaultlib:libcmt,-defaultlib:msvcrt,-defaultlib:oldnames",
+        ]
+    elif platform.system() == "FreeBSD":
+        # On FreeBSD, we need to add -pthread to ensure pthread functions are available.
+        asan_dynamic_flags += ["-pthread"]
+    config.available_features.add("asan-dynamic-runtime")
 else:
-  config.available_features.add("asan-static-runtime")
+    config.available_features.add("asan-static-runtime")
 clang_asan_cflags = clang_asan_static_cflags + asan_dynamic_flags
 clang_asan_cxxflags = clang_asan_static_cxxflags + asan_dynamic_flags
 
 # Add win32-(static|dynamic)-asan features to mark tests as passing or failing
 # in those modes. lit doesn't support logical feature test combinations.
-if platform.system() == 'Windows':
-  if config.asan_dynamic:
-    win_runtime_feature = "win32-dynamic-asan"
-  else:
-    win_runtime_feature = "win32-static-asan"
-  config.available_features.add(win_runtime_feature)
+if platform.system() == "Windows":
+    if config.asan_dynamic:
+        win_runtime_feature = "win32-dynamic-asan"
+    else:
+        win_runtime_feature = "win32-static-asan"
+    config.available_features.add(win_runtime_feature)
+
 
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
-config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
-config.substitutions.append( ("%clang_asan ", build_invocation(clang_asan_cflags)) )
-config.substitutions.append( ("%clangxx_asan ", build_invocation(clang_asan_cxxflags)) )
+config.substitutions.append(("%clang ", build_invocation(target_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(target_cxxflags)))
+config.substitutions.append(("%clang_asan ", build_invocation(clang_asan_cflags)))
+config.substitutions.append(("%clangxx_asan ", build_invocation(clang_asan_cxxflags)))
 if config.asan_dynamic:
-  if config.host_os in ['Linux', 'FreeBSD', 'NetBSD', 'SunOS']:
-    shared_libasan_path = os.path.join(config.compiler_rt_libdir, "libclang_rt.asan{}.so".format(config.target_suffix))
-  elif config.host_os == 'Darwin':
-    shared_libasan_path = os.path.join(config.compiler_rt_libdir, 'libclang_rt.asan_{}_dynamic.dylib'.format(config.apple_platform))
-  else:
-    lit_config.warning('%shared_libasan substitution not set but dynamic ASan is available.')
-    shared_libasan_path = None
-
-  if shared_libasan_path is not None:
-    config.substitutions.append( ("%shared_libasan", shared_libasan_path) )
-  config.substitutions.append( ("%clang_asan_static ", build_invocation(clang_asan_static_cflags)) )
-  config.substitutions.append( ("%clangxx_asan_static ", build_invocation(clang_asan_static_cxxflags)) )
-
-if platform.system() == 'Windows':
-  # MSVC-specific tests might also use the clang-cl.exe driver.
-  if target_is_msvc:
-    clang_cl_cxxflags = ["-Wno-deprecated-declarations",
-                        "-WX",
-                        "-D_HAS_EXCEPTIONS=0",
-                        "-Zi"] + target_cflags
-    clang_cl_asan_cxxflags = ["-fsanitize=address"] + clang_cl_cxxflags
-    if config.asan_dynamic:
-      clang_cl_asan_cxxflags.append("-MD")
-
-    clang_cl_invocation = build_invocation(clang_cl_cxxflags)
-    clang_cl_invocation = clang_cl_invocation.replace("clang.exe","clang-cl.exe")
-    config.substitutions.append( ("%clang_cl ", clang_cl_invocation) )
-
-    clang_cl_asan_invocation = build_invocation(clang_cl_asan_cxxflags)
-    clang_cl_asan_invocation = clang_cl_asan_invocation.replace("clang.exe","clang-cl.exe")
-    config.substitutions.append( ("%clang_cl_asan ", clang_cl_asan_invocation) )
-    config.substitutions.append( ("%clang_cl_nocxx_asan ", clang_cl_asan_invocation) )
-    config.substitutions.append( ("%Od", "-Od") )
-    config.substitutions.append( ("%Fe", "-Fe") )
-    config.substitutions.append( ("%LD", "-LD") )
-    config.substitutions.append( ("%MD", "-MD") )
-    config.substitutions.append( ("%MT", "-MT") )
-    config.substitutions.append( ("%Gw", "-Gw") )
-
-    base_lib = os.path.join(config.compiler_rt_libdir, "clang_rt.asan%%s%s.lib" % config.target_suffix)
-    config.substitutions.append( ("%asan_lib", base_lib % "") )
-    config.substitutions.append( ("%asan_cxx_lib", base_lib % "_cxx") )
-    config.substitutions.append( ("%asan_dll_thunk", base_lib % "_dll_thunk") )
-  else:
-    # To make some of these tests work on MinGW target without changing their
-    # behaviour for MSVC target, substitute clang-cl flags with gcc-like ones.
-    config.substitutions.append( ("%clang_cl ", build_invocation(target_cxxflags)) )
-    config.substitutions.append( ("%clang_cl_asan ", build_invocation(clang_asan_cxxflags)) )
-    config.substitutions.append( ("%clang_cl_nocxx_asan ", build_invocation(clang_asan_cflags)) )
-    config.substitutions.append( ("%Od", "-O0") )
-    config.substitutions.append( ("%Fe", "-o") )
-    config.substitutions.append( ("%LD", "-shared") )
-    config.substitutions.append( ("%MD", "") )
-    config.substitutions.append( ("%MT", "") )
-    config.substitutions.append( ("%Gw", "-fdata-sections") )
+    if config.host_os in ["Linux", "FreeBSD", "NetBSD", "SunOS"]:
+        shared_libasan_path = os.path.join(
+            config.compiler_rt_libdir,
+            "libclang_rt.asan{}.so".format(config.target_suffix),
+        )
+    elif config.host_os == "Darwin":
+        shared_libasan_path = os.path.join(
+            config.compiler_rt_libdir,
+            "libclang_rt.asan_{}_dynamic.dylib".format(config.apple_platform),
+        )
+    else:
+        lit_config.warning(
+            "%shared_libasan substitution not set but dynamic ASan is available."
+        )
+        shared_libasan_path = None
+
+    if shared_libasan_path is not None:
+        config.substitutions.append(("%shared_libasan", shared_libasan_path))
+    config.substitutions.append(
+        ("%clang_asan_static ", build_invocation(clang_asan_static_cflags))
+    )
+    config.substitutions.append(
+        ("%clangxx_asan_static ", build_invocation(clang_asan_static_cxxflags))
+    )
+
+if platform.system() == "Windows":
+    # MSVC-specific tests might also use the clang-cl.exe driver.
+    if target_is_msvc:
+        clang_cl_cxxflags = [
+            "-Wno-deprecated-declarations",
+            "-WX",
+            "-D_HAS_EXCEPTIONS=0",
+            "-Zi",
+        ] + target_cflags
+        clang_cl_asan_cxxflags = ["-fsanitize=address"] + clang_cl_cxxflags
+        if config.asan_dynamic:
+            clang_cl_asan_cxxflags.append("-MD")
+
+        clang_cl_invocation = build_invocation(clang_cl_cxxflags)
+        clang_cl_invocation = clang_cl_invocation.replace("clang.exe", "clang-cl.exe")
+        config.substitutions.append(("%clang_cl ", clang_cl_invocation))
+
+        clang_cl_asan_invocation = build_invocation(clang_cl_asan_cxxflags)
+        clang_cl_asan_invocation = clang_cl_asan_invocation.replace(
+            "clang.exe", "clang-cl.exe"
+        )
+        config.substitutions.append(("%clang_cl_asan ", clang_cl_asan_invocation))
+        config.substitutions.append(("%clang_cl_nocxx_asan ", clang_cl_asan_invocation))
+        config.substitutions.append(("%Od", "-Od"))
+        config.substitutions.append(("%Fe", "-Fe"))
+        config.substitutions.append(("%LD", "-LD"))
+        config.substitutions.append(("%MD", "-MD"))
+        config.substitutions.append(("%MT", "-MT"))
+        config.substitutions.append(("%Gw", "-Gw"))
+
+        base_lib = os.path.join(
+            config.compiler_rt_libdir, "clang_rt.asan%%s%s.lib" % config.target_suffix
+        )
+        config.substitutions.append(("%asan_lib", base_lib % ""))
+        config.substitutions.append(("%asan_cxx_lib", base_lib % "_cxx"))
+        config.substitutions.append(("%asan_dll_thunk", base_lib % "_dll_thunk"))
+    else:
+        # To make some of these tests work on MinGW target without changing their
+        # behaviour for MSVC target, substitute clang-cl flags with gcc-like ones.
+        config.substitutions.append(("%clang_cl ", build_invocation(target_cxxflags)))
+        config.substitutions.append(
+            ("%clang_cl_asan ", build_invocation(clang_asan_cxxflags))
+        )
+        config.substitutions.append(
+            ("%clang_cl_nocxx_asan ", build_invocation(clang_asan_cflags))
+        )
+        config.substitutions.append(("%Od", "-O0"))
+        config.substitutions.append(("%Fe", "-o"))
+        config.substitutions.append(("%LD", "-shared"))
+        config.substitutions.append(("%MD", ""))
+        config.substitutions.append(("%MT", ""))
+        config.substitutions.append(("%Gw", "-fdata-sections"))
 
 # FIXME: De-hardcode this path.
 asan_source_dir = os.path.join(
-  get_required_attr(config, "compiler_rt_src_root"), "lib", "asan")
+    get_required_attr(config, "compiler_rt_src_root"), "lib", "asan"
+)
 python_exec = shlex.quote(get_required_attr(config, "python_executable"))
 # Setup path to asan_symbolize.py script.
 asan_symbolize = os.path.join(asan_source_dir, "scripts", "asan_symbolize.py")
 if not os.path.exists(asan_symbolize):
-  lit_config.fatal("Can't find script on path %r" % asan_symbolize)
-config.substitutions.append( ("%asan_symbolize", python_exec + " " + asan_symbolize + " ") )
+    lit_config.fatal("Can't find script on path %r" % asan_symbolize)
+config.substitutions.append(
+    ("%asan_symbolize", python_exec + " " + asan_symbolize + " ")
+)
 # Setup path to sancov.py script.
 sanitizer_common_source_dir = os.path.join(
-  get_required_attr(config, "compiler_rt_src_root"), "lib", "sanitizer_common")
+    get_required_attr(config, "compiler_rt_src_root"), "lib", "sanitizer_common"
+)
 sancov = os.path.join(sanitizer_common_source_dir, "scripts", "sancov.py")
 if not os.path.exists(sancov):
-  lit_config.fatal("Can't find script on path %r" % sancov)
-config.substitutions.append( ("%sancov ", python_exec + " " + sancov + " ") )
+    lit_config.fatal("Can't find script on path %r" % sancov)
+config.substitutions.append(("%sancov ", python_exec + " " + sancov + " "))
 
 # Determine kernel bitness
-if config.host_arch.find('64') != -1 and not config.android:
-  kernel_bits = '64'
+if config.host_arch.find("64") != -1 and not config.android:
+    kernel_bits = "64"
 else:
-  kernel_bits = '32'
+    kernel_bits = "32"
 
-config.substitutions.append( ('CHECK-%kernel_bits', ("CHECK-kernel-" + kernel_bits + "-bits")))
+config.substitutions.append(
+    ("CHECK-%kernel_bits", ("CHECK-kernel-" + kernel_bits + "-bits"))
+)
 
-config.substitutions.append( ("%libdl", libdl_flag) )
+config.substitutions.append(("%libdl", libdl_flag))
 
 config.available_features.add("asan-" + config.bits + "-bits")
 
 # Fast unwinder doesn't work with Thumb
 if not config.arm_thumb:
-  config.available_features.add('fast-unwinder-works')
+    config.available_features.add("fast-unwinder-works")
 
 # Turn on leak detection on 64-bit Linux.
-leak_detection_android = config.android and 'android-thread-properties-api' in config.available_features and (config.target_arch in ['x86_64', 'i386', 'i686', 'aarch64'])
-leak_detection_linux = (config.host_os == 'Linux') and (not config.android) and (config.target_arch in ['x86_64', 'i386', 'riscv64', 'loongarch64'])
-leak_detection_mac = (config.host_os == 'Darwin') and (config.apple_platform == 'osx')
-leak_detection_netbsd = (config.host_os == 'NetBSD') and (config.target_arch in ['x86_64', 'i386'])
-if leak_detection_android or leak_detection_linux or leak_detection_mac or leak_detection_netbsd:
-  config.available_features.add('leak-detection')
+leak_detection_android = (
+    config.android
+    and "android-thread-properties-api" in config.available_features
+    and (config.target_arch in ["x86_64", "i386", "i686", "aarch64"])
+)
+leak_detection_linux = (
+    (config.host_os == "Linux")
+    and (not config.android)
+    and (config.target_arch in ["x86_64", "i386", "riscv64", "loongarch64"])
+)
+leak_detection_mac = (config.host_os == "Darwin") and (config.apple_platform == "osx")
+leak_detection_netbsd = (config.host_os == "NetBSD") and (
+    config.target_arch in ["x86_64", "i386"]
+)
+if (
+    leak_detection_android
+    or leak_detection_linux
+    or leak_detection_mac
+    or leak_detection_netbsd
+):
+    config.available_features.add("leak-detection")
 
 # Set LD_LIBRARY_PATH to pick dynamic runtime up properly.
 push_dynamic_library_lookup_path(config, config.compiler_rt_libdir)
 
 # GCC-ASan uses dynamic runtime by default.
-if config.compiler_id == 'GNU':
-  gcc_dir = os.path.dirname(config.clang)
-  libasan_dir = os.path.join(gcc_dir, "..", "lib" + config.bits)
-  push_dynamic_library_lookup_path(config, libasan_dir)
+if config.compiler_id == "GNU":
+    gcc_dir = os.path.dirname(config.clang)
+    libasan_dir = os.path.join(gcc_dir, "..", "lib" + config.bits)
+    push_dynamic_library_lookup_path(config, libasan_dir)
 
 # Add the RT libdir to PATH directly so that we can successfully run the gtest
 # binary to list its tests.
-if config.host_os == 'Windows' and config.asan_dynamic:
-  os.environ['PATH'] = os.path.pathsep.join([config.compiler_rt_libdir,
-                                             os.environ.get('PATH', '')])
+if config.host_os == "Windows" and config.asan_dynamic:
+    os.environ["PATH"] = os.path.pathsep.join(
+        [config.compiler_rt_libdir, os.environ.get("PATH", "")]
+    )
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
-if config.host_os == 'Darwin':
-  config.suffixes.append('.mm')
+if config.host_os == "Darwin":
+    config.suffixes.append(".mm")
 
-if config.host_os == 'Windows':
-  config.substitutions.append(('%fPIC', ''))
-  config.substitutions.append(('%fPIE', ''))
-  config.substitutions.append(('%pie', ''))
+if config.host_os == "Windows":
+    config.substitutions.append(("%fPIC", ""))
+    config.substitutions.append(("%fPIE", ""))
+    config.substitutions.append(("%pie", ""))
 else:
-  config.substitutions.append(('%fPIC', '-fPIC'))
-  config.substitutions.append(('%fPIE', '-fPIE'))
-  config.substitutions.append(('%pie', '-pie'))
+    config.substitutions.append(("%fPIC", "-fPIC"))
+    config.substitutions.append(("%fPIE", "-fPIE"))
+    config.substitutions.append(("%pie", "-pie"))
 
 # Only run the tests on supported OSs.
-if config.host_os not in ['Linux', 'Darwin', 'FreeBSD', 'SunOS', 'Windows', 'NetBSD']:
-  config.unsupported = True
+if config.host_os not in ["Linux", "Darwin", "FreeBSD", "SunOS", "Windows", "NetBSD"]:
+    config.unsupported = True
 
 if not config.parallelism_group:
-  config.parallelism_group = 'shadow-memory'
+    config.parallelism_group = "shadow-memory"
 
-if config.host_os == 'NetBSD':
-  config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+if config.host_os == "NetBSD":
+    config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))

diff  --git a/compiler-rt/test/builtins/TestCases/Darwin/lit.local.cfg.py b/compiler-rt/test/builtins/TestCases/Darwin/lit.local.cfg.py
index a85dfcd24c08e..520a963d01198 100644
--- a/compiler-rt/test/builtins/TestCases/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/builtins/TestCases/Darwin/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Darwin']:
-  config.unsupported = True
+if root.host_os not in ["Darwin"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/builtins/Unit/lit.cfg.py b/compiler-rt/test/builtins/Unit/lit.cfg.py
index e3602a99087d6..12a77436983b8 100644
--- a/compiler-rt/test/builtins/Unit/lit.cfg.py
+++ b/compiler-rt/test/builtins/Unit/lit.cfg.py
@@ -10,26 +10,29 @@
 use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
 if use_lit_shell:
     # 0 is external, "" is default, and everything else is internal.
-    execute_external = (use_lit_shell == "0")
+    execute_external = use_lit_shell == "0"
 else:
     # Otherwise we default to internal on Windows and external elsewhere, as
     # bash on Windows is usually very slow.
-    execute_external = (not sys.platform in ['win32'])
+    execute_external = not sys.platform in ["win32"]
+
 
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup config name.
-config.name = 'Builtins' + config.name_suffix
+config.name = "Builtins" + config.name_suffix
 
 # Platform-specific default Builtins_OPTIONS for lit tests.
-default_builtins_opts = ''
+default_builtins_opts = ""
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
@@ -37,44 +40,52 @@ def get_required_attr(config, attr_name):
 # Path to the static library
 is_msvc = get_required_attr(config, "is_msvc")
 if is_msvc:
-  base_lib = os.path.join(config.compiler_rt_libdir, "clang_rt.builtins%s.lib "
-                          % config.target_suffix)
-  config.substitutions.append( ("%librt ", base_lib) )
-elif config.host_os  == 'Darwin':
-  base_lib = os.path.join(config.compiler_rt_libdir, "libclang_rt.osx.a ")
-  config.substitutions.append( ("%librt ", base_lib + ' -lSystem ') )
-elif config.host_os  == 'Windows':
-  base_lib = os.path.join(config.compiler_rt_libdir, "libclang_rt.builtins%s.a"
-                          % config.target_suffix)
-  if sys.platform in ['win32'] and execute_external:
-    # Don't pass dosish path separator to msys bash.exe.
-    base_lib = base_lib.replace('\\', '/')
-  config.substitutions.append( ("%librt ", base_lib + ' -lmingw32 -lmoldname -lmingwex -lmsvcrt -ladvapi32 -lshell32 -luser32 -lkernel32 ') )
+    base_lib = os.path.join(
+        config.compiler_rt_libdir, "clang_rt.builtins%s.lib " % config.target_suffix
+    )
+    config.substitutions.append(("%librt ", base_lib))
+elif config.host_os == "Darwin":
+    base_lib = os.path.join(config.compiler_rt_libdir, "libclang_rt.osx.a ")
+    config.substitutions.append(("%librt ", base_lib + " -lSystem "))
+elif config.host_os == "Windows":
+    base_lib = os.path.join(
+        config.compiler_rt_libdir, "libclang_rt.builtins%s.a" % config.target_suffix
+    )
+    if sys.platform in ["win32"] and execute_external:
+        # Don't pass dosish path separator to msys bash.exe.
+        base_lib = base_lib.replace("\\", "/")
+    config.substitutions.append(
+        (
+            "%librt ",
+            base_lib
+            + " -lmingw32 -lmoldname -lmingwex -lmsvcrt -ladvapi32 -lshell32 -luser32 -lkernel32 ",
+        )
+    )
 else:
-  base_lib = os.path.join(config.compiler_rt_libdir, "libclang_rt.builtins%s.a"
-                          % config.target_suffix)
-  if sys.platform in ['win32'] and execute_external:
-    # Don't pass dosish path separator to msys bash.exe.
-    base_lib = base_lib.replace('\\', '/')
-  config.substitutions.append( ("%librt ", base_lib + ' -lc -lm ') )
+    base_lib = os.path.join(
+        config.compiler_rt_libdir, "libclang_rt.builtins%s.a" % config.target_suffix
+    )
+    if sys.platform in ["win32"] and execute_external:
+        # Don't pass dosish path separator to msys bash.exe.
+        base_lib = base_lib.replace("\\", "/")
+    config.substitutions.append(("%librt ", base_lib + " -lc -lm "))
 
 builtins_source_dir = os.path.join(
-  get_required_attr(config, "compiler_rt_src_root"), "lib", "builtins")
-if sys.platform in ['win32'] and execute_external:
-  # Don't pass dosish path separator to msys bash.exe.
-  builtins_source_dir = builtins_source_dir.replace('\\', '/')
+    get_required_attr(config, "compiler_rt_src_root"), "lib", "builtins"
+)
+if sys.platform in ["win32"] and execute_external:
+    # Don't pass dosish path separator to msys bash.exe.
+    builtins_source_dir = builtins_source_dir.replace("\\", "/")
 builtins_lit_source_dir = get_required_attr(config, "builtins_lit_source_dir")
 
 extra_link_flags = ["-nodefaultlibs"]
 
 target_cflags = [get_required_attr(config, "target_cflags")]
-target_cflags += ['-fno-builtin', '-I', builtins_source_dir]
+target_cflags += ["-fno-builtin", "-I", builtins_source_dir]
 target_cflags += extra_link_flags
 target_cxxflags = config.cxx_mode_flags + target_cflags
-clang_builtins_static_cflags = ([""] +
-                            config.debug_info_flags + target_cflags)
-clang_builtins_static_cxxflags = config.cxx_mode_flags + \
-                                 clang_builtins_static_cflags
+clang_builtins_static_cflags = [""] + config.debug_info_flags + target_cflags
+clang_builtins_static_cxxflags = config.cxx_mode_flags + clang_builtins_static_cflags
 
 clang_builtins_cflags = clang_builtins_static_cflags
 clang_builtins_cxxflags = clang_builtins_static_cxxflags
@@ -82,49 +93,53 @@ def get_required_attr(config, attr_name):
 # FIXME: Right now we don't compile the C99 complex builtins when using
 # clang-cl. Fix that.
 if not is_msvc:
-  config.available_features.add('c99-complex')
+    config.available_features.add("c99-complex")
 
 builtins_is_msvc = get_required_attr(config, "builtins_is_msvc")
 if not builtins_is_msvc:
-  config.available_features.add('int128')
+    config.available_features.add("int128")
 
 clang_wrapper = ""
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([clang_wrapper, config.clang] + compile_flags) + " "
+    return " " + " ".join([clang_wrapper, config.clang] + compile_flags) + " "
 
 
-config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
-config.substitutions.append( ("%clang_builtins ", \
-                              build_invocation(clang_builtins_cflags)))
-config.substitutions.append( ("%clangxx_builtins ", \
-                              build_invocation(clang_builtins_cxxflags)))
+config.substitutions.append(("%clang ", build_invocation(target_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(target_cxxflags)))
+config.substitutions.append(
+    ("%clang_builtins ", build_invocation(clang_builtins_cflags))
+)
+config.substitutions.append(
+    ("%clangxx_builtins ", build_invocation(clang_builtins_cxxflags))
+)
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
 if not config.emulator:
-  config.available_features.add('native-run')
+    config.available_features.add("native-run")
 
 # Add features for available sources
-builtins_source_features = config.builtins_lit_source_features.split(';')
+builtins_source_features = config.builtins_lit_source_features.split(";")
 # Sanity checks
 if not builtins_source_features:
-  lit_config.fatal('builtins_source_features cannot be empty')
+    lit_config.fatal("builtins_source_features cannot be empty")
 builtins_source_features_set = set()
 builtins_source_feature_duplicates = []
 for builtin_source_feature in builtins_source_features:
-  if len(builtin_source_feature) == 0:
-    lit_config.fatal('builtins_source_feature cannot contain empty features')
-  if builtin_source_feature not in builtins_source_features_set:
-    builtins_source_features_set.add(builtin_source_feature)
-  else:
-    builtins_source_feature_duplicates.append(builtin_source_feature)
+    if len(builtin_source_feature) == 0:
+        lit_config.fatal("builtins_source_feature cannot contain empty features")
+    if builtin_source_feature not in builtins_source_features_set:
+        builtins_source_features_set.add(builtin_source_feature)
+    else:
+        builtins_source_feature_duplicates.append(builtin_source_feature)
 
 if len(builtins_source_feature_duplicates) > 0:
-  lit_config.fatal(
-    'builtins_source_features contains duplicates: {}'.format(
-      builtins_source_feature_duplicates)
-  )
+    lit_config.fatal(
+        "builtins_source_features contains duplicates: {}".format(
+            builtins_source_feature_duplicates
+        )
+    )
 config.available_features.update(builtins_source_features)

diff --git a/compiler-rt/test/builtins/lit.cfg.py b/compiler-rt/test/builtins/lit.cfg.py
index f9aae8fded6c3..22fea267c7cac 100644
--- a/compiler-rt/test/builtins/lit.cfg.py
+++ b/compiler-rt/test/builtins/lit.cfg.py
@@ -3,18 +3,24 @@
 import os
 
 # Setup config name.
-config.name = 'Builtins'
+config.name = "Builtins"
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Test suffixes.
-config.suffixes = ['.c', '.cpp', '.m', '.mm']
+config.suffixes = [".c", ".cpp", ".m", ".mm"]
 
 # Define %clang and %clangxx substitutions to use in test RUN lines.
-config.substitutions.append( ("%clang ", " " + config.clang + " ") )
+config.substitutions.append(("%clang ", " " + config.clang + " "))
 
-if config.host_os == 'Darwin':
-  config.substitutions.append( ("%macos_version_major", str(config.darwin_osx_version[0])) )
-  config.substitutions.append( ("%macos_version_minor", str(config.darwin_osx_version[1])) )
-  config.substitutions.append( ("%macos_version_subminor", str(config.darwin_osx_version[2])) )
+if config.host_os == "Darwin":
+    config.substitutions.append(
+        ("%macos_version_major", str(config.darwin_osx_version[0]))
+    )
+    config.substitutions.append(
+        ("%macos_version_minor", str(config.darwin_osx_version[1]))
+    )
+    config.substitutions.append(
+        ("%macos_version_subminor", str(config.darwin_osx_version[2]))
+    )

diff --git a/compiler-rt/test/cfi/cross-dso/icall/lit.local.cfg.py b/compiler-rt/test/cfi/cross-dso/icall/lit.local.cfg.py
index db08765a2bb29..749c265bbf1c0 100644
--- a/compiler-rt/test/cfi/cross-dso/icall/lit.local.cfg.py
+++ b/compiler-rt/test/cfi/cross-dso/icall/lit.local.cfg.py
@@ -1,3 +1,3 @@
 # The cfi-icall checker is only supported on x86 and x86_64 for now.
-if config.root.host_arch not in ['x86', 'x86_64']:
-  config.unsupported = True
+if config.root.host_arch not in ["x86", "x86_64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/cfi/cross-dso/lit.local.cfg.py b/compiler-rt/test/cfi/cross-dso/lit.local.cfg.py
index 245d434faed99..dceb7cde7218b 100644
--- a/compiler-rt/test/cfi/cross-dso/lit.local.cfg.py
+++ b/compiler-rt/test/cfi/cross-dso/lit.local.cfg.py
@@ -1,13 +1,14 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux', 'FreeBSD', 'NetBSD']:
-  config.unsupported = True
+if root.host_os not in ["Linux", "FreeBSD", "NetBSD"]:
+    config.unsupported = True
 
 # Android O (API level 26) has support for cross-dso cfi in libdl.so.
-if config.android and 'android-26' not in config.available_features:
-  config.unsupported = True
+if config.android and "android-26" not in config.available_features:
+    config.unsupported = True

diff --git a/compiler-rt/test/cfi/icall/lit.local.cfg.py b/compiler-rt/test/cfi/icall/lit.local.cfg.py
index db08765a2bb29..749c265bbf1c0 100644
--- a/compiler-rt/test/cfi/icall/lit.local.cfg.py
+++ b/compiler-rt/test/cfi/icall/lit.local.cfg.py
@@ -1,3 +1,3 @@
 # The cfi-icall checker is only supported on x86 and x86_64 for now.
-if config.root.host_arch not in ['x86', 'x86_64']:
-  config.unsupported = True
+if config.root.host_arch not in ["x86", "x86_64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/cfi/lit.cfg.py b/compiler-rt/test/cfi/lit.cfg.py
index 2f2d1ddcaa792..f9afc6bc0234f 100644
--- a/compiler-rt/test/cfi/lit.cfg.py
+++ b/compiler-rt/test/cfi/lit.cfg.py
@@ -1,50 +1,60 @@
 import lit.formats
 import os
 
-config.name = 'cfi' + config.name_suffix
-config.suffixes = ['.c', '.cpp', '.test']
+config.name = "cfi" + config.name_suffix
+config.suffixes = [".c", ".cpp", ".test"]
 config.test_source_root = os.path.dirname(__file__)
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
 clang = build_invocation([config.target_cflags])
 clangxx = build_invocation([config.target_cflags] + config.cxx_mode_flags)
 
-config.substitutions.append((r"%clang ", clang + ' '))
-config.substitutions.append((r"%clangxx ", clangxx + ' '))
+config.substitutions.append((r"%clang ", clang + " "))
+config.substitutions.append((r"%clangxx ", clangxx + " "))
 
-if 'darwin' in config.available_features:
-  # -fsanitize=cfi is not supported on Darwin hosts
-  config.unsupported = True
+if "darwin" in config.available_features:
+    # -fsanitize=cfi is not supported on Darwin hosts
+    config.unsupported = True
 elif config.lto_supported:
-  clang_cfi = clang + '-fsanitize=cfi '
-
-  if config.cfi_lit_test_mode == "Devirt":
-    config.available_features.add('devirt')
-    clang_cfi += '-fwhole-program-vtables '
-    config.substitutions.append((r"%expect_crash_unless_devirt ", ""))
-  else:
-    config.substitutions.append((r"%expect_crash_unless_devirt ", config.expect_crash))
-
-  cxx = ' '.join(config.cxx_mode_flags) + ' '
-  diag = '-fno-sanitize-trap=cfi -fsanitize-recover=cfi '
-  non_dso = '-fvisibility=hidden '
-  dso = '-fsanitize-cfi-cross-dso -fvisibility=default '
-  if config.android:
-    dso += '-include ' + config.test_source_root + '/cross-dso/util/cfi_stubs.h '
-  config.substitutions.append((r"%clang_cfi ", clang_cfi + non_dso))
-  config.substitutions.append((r"%clangxx_cfi ", clang_cfi + cxx + non_dso))
-  config.substitutions.append((r"%clang_cfi_diag ", clang_cfi + non_dso + diag))
-  config.substitutions.append((r"%clangxx_cfi_diag ", clang_cfi + cxx + non_dso + diag))
-  config.substitutions.append((r"%clangxx_cfi_dso ", clang_cfi + cxx + dso))
-  config.substitutions.append((r"%clangxx_cfi_dso_diag ", clang_cfi + cxx + dso + diag))
-  config.substitutions.append((r"%debug_info_flags", ' '.join(config.debug_info_flags)))
+    clang_cfi = clang + "-fsanitize=cfi "
+
+    if config.cfi_lit_test_mode == "Devirt":
+        config.available_features.add("devirt")
+        clang_cfi += "-fwhole-program-vtables "
+        config.substitutions.append((r"%expect_crash_unless_devirt ", ""))
+    else:
+        config.substitutions.append(
+            (r"%expect_crash_unless_devirt ", config.expect_crash)
+        )
+
+    cxx = " ".join(config.cxx_mode_flags) + " "
+    diag = "-fno-sanitize-trap=cfi -fsanitize-recover=cfi "
+    non_dso = "-fvisibility=hidden "
+    dso = "-fsanitize-cfi-cross-dso -fvisibility=default "
+    if config.android:
+        dso += "-include " + config.test_source_root + "/cross-dso/util/cfi_stubs.h "
+    config.substitutions.append((r"%clang_cfi ", clang_cfi + non_dso))
+    config.substitutions.append((r"%clangxx_cfi ", clang_cfi + cxx + non_dso))
+    config.substitutions.append((r"%clang_cfi_diag ", clang_cfi + non_dso + diag))
+    config.substitutions.append(
+        (r"%clangxx_cfi_diag ", clang_cfi + cxx + non_dso + diag)
+    )
+    config.substitutions.append((r"%clangxx_cfi_dso ", clang_cfi + cxx + dso))
+    config.substitutions.append(
+        (r"%clangxx_cfi_dso_diag ", clang_cfi + cxx + dso + diag)
+    )
+    config.substitutions.append(
+        (r"%debug_info_flags", " ".join(config.debug_info_flags))
+    )
 else:
-  config.unsupported = True
+    config.unsupported = True
 
 if config.default_sanitizer_opts:
-  config.environment['UBSAN_OPTIONS'] = ':'.join(config.default_sanitizer_opts)
+    config.environment["UBSAN_OPTIONS"] = ":".join(config.default_sanitizer_opts)
 
-if lit_config.params.get('check_supported', None) and config.unsupported:
-  raise BaseException("Tests unsupported")
+if lit_config.params.get("check_supported", None) and config.unsupported:
+    raise BaseException("Tests unsupported")

diff --git a/compiler-rt/test/crt/lit.cfg.py b/compiler-rt/test/crt/lit.cfg.py
index d5a6aa9862d9d..7024c7e80dae8 100644
--- a/compiler-rt/test/crt/lit.cfg.py
+++ b/compiler-rt/test/crt/lit.cfg.py
@@ -5,7 +5,7 @@
 import shlex
 
 # Setup config name.
-config.name = 'CRT' + config.name_suffix
+config.name = "CRT" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
@@ -16,80 +16,80 @@
 use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
 if use_lit_shell:
     # 0 is external, "" is default, and everything else is internal.
-    execute_external = (use_lit_shell == "0")
+    execute_external = use_lit_shell == "0"
 else:
     # Otherwise we default to internal on Windows and external elsewhere, as
     # bash on Windows is usually very slow.
-    execute_external = (not sys.platform in ['win32'])
+    execute_external = not sys.platform in ["win32"]
+
 
 def get_library_path(file):
-    cmd = subprocess.Popen([config.clang.strip(),
-                            '-print-file-name=%s' % file] +
-                           shlex.split(config.target_cflags),
-                           stdout=subprocess.PIPE,
-                           env=config.environment,
-                           universal_newlines=True)
+    cmd = subprocess.Popen(
+        [config.clang.strip(), "-print-file-name=%s" % file]
+        + shlex.split(config.target_cflags),
+        stdout=subprocess.PIPE,
+        env=config.environment,
+        universal_newlines=True,
+    )
     if not cmd.stdout:
-      lit_config.fatal("Couldn't find the library path for '%s'" % file)
+        lit_config.fatal("Couldn't find the library path for '%s'" % file)
     dir = cmd.stdout.read().strip()
-    if sys.platform in ['win32'] and execute_external:
+    if sys.platform in ["win32"] and execute_external:
         # Don't pass dosish path separator to msys bash.exe.
-        dir = dir.replace('\\', '/')
+        dir = dir.replace("\\", "/")
     return dir
 
 
 def get_libgcc_file_name():
-    cmd = subprocess.Popen([config.clang.strip(),
-                            '-print-libgcc-file-name'] +
-                           shlex.split(config.target_cflags),
-                           stdout=subprocess.PIPE,
-                           env=config.environment,
-                           universal_newlines=True)
+    cmd = subprocess.Popen(
+        [config.clang.strip(), "-print-libgcc-file-name"]
+        + shlex.split(config.target_cflags),
+        stdout=subprocess.PIPE,
+        env=config.environment,
+        universal_newlines=True,
+    )
     if not cmd.stdout:
-      lit_config.fatal("Couldn't find the library path for '%s'" % file)
+        lit_config.fatal("Couldn't find the library path for '%s'" % file)
     dir = cmd.stdout.read().strip()
-    if sys.platform in ['win32'] and execute_external:
+    if sys.platform in ["win32"] and execute_external:
         # Don't pass dosish path separator to msys bash.exe.
-        dir = dir.replace('\\', '/')
+        dir = dir.replace("\\", "/")
     return dir
 
 
 def build_invocation(compile_flags):
-    return ' ' + ' '.join([config.clang] + compile_flags) + ' '
+    return " " + " ".join([config.clang] + compile_flags) + " "
 
 
 # Setup substitutions.
+config.substitutions.append(("%clang ", build_invocation([config.target_cflags])))
 config.substitutions.append(
-    ('%clang ', build_invocation([config.target_cflags])))
-config.substitutions.append(
-    ('%clangxx ',
-     build_invocation(config.cxx_mode_flags + [config.target_cflags])))
+    ("%clangxx ", build_invocation(config.cxx_mode_flags + [config.target_cflags]))
+)
 
 base_lib = os.path.join(
-    config.compiler_rt_libdir, "clang_rt.%%s%s.o" % config.target_suffix)
+    config.compiler_rt_libdir, "clang_rt.%%s%s.o" % config.target_suffix
+)
 
-if sys.platform in ['win32'] and execute_external:
+if sys.platform in ["win32"] and execute_external:
     # Don't pass dosish path separator to msys bash.exe.
-    base_lib = base_lib.replace('\\', '/')
+    base_lib = base_lib.replace("\\", "/")
 
-config.substitutions.append(('%crtbegin', base_lib % "crtbegin"))
-config.substitutions.append(('%crtend', base_lib % "crtend"))
+config.substitutions.append(("%crtbegin", base_lib % "crtbegin"))
+config.substitutions.append(("%crtend", base_lib % "crtend"))
 
-config.substitutions.append(
-    ('%crt1', get_library_path('crt1.o')))
-config.substitutions.append(
-    ('%crti', get_library_path('crti.o')))
-config.substitutions.append(
-    ('%crtn', get_library_path('crtn.o')))
+config.substitutions.append(("%crt1", get_library_path("crt1.o")))
+config.substitutions.append(("%crti", get_library_path("crti.o")))
+config.substitutions.append(("%crtn", get_library_path("crtn.o")))
 
-config.substitutions.append(
-    ('%libgcc', get_libgcc_file_name()))
+config.substitutions.append(("%libgcc", get_libgcc_file_name()))
 
 config.substitutions.append(
-    ('%libstdcxx', '-l' + config.sanitizer_cxx_lib.lstrip('lib')))
+    ("%libstdcxx", "-l" + config.sanitizer_cxx_lib.lstrip("lib"))
+)
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
-if config.host_os not in ['Linux']:
+if config.host_os not in ["Linux"]:
     config.unsupported = True

diff --git a/compiler-rt/test/dfsan/lit.cfg.py b/compiler-rt/test/dfsan/lit.cfg.py
index ea1d450a34530..286c0c7aab1da 100644
--- a/compiler-rt/test/dfsan/lit.cfg.py
+++ b/compiler-rt/test/dfsan/lit.cfg.py
@@ -3,26 +3,27 @@
 import os
 
 # Setup config name.
-config.name = 'DataFlowSanitizer' + config.name_suffix
+config.name = "DataFlowSanitizer" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Setup default compiler flags used with -fsanitize=dataflow option.
-clang_dfsan_cflags = (["-fsanitize=dataflow"] +
-                      [config.target_cflags])
+clang_dfsan_cflags = ["-fsanitize=dataflow"] + [config.target_cflags]
 
 clang_dfsan_cxxflags = config.cxx_mode_flags + clang_dfsan_cflags
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
-config.substitutions.append( ("%clang_dfsan ", build_invocation(clang_dfsan_cflags)) )
-config.substitutions.append( ("%clangxx_dfsan ", build_invocation(clang_dfsan_cxxflags)) )
+config.substitutions.append(("%clang_dfsan ", build_invocation(clang_dfsan_cflags)))
+config.substitutions.append(("%clangxx_dfsan ", build_invocation(clang_dfsan_cxxflags)))
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
 # DataFlowSanitizer tests are currently supported on Linux only.
-if not (config.host_os in ['Linux'] and config.target_arch in ['aarch64', 'x86_64']):
-  config.unsupported = True
+if not (config.host_os in ["Linux"] and config.target_arch in ["aarch64", "x86_64"]):
+    config.unsupported = True

diff --git a/compiler-rt/test/fuzzer/lit.cfg.py b/compiler-rt/test/fuzzer/lit.cfg.py
index f0140c8f3ba40..7db3bd9340b04 100644
--- a/compiler-rt/test/fuzzer/lit.cfg.py
+++ b/compiler-rt/test/fuzzer/lit.cfg.py
@@ -4,7 +4,7 @@
 
 config.name = "libFuzzer" + config.name_suffix
 config.test_format = lit.formats.ShTest(True)
-config.suffixes = ['.test']
+config.suffixes = [".test"]
 config.test_source_root = os.path.dirname(__file__)
 config.available_features.add(config.target_arch)
 lit_config.note(f'arch feature "{config.target_arch}" available')
@@ -14,11 +14,11 @@
 use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
 if use_lit_shell:
     # 0 is external, "" is default, and everything else is internal.
-    execute_external = (use_lit_shell == "0")
+    execute_external = use_lit_shell == "0"
 else:
     # Otherwise we default to internal on Windows and external elsewhere, as
     # bash on Windows is usually very slow.
-    execute_external = (not sys.platform in ['win32'])
+    execute_external = not sys.platform in ["win32"]
 
 # testFormat: The test format to use to interpret tests.
 #
@@ -27,100 +27,116 @@
 config.test_format = lit.formats.ShTest(execute_external)
 
 # LeakSanitizer is not supported on OSX or Windows right now.
-if (sys.platform.startswith('darwin') or
-    sys.platform.startswith('freebsd') or
-    sys.platform.startswith('win')):
-  lit_config.note('lsan feature unavailable')
+if (
+    sys.platform.startswith("darwin")
+    or sys.platform.startswith("freebsd")
+    or sys.platform.startswith("win")
+):
+    lit_config.note("lsan feature unavailable")
 else:
-  lit_config.note('lsan feature available')
-  config.available_features.add('lsan')
+    lit_config.note("lsan feature available")
+    config.available_features.add("lsan")
 
 # MemorySanitizer is not supported on OSX or Windows right now
-if (sys.platform.startswith('darwin') or sys.platform.startswith('win') or
-    config.target_arch == 'i386'):
-  lit_config.note('msan feature unavailable')
-  assert 'msan' not in config.available_features
+if (
+    sys.platform.startswith("darwin")
+    or sys.platform.startswith("win")
+    or config.target_arch == "i386"
+):
+    lit_config.note("msan feature unavailable")
+    assert "msan" not in config.available_features
 else:
-  lit_config.note('msan feature available')
-  config.available_features.add('msan')
+    lit_config.note("msan feature available")
+    config.available_features.add("msan")
 
-if sys.platform.startswith('win') or sys.platform.startswith('cygwin'):
-  config.available_features.add('windows')
+if sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
+    config.available_features.add("windows")
 
-if sys.platform.startswith('darwin'):
-  config.available_features.add('darwin')
+if sys.platform.startswith("darwin"):
+    config.available_features.add("darwin")
 
-if sys.platform.startswith('linux'):
-  # Note the value of ``sys.platform`` is not consistent
-  # between python 2 and 3, hence the use of ``.startswith()``.
-  lit_config.note('linux feature available')
-  config.available_features.add('linux')
+if sys.platform.startswith("linux"):
+    # Note the value of ``sys.platform`` is not consistent
+    # between python 2 and 3, hence the use of ``.startswith()``.
+    lit_config.note("linux feature available")
+    config.available_features.add("linux")
 else:
-  lit_config.note('linux feature unavailable')
+    lit_config.note("linux feature unavailable")
 
 if config.arm_thumb:
-  config.available_features.add('thumb')
+    config.available_features.add("thumb")
 
-config.substitutions.append(('%build_dir', config.cmake_binary_dir))
+config.substitutions.append(("%build_dir", config.cmake_binary_dir))
 libfuzzer_src_root = os.path.join(config.compiler_rt_src_root, "lib", "fuzzer")
-config.substitutions.append(('%libfuzzer_src', libfuzzer_src_root))
+config.substitutions.append(("%libfuzzer_src", libfuzzer_src_root))
+
+config.substitutions.append(("%python", '"%s"' % (sys.executable)))
 
-config.substitutions.append(('%python', '"%s"' % (sys.executable)))
 
 def generate_compiler_cmd(is_cpp=True, fuzzer_enabled=True, msan_enabled=False):
-  compiler_cmd = config.clang
-  extra_cmd = config.target_flags
-
-  if is_cpp:
-    std_cmd = '--driver-mode=g++'
-  else:
-    std_cmd = ''
-
-  if msan_enabled:
-    sanitizers = ['memory']
-  else:
-    sanitizers = ['address']
-  if fuzzer_enabled:
-    sanitizers.append('fuzzer')
-  sanitizers_cmd = ('-fsanitize=%s' % ','.join(sanitizers))
-  return " ".join([
-    compiler_cmd,
-    std_cmd,
-    "-O2 -gline-tables-only",
-    sanitizers_cmd,
-    "-I%s" % libfuzzer_src_root,
-    extra_cmd
-  ])
-
-config.substitutions.append(('%cpp_compiler',
-      generate_compiler_cmd(is_cpp=True, fuzzer_enabled=True)
-      ))
-
-config.substitutions.append(('%c_compiler',
-      generate_compiler_cmd(is_cpp=False, fuzzer_enabled=True)
-      ))
-
-config.substitutions.append(('%no_fuzzer_cpp_compiler',
-      generate_compiler_cmd(is_cpp=True, fuzzer_enabled=False)
-      ))
-
-config.substitutions.append(('%no_fuzzer_c_compiler',
-      generate_compiler_cmd(is_cpp=False, fuzzer_enabled=False)
-      ))
-
-config.substitutions.append(('%msan_compiler',
-      generate_compiler_cmd(is_cpp=True, fuzzer_enabled=True, msan_enabled=True)
-      ))
-
-default_asan_opts_str = ':'.join(config.default_sanitizer_opts)
+    compiler_cmd = config.clang
+    extra_cmd = config.target_flags
+
+    if is_cpp:
+        std_cmd = "--driver-mode=g++"
+    else:
+        std_cmd = ""
+
+    if msan_enabled:
+        sanitizers = ["memory"]
+    else:
+        sanitizers = ["address"]
+    if fuzzer_enabled:
+        sanitizers.append("fuzzer")
+    sanitizers_cmd = "-fsanitize=%s" % ",".join(sanitizers)
+    return " ".join(
+        [
+            compiler_cmd,
+            std_cmd,
+            "-O2 -gline-tables-only",
+            sanitizers_cmd,
+            "-I%s" % libfuzzer_src_root,
+            extra_cmd,
+        ]
+    )
+
+
+config.substitutions.append(
+    ("%cpp_compiler", generate_compiler_cmd(is_cpp=True, fuzzer_enabled=True))
+)
+
+config.substitutions.append(
+    ("%c_compiler", generate_compiler_cmd(is_cpp=False, fuzzer_enabled=True))
+)
+
+config.substitutions.append(
+    (
+        "%no_fuzzer_cpp_compiler",
+        generate_compiler_cmd(is_cpp=True, fuzzer_enabled=False),
+    )
+)
+
+config.substitutions.append(
+    ("%no_fuzzer_c_compiler", generate_compiler_cmd(is_cpp=False, fuzzer_enabled=False))
+)
+
+config.substitutions.append(
+    (
+        "%msan_compiler",
+        generate_compiler_cmd(is_cpp=True, fuzzer_enabled=True, msan_enabled=True),
+    )
+)
+
+default_asan_opts_str = ":".join(config.default_sanitizer_opts)
 if default_asan_opts_str:
-  config.environment['ASAN_OPTIONS'] = default_asan_opts_str
-  default_asan_opts_str += ':'
-config.substitutions.append(('%env_asan_opts=',
-                             'env ASAN_OPTIONS=' + default_asan_opts_str))
+    config.environment["ASAN_OPTIONS"] = default_asan_opts_str
+    default_asan_opts_str += ":"
+config.substitutions.append(
+    ("%env_asan_opts=", "env ASAN_OPTIONS=" + default_asan_opts_str)
+)
 
 if not config.parallelism_group:
-  config.parallelism_group = 'shadow-memory'
+    config.parallelism_group = "shadow-memory"
 
-if config.host_os == 'NetBSD':
-  config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+if config.host_os == "NetBSD":
+    config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))

diff --git a/compiler-rt/test/gwp_asan/lit.cfg.py b/compiler-rt/test/gwp_asan/lit.cfg.py
index c806ae2d47271..7f68682162e3f 100644
--- a/compiler-rt/test/gwp_asan/lit.cfg.py
+++ b/compiler-rt/test/gwp_asan/lit.cfg.py
@@ -3,54 +3,69 @@
 import os
 
 # Setup config name.
-config.name = 'GWP-ASan' + config.name_suffix
+config.name = "GWP-ASan" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Test suffixes.
-config.suffixes = ['.c', '.cpp', '.test']
+config.suffixes = [".c", ".cpp", ".test"]
 
 # C & CXX flags.
-c_flags = ([config.target_cflags])
+c_flags = [config.target_cflags]
 
-cxx_flags = (c_flags + config.cxx_mode_flags + ["-std=c++14"])
+cxx_flags = c_flags + config.cxx_mode_flags + ["-std=c++14"]
 
 libscudo_standalone = os.path.join(
-    config.compiler_rt_libdir,
-    "libclang_rt.scudo_standalone%s.a" % config.target_suffix)
+    config.compiler_rt_libdir, "libclang_rt.scudo_standalone%s.a" % config.target_suffix
+)
 libscudo_standalone_cxx = os.path.join(
     config.compiler_rt_libdir,
-    "libclang_rt.scudo_standalone_cxx%s.a" % config.target_suffix)
+    "libclang_rt.scudo_standalone_cxx%s.a" % config.target_suffix,
+)
 
-scudo_link_flags = ["-pthread", "-Wl,--whole-archive", libscudo_standalone,
-                    "-Wl,--no-whole-archive"]
-scudo_link_cxx_flags = ["-Wl,--whole-archive", libscudo_standalone_cxx,
-                        "-Wl,--no-whole-archive"]
+scudo_link_flags = [
+    "-pthread",
+    "-Wl,--whole-archive",
+    libscudo_standalone,
+    "-Wl,--no-whole-archive",
+]
+scudo_link_cxx_flags = [
+    "-Wl,--whole-archive",
+    libscudo_standalone_cxx,
+    "-Wl,--no-whole-archive",
+]
 
 # -rdynamic is necessary for online function symbolization.
 gwp_asan_flags = ["-rdynamic"] + scudo_link_flags
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
 # Add substitutions.
 config.substitutions.append(("%clang ", build_invocation(c_flags)))
 config.substitutions.append(
-    ("%clang_gwp_asan ", build_invocation(c_flags + gwp_asan_flags)))
-config.substitutions.append((
-    "%clangxx_gwp_asan ",
-    build_invocation(cxx_flags + gwp_asan_flags + scudo_link_cxx_flags)))
+    ("%clang_gwp_asan ", build_invocation(c_flags + gwp_asan_flags))
+)
+config.substitutions.append(
+    (
+        "%clangxx_gwp_asan ",
+        build_invocation(cxx_flags + gwp_asan_flags + scudo_link_cxx_flags),
+    )
+)
 
 # Platform-specific default GWP_ASAN for lit tests. Ensure that GWP-ASan is
 # enabled and that it samples every allocation.
-default_gwp_asan_options = 'GWP_ASAN_Enabled=1:GWP_ASAN_SampleRate=1'
+default_gwp_asan_options = "GWP_ASAN_Enabled=1:GWP_ASAN_SampleRate=1"
 
-config.environment['SCUDO_OPTIONS'] = default_gwp_asan_options
-default_gwp_asan_options += ':'
-config.substitutions.append(('%env_scudo_options=',
-                             'env SCUDO_OPTIONS=' + default_gwp_asan_options))
+config.environment["SCUDO_OPTIONS"] = default_gwp_asan_options
+default_gwp_asan_options += ":"
+config.substitutions.append(
+    ("%env_scudo_options=", "env SCUDO_OPTIONS=" + default_gwp_asan_options)
+)
 
 # GWP-ASan tests are currently supported on Linux only.
-if config.host_os not in ['Linux']:
-   config.unsupported = True
+if config.host_os not in ["Linux"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/hwasan/TestCases/Linux/lit.local.cfg.py b/compiler-rt/test/hwasan/TestCases/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/hwasan/TestCases/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/hwasan/TestCases/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/hwasan/TestCases/Posix/lit.local.cfg.py b/compiler-rt/test/hwasan/TestCases/Posix/lit.local.cfg.py
index 60a9460820a62..63240c3962565 100644
--- a/compiler-rt/test/hwasan/TestCases/Posix/lit.local.cfg.py
+++ b/compiler-rt/test/hwasan/TestCases/Posix/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os in ['Windows']:
-  config.unsupported = True
+if root.host_os in ["Windows"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/hwasan/lit.cfg.py b/compiler-rt/test/hwasan/lit.cfg.py
index 85fda0c70324c..71a7f96d652ee 100644
--- a/compiler-rt/test/hwasan/lit.cfg.py
+++ b/compiler-rt/test/hwasan/lit.cfg.py
@@ -3,7 +3,7 @@
 import os
 
 # Setup config name.
-config.name = 'HWAddressSanitizer' + getattr(config, 'name_suffix', 'default')
+config.name = "HWAddressSanitizer" + getattr(config, "name_suffix", "default")
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
@@ -13,46 +13,69 @@
 clang_cxxflags = config.cxx_mode_flags + clang_cflags
 clang_hwasan_common_cflags = clang_cflags + ["-fsanitize=hwaddress", "-fuse-ld=lld"]
 
-if config.target_arch == 'x86_64' and config.enable_aliases == '1':
-  clang_hwasan_common_cflags += ["-fsanitize-hwaddress-experimental-aliasing"]
+if config.target_arch == "x86_64" and config.enable_aliases == "1":
+    clang_hwasan_common_cflags += ["-fsanitize-hwaddress-experimental-aliasing"]
 else:
-  config.available_features.add('pointer-tagging')
-if config.target_arch == 'x86_64':
-  # The callback instrumentation used on x86_64 has a 1/64 chance of choosing a
-  # stack tag of 0.  This causes stack tests to become flaky, so we force tags
-  # to be generated via calls to __hwasan_generate_tag, which never returns 0.
-  # TODO: See if we can remove this once we use the outlined instrumentation.
-  clang_hwasan_common_cflags += ["-mllvm", "-hwasan-generate-tags-with-calls=1"]
-clang_hwasan_cflags = clang_hwasan_common_cflags + ["-mllvm", "-hwasan-globals",
-                                                   "-mllvm", "-hwasan-use-short-granules",
-                                                   "-mllvm", "-hwasan-instrument-landing-pads=0",
-                                                   "-mllvm", "-hwasan-instrument-personality-functions"]
-clang_hwasan_oldrt_cflags = clang_hwasan_common_cflags + ["-mllvm", "-hwasan-use-short-granules=0",
-                                                          "-mllvm", "-hwasan-instrument-landing-pads=1",
-                                                          "-mllvm", "-hwasan-instrument-personality-functions=0"]
+    config.available_features.add("pointer-tagging")
+if config.target_arch == "x86_64":
+    # The callback instrumentation used on x86_64 has a 1/64 chance of choosing a
+    # stack tag of 0.  This causes stack tests to become flaky, so we force tags
+    # to be generated via calls to __hwasan_generate_tag, which never returns 0.
+    # TODO: See if we can remove this once we use the outlined instrumentation.
+    clang_hwasan_common_cflags += ["-mllvm", "-hwasan-generate-tags-with-calls=1"]
+clang_hwasan_cflags = clang_hwasan_common_cflags + [
+    "-mllvm",
+    "-hwasan-globals",
+    "-mllvm",
+    "-hwasan-use-short-granules",
+    "-mllvm",
+    "-hwasan-instrument-landing-pads=0",
+    "-mllvm",
+    "-hwasan-instrument-personality-functions",
+]
+clang_hwasan_oldrt_cflags = clang_hwasan_common_cflags + [
+    "-mllvm",
+    "-hwasan-use-short-granules=0",
+    "-mllvm",
+    "-hwasan-instrument-landing-pads=1",
+    "-mllvm",
+    "-hwasan-instrument-personality-functions=0",
+]
 
 clang_hwasan_cxxflags = config.cxx_mode_flags + clang_hwasan_cflags
 clang_hwasan_oldrt_cxxflags = config.cxx_mode_flags + clang_hwasan_oldrt_cflags
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
-config.substitutions.append( ("%clangxx ", build_invocation(clang_cxxflags)) )
-config.substitutions.append( ("%clang_hwasan ", build_invocation(clang_hwasan_cflags)) )
-config.substitutions.append( ("%clang_hwasan_oldrt ", build_invocation(clang_hwasan_oldrt_cflags)) )
-config.substitutions.append( ("%clangxx_hwasan ", build_invocation(clang_hwasan_cxxflags)) )
-config.substitutions.append( ("%clangxx_hwasan_oldrt ", build_invocation(clang_hwasan_oldrt_cxxflags)) )
-config.substitutions.append( ("%compiler_rt_libdir", config.compiler_rt_libdir) )
+config.substitutions.append(("%clangxx ", build_invocation(clang_cxxflags)))
+config.substitutions.append(("%clang_hwasan ", build_invocation(clang_hwasan_cflags)))
+config.substitutions.append(
+    ("%clang_hwasan_oldrt ", build_invocation(clang_hwasan_oldrt_cflags))
+)
+config.substitutions.append(
+    ("%clangxx_hwasan ", build_invocation(clang_hwasan_cxxflags))
+)
+config.substitutions.append(
+    ("%clangxx_hwasan_oldrt ", build_invocation(clang_hwasan_oldrt_cxxflags))
+)
+config.substitutions.append(("%compiler_rt_libdir", config.compiler_rt_libdir))
 
-default_hwasan_opts_str = ':'.join(['disable_allocator_tagging=1', 'random_tags=0', 'fail_without_syscall_abi=0'] + config.default_sanitizer_opts)
+default_hwasan_opts_str = ":".join(
+    ["disable_allocator_tagging=1", "random_tags=0", "fail_without_syscall_abi=0"]
+    + config.default_sanitizer_opts
+)
 if default_hwasan_opts_str:
-  config.environment['HWASAN_OPTIONS'] = default_hwasan_opts_str
-  default_hwasan_opts_str += ':'
-config.substitutions.append(('%env_hwasan_opts=',
-                             'env HWASAN_OPTIONS=' + default_hwasan_opts_str))
+    config.environment["HWASAN_OPTIONS"] = default_hwasan_opts_str
+    default_hwasan_opts_str += ":"
+config.substitutions.append(
+    ("%env_hwasan_opts=", "env HWASAN_OPTIONS=" + default_hwasan_opts_str)
+)
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
-if config.host_os not in ['Linux', 'Android'] or not config.has_lld:
-  config.unsupported = True
+if config.host_os not in ["Linux", "Android"] or not config.has_lld:
+    config.unsupported = True

diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py
index 979659b1e3f30..a6ecbd95e0667 100644
--- a/compiler-rt/test/lit.common.cfg.py
+++ b/compiler-rt/test/lit.common.cfg.py
@@ -13,78 +13,79 @@
 import lit.formats
 import lit.util
 
+
 def find_compiler_libdir():
-  """
+    """
     Returns the path to library resource directory used
     by the compiler.
-  """
-  if config.compiler_id != 'Clang':
-    lit_config.warning(f'Determining compiler\'s runtime directory is not supported for {config.compiler_id}')
-    # TODO: Support other compilers.
-    return None
-  def get_path_from_clang(args, allow_failure):
-    clang_cmd = [
-      config.clang.strip(),
-      f'--target={config.target_triple}',
-    ]
-    clang_cmd.extend(args)
-    path = None
-    try:
-      result = subprocess.run(
-        clang_cmd,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE,
-        check=True
-      )
-      path = result.stdout.decode().strip()
-    except subprocess.CalledProcessError as e:
-      msg = f'Failed to run {clang_cmd}\nrc:{e.returncode}\nstdout:{e.stdout}\ne.stderr{e.stderr}'
-      if allow_failure:
-        lit_config.warning(msg)
-      else:
-        lit_config.fatal(msg)
-    return path, clang_cmd
-
-  # Try using `-print-runtime-dir`. This is only supported by very new versions of Clang.
-  # so allow failure here.
-  runtime_dir, clang_cmd = get_path_from_clang(shlex.split(config.target_cflags)
-                                               + ['-print-runtime-dir'],
-                                               allow_failure=True)
-  if runtime_dir:
-    if os.path.exists(runtime_dir):
-      return os.path.realpath(runtime_dir)
-    # TODO(dliew): This should be a fatal error but it seems to trip the `llvm-clang-win-x-aarch64`
-    # bot which is likely misconfigured
-    lit_config.warning(
-      f'Path reported by clang does not exist: \"{runtime_dir}\". '
-      f'This path was found by running {clang_cmd}.'
+    """
+    if config.compiler_id != "Clang":
+        lit_config.warning(
+            f"Determining compiler's runtime directory is not supported for {config.compiler_id}"
+        )
+        # TODO: Support other compilers.
+        return None
+
+    def get_path_from_clang(args, allow_failure):
+        clang_cmd = [
+            config.clang.strip(),
+            f"--target={config.target_triple}",
+        ]
+        clang_cmd.extend(args)
+        path = None
+        try:
+            result = subprocess.run(
+                clang_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
+            )
+            path = result.stdout.decode().strip()
+        except subprocess.CalledProcessError as e:
+            msg = f"Failed to run {clang_cmd}\nrc:{e.returncode}\nstdout:{e.stdout}\ne.stderr{e.stderr}"
+            if allow_failure:
+                lit_config.warning(msg)
+            else:
+                lit_config.fatal(msg)
+        return path, clang_cmd
+
+    # Try using `-print-runtime-dir`. This is only supported by very new versions of Clang.
+    # so allow failure here.
+    runtime_dir, clang_cmd = get_path_from_clang(
+        shlex.split(config.target_cflags) + ["-print-runtime-dir"], allow_failure=True
     )
+    if runtime_dir:
+        if os.path.exists(runtime_dir):
+            return os.path.realpath(runtime_dir)
+        # TODO(dliew): This should be a fatal error but it seems to trip the `llvm-clang-win-x-aarch64`
+        # bot which is likely misconfigured
+        lit_config.warning(
+            f'Path reported by clang does not exist: "{runtime_dir}". '
+            f"This path was found by running {clang_cmd}."
+        )
+        return None
+
+    # Fall back for older AppleClang that doesn't support `-print-runtime-dir`
+    # Note `-print-file-name=<path to compiler-rt lib>` was broken for Apple
+    # platforms so we can't use that approach here (see https://reviews.llvm.org/D101682).
+    if config.host_os == "Darwin":
+        lib_dir, _ = get_path_from_clang(["-print-file-name=lib"], allow_failure=False)
+        runtime_dir = os.path.join(lib_dir, "darwin")
+        if not os.path.exists(runtime_dir):
+            lit_config.fatal(f"Path reported by clang does not exist: {runtime_dir}")
+        return os.path.realpath(runtime_dir)
+
+    lit_config.warning("Failed to determine compiler's runtime directory")
     return None
 
-  # Fall back for older AppleClang that doesn't support `-print-runtime-dir`
-  # Note `-print-file-name=<path to compiler-rt lib>` was broken for Apple
-  # platforms so we can't use that approach here (see https://reviews.llvm.org/D101682).
-  if config.host_os == 'Darwin':
-    lib_dir, _ = get_path_from_clang(['-print-file-name=lib'], allow_failure=False)
-    runtime_dir = os.path.join(lib_dir, 'darwin')
-    if not os.path.exists(runtime_dir):
-      lit_config.fatal(f'Path reported by clang does not exist: {runtime_dir}')
-    return os.path.realpath(runtime_dir)
-
-  lit_config.warning('Failed to determine compiler\'s runtime directory')
-  return None
-
 
 # Choose between lit's internal shell pipeline runner and a real shell.  If
 # LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override.
 use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
 if use_lit_shell:
     # 0 is external, "" is default, and everything else is internal.
-    execute_external = (use_lit_shell == "0")
+    execute_external = use_lit_shell == "0"
 else:
     # Otherwise we default to internal on Windows and external elsewhere, as
     # bash on Windows is usually very slow.
-    execute_external = (not sys.platform in ['win32'])
+    execute_external = not sys.platform in ["win32"]
 
 # Allow expanding substitutions that are based on other substitutions
 config.recursiveExpansionLimit = 10
@@ -92,30 +93,30 @@ def get_path_from_clang(args, allow_failure):
 # Setup test format.
 config.test_format = lit.formats.ShTest(execute_external)
 if execute_external:
-  config.available_features.add('shell')
+    config.available_features.add("shell")
 
-target_is_msvc = bool(re.match(r'.*-windows-msvc$', config.target_triple))
+target_is_msvc = bool(re.match(r".*-windows-msvc$", config.target_triple))
 
-compiler_id = getattr(config, 'compiler_id', None)
+compiler_id = getattr(config, "compiler_id", None)
 if compiler_id == "Clang":
-  if not (platform.system() == 'Windows' and target_is_msvc):
-    config.cxx_mode_flags = ["--driver-mode=g++"]
-  else:
-    config.cxx_mode_flags = []
-  # We assume that sanitizers should provide good enough error
-  # reports and stack traces even with minimal debug info.
-  config.debug_info_flags = ["-gline-tables-only"]
-  if platform.system() == 'Windows' and target_is_msvc:
-    # On MSVC, use CodeView with column info instead of DWARF. Both VS and
-    # windbg do not behave well when column info is enabled, but users have
-    # requested it because it makes ASan reports more precise.
-    config.debug_info_flags.append("-gcodeview")
-    config.debug_info_flags.append("-gcolumn-info")
-elif compiler_id == 'GNU':
-  config.cxx_mode_flags = ["-x c++"]
-  config.debug_info_flags = ["-g"]
+    if not (platform.system() == "Windows" and target_is_msvc):
+        config.cxx_mode_flags = ["--driver-mode=g++"]
+    else:
+        config.cxx_mode_flags = []
+    # We assume that sanitizers should provide good enough error
+    # reports and stack traces even with minimal debug info.
+    config.debug_info_flags = ["-gline-tables-only"]
+    if platform.system() == "Windows" and target_is_msvc:
+        # On MSVC, use CodeView with column info instead of DWARF. Both VS and
+        # windbg do not behave well when column info is enabled, but users have
+        # requested it because it makes ASan reports more precise.
+        config.debug_info_flags.append("-gcodeview")
+        config.debug_info_flags.append("-gcolumn-info")
+elif compiler_id == "GNU":
+    config.cxx_mode_flags = ["-x c++"]
+    config.debug_info_flags = ["-g"]
 else:
-  lit_config.fatal("Unsupported compiler id: %r" % compiler_id)
+    lit_config.fatal("Unsupported compiler id: %r" % compiler_id)
 # Add compiler ID to the list of available features.
 config.available_features.add(compiler_id)
 
@@ -124,10 +125,14 @@ def get_path_from_clang(args, allow_failure):
 # triple as the trailing path component. The value is incorrect for -m32/-m64.
 # Adjust config.compiler_rt accordingly.
 if config.enable_per_target_runtime_dir:
-    if '-m32' in shlex.split(config.target_cflags):
-        config.compiler_rt_libdir = re.sub(r'/x86_64(?=-[^/]+$)', '/i386', config.compiler_rt_libdir)
-    elif '-m64' in shlex.split(config.target_cflags):
-        config.compiler_rt_libdir = re.sub(r'/i386(?=-[^/]+$)', '/x86_64', config.compiler_rt_libdir)
+    if "-m32" in shlex.split(config.target_cflags):
+        config.compiler_rt_libdir = re.sub(
+            r"/x86_64(?=-[^/]+$)", "/i386", config.compiler_rt_libdir
+        )
+    elif "-m64" in shlex.split(config.target_cflags):
+        config.compiler_rt_libdir = re.sub(
+            r"/i386(?=-[^/]+$)", "/x86_64", config.compiler_rt_libdir
+        )
 
 # Ask the compiler for the path to libraries it is going to use. If this
 # doesn't match config.compiler_rt_libdir then it means we might be testing the
@@ -135,99 +140,121 @@ def get_path_from_clang(args, allow_failure):
 # Warn about about this and handle appropriately.
 compiler_libdir = find_compiler_libdir()
 if compiler_libdir:
-  compiler_rt_libdir_real = os.path.realpath(config.compiler_rt_libdir)
-  if compiler_libdir != compiler_rt_libdir_real:
-    lit_config.warning(
-      'Compiler lib dir != compiler-rt lib dir\n'
-      f'Compiler libdir:     "{compiler_libdir}"\n'
-      f'compiler-rt libdir:  "{compiler_rt_libdir_real}"')
-    if config.test_standalone_build_libs:
-      # Use just built runtime libraries, i.e. the the libraries this built just built.
-      if not config.test_suite_supports_overriding_runtime_lib_path:
-        # Test suite doesn't support this configuration.
-        # TODO(dliew): This should be an error but it seems several bots are
-        # testing incorrectly and having this as an error breaks them.
+    compiler_rt_libdir_real = os.path.realpath(config.compiler_rt_libdir)
+    if compiler_libdir != compiler_rt_libdir_real:
         lit_config.warning(
-            'COMPILER_RT_TEST_STANDALONE_BUILD_LIBS=ON, but this test suite '
-            'does not support testing the just-built runtime libraries '
-            'when the test compiler is configured to use different runtime '
-            'libraries. Either modify this test suite to support this test '
-            'configuration, or set COMPILER_RT_TEST_STANDALONE_BUILD_LIBS=OFF '
-            'to test the runtime libraries included in the compiler instead.'
+            "Compiler lib dir != compiler-rt lib dir\n"
+            f'Compiler libdir:     "{compiler_libdir}"\n'
+            f'compiler-rt libdir:  "{compiler_rt_libdir_real}"'
         )
-    else:
-      # Use Compiler's resource library directory instead.
-      config.compiler_rt_libdir = compiler_libdir
-    lit_config.note(f'Testing using libraries in "{config.compiler_rt_libdir}"')
+        if config.test_standalone_build_libs:
+            # Use just built runtime libraries, i.e. the the libraries this built just built.
+            if not config.test_suite_supports_overriding_runtime_lib_path:
+                # Test suite doesn't support this configuration.
+                # TODO(dliew): This should be an error but it seems several bots are
+                # testing incorrectly and having this as an error breaks them.
+                lit_config.warning(
+                    "COMPILER_RT_TEST_STANDALONE_BUILD_LIBS=ON, but this test suite "
+                    "does not support testing the just-built runtime libraries "
+                    "when the test compiler is configured to use different runtime "
+                    "libraries. Either modify this test suite to support this test "
+                    "configuration, or set COMPILER_RT_TEST_STANDALONE_BUILD_LIBS=OFF "
+                    "to test the runtime libraries included in the compiler instead."
+                )
+        else:
+            # Use Compiler's resource library directory instead.
+            config.compiler_rt_libdir = compiler_libdir
+        lit_config.note(f'Testing using libraries in "{config.compiler_rt_libdir}"')
 
 # If needed, add cflag for shadow scale.
-if config.asan_shadow_scale != '':
-  config.target_cflags += " -mllvm -asan-mapping-scale=" + config.asan_shadow_scale
-if config.memprof_shadow_scale != '':
-  config.target_cflags += " -mllvm -memprof-mapping-scale=" + config.memprof_shadow_scale
+if config.asan_shadow_scale != "":
+    config.target_cflags += " -mllvm -asan-mapping-scale=" + config.asan_shadow_scale
+if config.memprof_shadow_scale != "":
+    config.target_cflags += (
+        " -mllvm -memprof-mapping-scale=" + config.memprof_shadow_scale
+    )
 
 # Clear some environment variables that might affect Clang.
-possibly_dangerous_env_vars = ['ASAN_OPTIONS', 'DFSAN_OPTIONS', 'HWASAN_OPTIONS',
-                               'LSAN_OPTIONS', 'MSAN_OPTIONS', 'UBSAN_OPTIONS',
-                               'COMPILER_PATH', 'RC_DEBUG_OPTIONS',
-                               'CINDEXTEST_PREAMBLE_FILE', 'LIBRARY_PATH',
-                               'CPATH', 'C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH',
-                               'OBJC_INCLUDE_PATH', 'OBJCPLUS_INCLUDE_PATH',
-                               'LIBCLANG_TIMING', 'LIBCLANG_OBJTRACKING',
-                               'LIBCLANG_LOGGING', 'LIBCLANG_BGPRIO_INDEX',
-                               'LIBCLANG_BGPRIO_EDIT', 'LIBCLANG_NOTHREADS',
-                               'LIBCLANG_RESOURCE_USAGE',
-                               'LIBCLANG_CODE_COMPLETION_LOGGING',
-                               'XRAY_OPTIONS']
+possibly_dangerous_env_vars = [
+    "ASAN_OPTIONS",
+    "DFSAN_OPTIONS",
+    "HWASAN_OPTIONS",
+    "LSAN_OPTIONS",
+    "MSAN_OPTIONS",
+    "UBSAN_OPTIONS",
+    "COMPILER_PATH",
+    "RC_DEBUG_OPTIONS",
+    "CINDEXTEST_PREAMBLE_FILE",
+    "LIBRARY_PATH",
+    "CPATH",
+    "C_INCLUDE_PATH",
+    "CPLUS_INCLUDE_PATH",
+    "OBJC_INCLUDE_PATH",
+    "OBJCPLUS_INCLUDE_PATH",
+    "LIBCLANG_TIMING",
+    "LIBCLANG_OBJTRACKING",
+    "LIBCLANG_LOGGING",
+    "LIBCLANG_BGPRIO_INDEX",
+    "LIBCLANG_BGPRIO_EDIT",
+    "LIBCLANG_NOTHREADS",
+    "LIBCLANG_RESOURCE_USAGE",
+    "LIBCLANG_CODE_COMPLETION_LOGGING",
+    "XRAY_OPTIONS",
+]
 # Clang/MSVC may refer to %INCLUDE%. vsvarsall.bat sets it.
-if not (platform.system() == 'Windows' and target_is_msvc):
-    possibly_dangerous_env_vars.append('INCLUDE')
+if not (platform.system() == "Windows" and target_is_msvc):
+    possibly_dangerous_env_vars.append("INCLUDE")
 for name in possibly_dangerous_env_vars:
-  if name in config.environment:
-    del config.environment[name]
+    if name in config.environment:
+        del config.environment[name]
 
 # Tweak PATH to include llvm tools dir.
 if (not config.llvm_tools_dir) or (not os.path.exists(config.llvm_tools_dir)):
-  lit_config.fatal("Invalid llvm_tools_dir config attribute: %r" % config.llvm_tools_dir)
-path = os.path.pathsep.join((config.llvm_tools_dir, config.environment['PATH']))
-config.environment['PATH'] = path
+    lit_config.fatal(
+        "Invalid llvm_tools_dir config attribute: %r" % config.llvm_tools_dir
+    )
+path = os.path.pathsep.join((config.llvm_tools_dir, config.environment["PATH"]))
+config.environment["PATH"] = path
 
 # Help MSVS link.exe find the standard libraries.
 # Make sure we only try to use it when targetting Windows.
-if platform.system() == 'Windows' and target_is_msvc:
-  config.environment['LIB'] = os.environ['LIB']
+if platform.system() == "Windows" and target_is_msvc:
+    config.environment["LIB"] = os.environ["LIB"]
 
 config.available_features.add(config.host_os.lower())
 
 if config.target_triple.startswith("ppc") or config.target_triple.startswith("powerpc"):
-  config.available_features.add("ppc")
+    config.available_features.add("ppc")
 
-if re.match(r'^x86_64.*-linux', config.target_triple):
-  config.available_features.add("x86_64-linux")
+if re.match(r"^x86_64.*-linux", config.target_triple):
+    config.available_features.add("x86_64-linux")
 
 config.available_features.add("host-byteorder-" + sys.byteorder + "-endian")
 
 if config.have_zlib == "1":
-  config.available_features.add("zlib")
+    config.available_features.add("zlib")
 
 # Use ugly construction to explicitly prohibit "clang", "clang++" etc.
 # in RUN lines.
 config.substitutions.append(
-    (' clang', """\n\n*** Do not use 'clangXXX' in tests,
-     instead define '%clangXXX' substitution in lit config. ***\n\n""") )
-
-if config.host_os == 'NetBSD':
-  nb_commands_dir = os.path.join(config.compiler_rt_src_root,
-                                 "test", "sanitizer_common", "netbsd_commands")
-  config.netbsd_noaslr_prefix = ('sh ' +
-                                 os.path.join(nb_commands_dir, 'run_noaslr.sh'))
-  config.netbsd_nomprotect_prefix = ('sh ' +
-                                     os.path.join(nb_commands_dir,
-                                                  'run_nomprotect.sh'))
-  config.substitutions.append( ('%run_nomprotect',
-                                config.netbsd_nomprotect_prefix) )
+    (
+        " clang",
+        """\n\n*** Do not use 'clangXXX' in tests,
+     instead define '%clangXXX' substitution in lit config. ***\n\n""",
+    )
+)
+
+if config.host_os == "NetBSD":
+    nb_commands_dir = os.path.join(
+        config.compiler_rt_src_root, "test", "sanitizer_common", "netbsd_commands"
+    )
+    config.netbsd_noaslr_prefix = "sh " + os.path.join(nb_commands_dir, "run_noaslr.sh")
+    config.netbsd_nomprotect_prefix = "sh " + os.path.join(
+        nb_commands_dir, "run_nomprotect.sh"
+    )
+    config.substitutions.append(("%run_nomprotect", config.netbsd_nomprotect_prefix))
 else:
-  config.substitutions.append( ('%run_nomprotect', '%run') )
+    config.substitutions.append(("%run_nomprotect", "%run"))
 
 # Copied from libcxx's config.py
 def get_lit_conf(name, default=None):
@@ -239,492 +266,610 @@ def get_lit_conf(name, default=None):
             val = default
     return val
 
-emulator = get_lit_conf('emulator', None)
+
+emulator = get_lit_conf("emulator", None)
+
 
 def get_ios_commands_dir():
-  return os.path.join(config.compiler_rt_src_root, "test", "sanitizer_common", "ios_commands")
+    return os.path.join(
+        config.compiler_rt_src_root, "test", "sanitizer_common", "ios_commands"
+    )
+
 
 # Allow tests to be executed on a simulator or remotely.
 if emulator:
-  config.substitutions.append( ('%run', emulator) )
-  config.substitutions.append( ('%env ', "env ") )
-  # TODO: Implement `%device_rm` to perform removal of files in the emulator.
-  # For now just make it a no-op.
-  lit_config.warning('%device_rm is not implemented')
-  config.substitutions.append( ('%device_rm', 'echo ') )
-  config.compile_wrapper = ""
-elif config.host_os == 'Darwin' and config.apple_platform != "osx":
-  # Darwin tests can be targetting macOS, a device or a simulator. All devices
-  # are declared as "ios", even for iOS derivatives (tvOS, watchOS). Similarly,
-  # all simulators are "iossim". See the table below.
-  #
-  # =========================================================================
-  # Target             | Feature set
-  # =========================================================================
-  # macOS              | darwin
-  # iOS device         | darwin, ios
-  # iOS simulator      | darwin, ios, iossim
-  # tvOS device        | darwin, ios, tvos
-  # tvOS simulator     | darwin, ios, iossim, tvos, tvossim
-  # watchOS device     | darwin, ios, watchos
-  # watchOS simulator  | darwin, ios, iossim, watchos, watchossim
-  # =========================================================================
-
-  ios_or_iossim = "iossim" if config.apple_platform.endswith("sim") else "ios"
-
-  config.available_features.add('ios')
-  device_id_env = "SANITIZER_" + ios_or_iossim.upper() + "_TEST_DEVICE_IDENTIFIER"
-  if ios_or_iossim == "iossim":
-    config.available_features.add('iossim')
-    if device_id_env not in os.environ:
-      lit_config.fatal(
-        '{} must be set in the environment when running iossim tests'.format(
-          device_id_env))
-  if config.apple_platform != "ios" and config.apple_platform != "iossim":
-    config.available_features.add(config.apple_platform)
-
-  ios_commands_dir = get_ios_commands_dir()
-
-  run_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_run.py")
-  env_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_env.py")
-  compile_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_compile.py")
-  prepare_script = os.path.join(ios_commands_dir, ios_or_iossim + "_prepare.py")
-
-  if device_id_env in os.environ:
-    config.environment[device_id_env] = os.environ[device_id_env]
-  config.substitutions.append(('%run', run_wrapper))
-  config.substitutions.append(('%env ', env_wrapper + " "))
-  # Current implementation of %device_rm uses the run_wrapper to do
-  # the work.
-  config.substitutions.append(('%device_rm', '{} rm '.format(run_wrapper)))
-  config.compile_wrapper = compile_wrapper
-
-  try:
-    prepare_output = subprocess.check_output([prepare_script, config.apple_platform, config.clang]).decode().strip()
-  except subprocess.CalledProcessError as e:
-    print("Command failed:")
-    print(e.output)
-    raise e
-  if len(prepare_output) > 0: print(prepare_output)
-  prepare_output_json = prepare_output.split("\n")[-1]
-  prepare_output = json.loads(prepare_output_json)
-  config.environment.update(prepare_output["env"])
+    config.substitutions.append(("%run", emulator))
+    config.substitutions.append(("%env ", "env "))
+    # TODO: Implement `%device_rm` to perform removal of files in the emulator.
+    # For now just make it a no-op.
+    lit_config.warning("%device_rm is not implemented")
+    config.substitutions.append(("%device_rm", "echo "))
+    config.compile_wrapper = ""
+elif config.host_os == "Darwin" and config.apple_platform != "osx":
+    # Darwin tests can be targetting macOS, a device or a simulator. All devices
+    # are declared as "ios", even for iOS derivatives (tvOS, watchOS). Similarly,
+    # all simulators are "iossim". See the table below.
+    #
+    # =========================================================================
+    # Target             | Feature set
+    # =========================================================================
+    # macOS              | darwin
+    # iOS device         | darwin, ios
+    # iOS simulator      | darwin, ios, iossim
+    # tvOS device        | darwin, ios, tvos
+    # tvOS simulator     | darwin, ios, iossim, tvos, tvossim
+    # watchOS device     | darwin, ios, watchos
+    # watchOS simulator  | darwin, ios, iossim, watchos, watchossim
+    # =========================================================================
+
+    ios_or_iossim = "iossim" if config.apple_platform.endswith("sim") else "ios"
+
+    config.available_features.add("ios")
+    device_id_env = "SANITIZER_" + ios_or_iossim.upper() + "_TEST_DEVICE_IDENTIFIER"
+    if ios_or_iossim == "iossim":
+        config.available_features.add("iossim")
+        if device_id_env not in os.environ:
+            lit_config.fatal(
+                "{} must be set in the environment when running iossim tests".format(
+                    device_id_env
+                )
+            )
+    if config.apple_platform != "ios" and config.apple_platform != "iossim":
+        config.available_features.add(config.apple_platform)
+
+    ios_commands_dir = get_ios_commands_dir()
+
+    run_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_run.py")
+    env_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_env.py")
+    compile_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_compile.py")
+    prepare_script = os.path.join(ios_commands_dir, ios_or_iossim + "_prepare.py")
+
+    if device_id_env in os.environ:
+        config.environment[device_id_env] = os.environ[device_id_env]
+    config.substitutions.append(("%run", run_wrapper))
+    config.substitutions.append(("%env ", env_wrapper + " "))
+    # Current implementation of %device_rm uses the run_wrapper to do
+    # the work.
+    config.substitutions.append(("%device_rm", "{} rm ".format(run_wrapper)))
+    config.compile_wrapper = compile_wrapper
+
+    try:
+        prepare_output = (
+            subprocess.check_output(
+                [prepare_script, config.apple_platform, config.clang]
+            )
+            .decode()
+            .strip()
+        )
+    except subprocess.CalledProcessError as e:
+        print("Command failed:")
+        print(e.output)
+        raise e
+    if len(prepare_output) > 0:
+        print(prepare_output)
+    prepare_output_json = prepare_output.split("\n")[-1]
+    prepare_output = json.loads(prepare_output_json)
+    config.environment.update(prepare_output["env"])
 elif config.android:
-  config.available_features.add('android')
-  compile_wrapper = os.path.join(config.compiler_rt_src_root, "test", "sanitizer_common", "android_commands", "android_compile.py") + " "
-  config.compile_wrapper = compile_wrapper
-  config.substitutions.append( ('%run', "") )
-  config.substitutions.append( ('%env ', "env ") )
+    config.available_features.add("android")
+    compile_wrapper = (
+        os.path.join(
+            config.compiler_rt_src_root,
+            "test",
+            "sanitizer_common",
+            "android_commands",
+            "android_compile.py",
+        )
+        + " "
+    )
+    config.compile_wrapper = compile_wrapper
+    config.substitutions.append(("%run", ""))
+    config.substitutions.append(("%env ", "env "))
 else:
-  config.substitutions.append( ('%run', "") )
-  config.substitutions.append( ('%env ', "env ") )
-  # When running locally %device_rm is a no-op.
-  config.substitutions.append( ('%device_rm', 'echo ') )
-  config.compile_wrapper = ""
+    config.substitutions.append(("%run", ""))
+    config.substitutions.append(("%env ", "env "))
+    # When running locally %device_rm is a no-op.
+    config.substitutions.append(("%device_rm", "echo "))
+    config.compile_wrapper = ""
 
 # Define CHECK-%os to check for OS-dependent output.
-config.substitutions.append( ('CHECK-%os', ("CHECK-" + config.host_os)))
+config.substitutions.append(("CHECK-%os", ("CHECK-" + config.host_os)))
 
 # Define %arch to check for architecture-dependent output.
-config.substitutions.append( ('%arch', (config.host_arch)))
-
-if config.host_os == 'Windows':
-  # FIXME: This isn't quite right. Specifically, it will succeed if the program
-  # does not crash but exits with a non-zero exit code. We ought to merge
-  # KillTheDoctor and not --crash to make the latter more useful and remove the
-  # need for this substitution.
-  config.expect_crash = "not KillTheDoctor "
+config.substitutions.append(("%arch", (config.host_arch)))
+
+if config.host_os == "Windows":
+    # FIXME: This isn't quite right. Specifically, it will succeed if the program
+    # does not crash but exits with a non-zero exit code. We ought to merge
+    # KillTheDoctor and not --crash to make the latter more useful and remove the
+    # need for this substitution.
+    config.expect_crash = "not KillTheDoctor "
 else:
-  config.expect_crash = "not --crash "
+    config.expect_crash = "not --crash "
 
-config.substitutions.append( ("%expect_crash ", config.expect_crash) )
+config.substitutions.append(("%expect_crash ", config.expect_crash))
 
-target_arch = getattr(config, 'target_arch', None)
+target_arch = getattr(config, "target_arch", None)
 if target_arch:
-  config.available_features.add(target_arch + '-target-arch')
-  if target_arch in ['x86_64', 'i386']:
-    config.available_features.add('x86-target-arch')
-  config.available_features.add(target_arch + '-' + config.host_os.lower())
+    config.available_features.add(target_arch + "-target-arch")
+    if target_arch in ["x86_64", "i386"]:
+        config.available_features.add("x86-target-arch")
+    config.available_features.add(target_arch + "-" + config.host_os.lower())
 
-compiler_rt_debug = getattr(config, 'compiler_rt_debug', False)
+compiler_rt_debug = getattr(config, "compiler_rt_debug", False)
 if not compiler_rt_debug:
-  config.available_features.add('compiler-rt-optimized')
+    config.available_features.add("compiler-rt-optimized")
 
-libdispatch = getattr(config, 'compiler_rt_intercept_libdispatch', False)
+libdispatch = getattr(config, "compiler_rt_intercept_libdispatch", False)
 if libdispatch:
-  config.available_features.add('libdispatch')
+    config.available_features.add("libdispatch")
 
-sanitizer_can_use_cxxabi = getattr(config, 'sanitizer_can_use_cxxabi', True)
+sanitizer_can_use_cxxabi = getattr(config, "sanitizer_can_use_cxxabi", True)
 if sanitizer_can_use_cxxabi:
-  config.available_features.add('cxxabi')
+    config.available_features.add("cxxabi")
 
-if not getattr(config, 'sanitizer_uses_static_cxxabi', False):
-  config.available_features.add('shared_cxxabi')
+if not getattr(config, "sanitizer_uses_static_cxxabi", False):
+    config.available_features.add("shared_cxxabi")
 
-if not getattr(config, 'sanitizer_uses_static_unwind', False):
-  config.available_features.add('shared_unwind')
+if not getattr(config, "sanitizer_uses_static_unwind", False):
+    config.available_features.add("shared_unwind")
 
 if config.has_lld:
-  config.available_features.add('lld-available')
+    config.available_features.add("lld-available")
 
 if config.use_lld:
-  config.available_features.add('lld')
+    config.available_features.add("lld")
 
 if config.can_symbolize:
-  config.available_features.add('can-symbolize')
+    config.available_features.add("can-symbolize")
 
 if config.gwp_asan:
-  config.available_features.add('gwp_asan')
+    config.available_features.add("gwp_asan")
 
 lit.util.usePlatformSdkOnDarwin(config, lit_config)
 
 min_macos_deployment_target_substitutions = [
-  (10, 11),
-  (10, 12),
+    (10, 11),
+    (10, 12),
 ]
 # TLS requires watchOS 3+
-config.substitutions.append( ('%darwin_min_target_with_tls_support', '%min_macos_deployment_target=10.12') )
-
-if config.host_os == 'Darwin':
-  osx_version = (10, 0, 0)
-  try:
-    osx_version = subprocess.check_output(["sw_vers", "-productVersion"],
-                                          universal_newlines=True)
-    osx_version = tuple(int(x) for x in osx_version.split('.'))
-    if len(osx_version) == 2: osx_version = (osx_version[0], osx_version[1], 0)
-    if osx_version >= (10, 11):
-      config.available_features.add('osx-autointerception')
-      config.available_features.add('osx-ld64-live_support')
-    if osx_version >= (13, 1):
-      config.available_features.add('jit-compatible-osx-swift-runtime')
-  except subprocess.CalledProcessError:
-    pass
-
-  config.darwin_osx_version = osx_version
-
-  # Detect x86_64h
-  try:
-    output = subprocess.check_output(["sysctl", "hw.cpusubtype"])
-    output_re = re.match("^hw.cpusubtype: ([0-9]+)$", output)
-    if output_re:
-      cpu_subtype = int(output_re.group(1))
-      if cpu_subtype == 8: # x86_64h
-        config.available_features.add('x86_64h')
-  except:
-    pass
-
-  # 32-bit iOS simulator is deprecated and removed in latest Xcode.
-  if config.apple_platform == "iossim":
-    if config.target_arch == "i386":
-      config.unsupported = True
-
-  def get_macos_aligned_version(macos_vers):
-    platform = config.apple_platform
-    if platform == 'osx':
-      return macos_vers
-
-    macos_major, macos_minor = macos_vers
-    assert macos_major >= 10
-
-    if macos_major == 10:  # macOS 10.x
-      major = macos_minor
-      minor = 0
-    else:                  # macOS 11+
-      major = macos_major + 5
-      minor = macos_minor
-
-    assert major >= 11
-
-    if platform.startswith('ios') or platform.startswith('tvos'):
-      major -= 2
-    elif platform.startswith('watch'):
-      major -= 9
-    else:
-      lit_config.fatal("Unsupported apple platform '{}'".format(platform))
-
-    return (major, minor)
+config.substitutions.append(
+    ("%darwin_min_target_with_tls_support", "%min_macos_deployment_target=10.12")
+)
 
-  for vers in min_macos_deployment_target_substitutions:
-    flag = config.apple_platform_min_deployment_target_flag
-    major, minor = get_macos_aligned_version(vers)
-    if 'mtargetos' in flag:
-      sim = '-simulator' if 'sim' in config.apple_platform else ''
-      config.substitutions.append( ('%%min_macos_deployment_target=%s.%s' % vers, '{}{}.{}{}'.format(flag, major, minor, sim)) )
-    else:
-      config.substitutions.append( ('%%min_macos_deployment_target=%s.%s' % vers, '{}={}.{}'.format(flag, major, minor)) )
+if config.host_os == "Darwin":
+    osx_version = (10, 0, 0)
+    try:
+        osx_version = subprocess.check_output(
+            ["sw_vers", "-productVersion"], universal_newlines=True
+        )
+        osx_version = tuple(int(x) for x in osx_version.split("."))
+        if len(osx_version) == 2:
+            osx_version = (osx_version[0], osx_version[1], 0)
+        if osx_version >= (10, 11):
+            config.available_features.add("osx-autointerception")
+            config.available_features.add("osx-ld64-live_support")
+        if osx_version >= (13, 1):
+            config.available_features.add("jit-compatible-osx-swift-runtime")
+    except subprocess.CalledProcessError:
+        pass
+
+    config.darwin_osx_version = osx_version
+
+    # Detect x86_64h
+    try:
+        output = subprocess.check_output(["sysctl", "hw.cpusubtype"])
+        output_re = re.match("^hw.cpusubtype: ([0-9]+)$", output)
+        if output_re:
+            cpu_subtype = int(output_re.group(1))
+            if cpu_subtype == 8:  # x86_64h
+                config.available_features.add("x86_64h")
+    except:
+        pass
+
+    # 32-bit iOS simulator is deprecated and removed in latest Xcode.
+    if config.apple_platform == "iossim":
+        if config.target_arch == "i386":
+            config.unsupported = True
+
+    def get_macos_aligned_version(macos_vers):
+        platform = config.apple_platform
+        if platform == "osx":
+            return macos_vers
+
+        macos_major, macos_minor = macos_vers
+        assert macos_major >= 10
+
+        if macos_major == 10:  # macOS 10.x
+            major = macos_minor
+            minor = 0
+        else:  # macOS 11+
+            major = macos_major + 5
+            minor = macos_minor
+
+        assert major >= 11
+
+        if platform.startswith("ios") or platform.startswith("tvos"):
+            major -= 2
+        elif platform.startswith("watch"):
+            major -= 9
+        else:
+            lit_config.fatal("Unsupported apple platform '{}'".format(platform))
+
+        return (major, minor)
+
+    for vers in min_macos_deployment_target_substitutions:
+        flag = config.apple_platform_min_deployment_target_flag
+        major, minor = get_macos_aligned_version(vers)
+        if "mtargetos" in flag:
+            sim = "-simulator" if "sim" in config.apple_platform else ""
+            config.substitutions.append(
+                (
+                    "%%min_macos_deployment_target=%s.%s" % vers,
+                    "{}{}.{}{}".format(flag, major, minor, sim),
+                )
+            )
+        else:
+            config.substitutions.append(
+                (
+                    "%%min_macos_deployment_target=%s.%s" % vers,
+                    "{}={}.{}".format(flag, major, minor),
+                )
+            )
 else:
-  for vers in min_macos_deployment_target_substitutions:
-    config.substitutions.append( ('%%min_macos_deployment_target=%s.%s' % vers, '') )
+    for vers in min_macos_deployment_target_substitutions:
+        config.substitutions.append(("%%min_macos_deployment_target=%s.%s" % vers, ""))
 
 if config.android:
-  env = os.environ.copy()
-  if config.android_serial:
-    env['ANDROID_SERIAL'] = config.android_serial
-    config.environment['ANDROID_SERIAL'] = config.android_serial
-
-  adb = os.environ.get('ADB', 'adb')
-
-  # These are needed for tests to upload/download temp files, such as
-  # suppression-files, to device.
-  config.substitutions.append( ('%device_rundir/', "/data/local/tmp/Output/") )
-  config.substitutions.append( ('%push_to_device', "%s -s '%s' push " % (adb, env['ANDROID_SERIAL']) ) )
-  config.substitutions.append( ('%adb_shell ', "%s -s '%s' shell " % (adb, env['ANDROID_SERIAL']) ) )
-  config.substitutions.append( ('%device_rm', "%s -s '%s' shell 'rm ' " % (adb, env['ANDROID_SERIAL']) ) )
-
-  try:
-    android_api_level_str = subprocess.check_output([adb, "shell", "getprop", "ro.build.version.sdk"], env=env).rstrip()
-    android_api_codename = subprocess.check_output([adb, "shell", "getprop", "ro.build.version.codename"], env=env).rstrip().decode("utf-8")
-  except (subprocess.CalledProcessError, OSError):
-    lit_config.fatal("Failed to read ro.build.version.sdk (using '%s' as adb)" % adb)
-  try:
-    android_api_level = int(android_api_level_str)
-  except ValueError:
-    lit_config.fatal("Failed to read ro.build.version.sdk (using '%s' as adb): got '%s'" % (adb, android_api_level_str))
-  android_api_level = min(android_api_level, int(config.android_api_level))
-  for required in [26, 28, 29, 30]:
-    if android_api_level >= required:
-      config.available_features.add('android-%s' % required)
-  # FIXME: Replace with appropriate version when availible.
-  if android_api_level > 30 or (android_api_level == 30 and android_api_codename == 'S'):
-    config.available_features.add('android-thread-properties-api')
-
-  # Prepare the device.
-  android_tmpdir = '/data/local/tmp/Output'
-  subprocess.check_call([adb, "shell", "mkdir", "-p", android_tmpdir], env=env)
-  for file in config.android_files_to_push:
-    subprocess.check_call([adb, "push", file, android_tmpdir], env=env)
+    env = os.environ.copy()
+    if config.android_serial:
+        env["ANDROID_SERIAL"] = config.android_serial
+        config.environment["ANDROID_SERIAL"] = config.android_serial
+
+    adb = os.environ.get("ADB", "adb")
+
+    # These are needed for tests to upload/download temp files, such as
+    # suppression-files, to device.
+    config.substitutions.append(("%device_rundir/", "/data/local/tmp/Output/"))
+    config.substitutions.append(
+        ("%push_to_device", "%s -s '%s' push " % (adb, env["ANDROID_SERIAL"]))
+    )
+    config.substitutions.append(
+        ("%adb_shell ", "%s -s '%s' shell " % (adb, env["ANDROID_SERIAL"]))
+    )
+    config.substitutions.append(
+        ("%device_rm", "%s -s '%s' shell 'rm ' " % (adb, env["ANDROID_SERIAL"]))
+    )
+
+    try:
+        android_api_level_str = subprocess.check_output(
+            [adb, "shell", "getprop", "ro.build.version.sdk"], env=env
+        ).rstrip()
+        android_api_codename = (
+            subprocess.check_output(
+                [adb, "shell", "getprop", "ro.build.version.codename"], env=env
+            )
+            .rstrip()
+            .decode("utf-8")
+        )
+    except (subprocess.CalledProcessError, OSError):
+        lit_config.fatal(
+            "Failed to read ro.build.version.sdk (using '%s' as adb)" % adb
+        )
+    try:
+        android_api_level = int(android_api_level_str)
+    except ValueError:
+        lit_config.fatal(
+            "Failed to read ro.build.version.sdk (using '%s' as adb): got '%s'"
+            % (adb, android_api_level_str)
+        )
+    android_api_level = min(android_api_level, int(config.android_api_level))
+    for required in [26, 28, 29, 30]:
+        if android_api_level >= required:
+            config.available_features.add("android-%s" % required)
+    # FIXME: Replace with appropriate version when availible.
+    if android_api_level > 30 or (
+        android_api_level == 30 and android_api_codename == "S"
+    ):
+        config.available_features.add("android-thread-properties-api")
+
+    # Prepare the device.
+    android_tmpdir = "/data/local/tmp/Output"
+    subprocess.check_call([adb, "shell", "mkdir", "-p", android_tmpdir], env=env)
+    for file in config.android_files_to_push:
+        subprocess.check_call([adb, "push", file, android_tmpdir], env=env)
 else:
-  config.substitutions.append( ('%device_rundir/', "") )
-  config.substitutions.append( ('%push_to_device', "echo ") )
-  config.substitutions.append( ('%adb_shell', "echo ") )
-
-if config.host_os == 'Linux':
-  # detect whether we are using glibc, and which version
-  # NB: 'ldd' is just one of the tools commonly installed as part of glibc/musl
-  ldd_ver_cmd = subprocess.Popen(['ldd', '--version'],
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.DEVNULL,
-                                 env={'LANG': 'C'})
-  sout, _ = ldd_ver_cmd.communicate()
-  ver_lines = sout.splitlines()
-  if not config.android and len(ver_lines) and ver_lines[0].startswith(b"ldd "):
-    from distutils.version import LooseVersion
-    ver = LooseVersion(ver_lines[0].split()[-1].decode())
-    any_glibc = False
-    for required in ["2.19", "2.27", "2.30", "2.34", "2.37"]:
-      if ver >= LooseVersion(required):
-        config.available_features.add("glibc-" + required)
-        any_glibc = True
-      if any_glibc:
-        config.available_features.add("glibc")
+    config.substitutions.append(("%device_rundir/", ""))
+    config.substitutions.append(("%push_to_device", "echo "))
+    config.substitutions.append(("%adb_shell", "echo "))
+
+if config.host_os == "Linux":
+    # detect whether we are using glibc, and which version
+    # NB: 'ldd' is just one of the tools commonly installed as part of glibc/musl
+    ldd_ver_cmd = subprocess.Popen(
+        ["ldd", "--version"],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.DEVNULL,
+        env={"LANG": "C"},
+    )
+    sout, _ = ldd_ver_cmd.communicate()
+    ver_lines = sout.splitlines()
+    if not config.android and len(ver_lines) and ver_lines[0].startswith(b"ldd "):
+        from distutils.version import LooseVersion
+
+        ver = LooseVersion(ver_lines[0].split()[-1].decode())
+        any_glibc = False
+        for required in ["2.19", "2.27", "2.30", "2.34", "2.37"]:
+            if ver >= LooseVersion(required):
+                config.available_features.add("glibc-" + required)
+                any_glibc = True
+            if any_glibc:
+                config.available_features.add("glibc")
 
 sancovcc_path = os.path.join(config.llvm_tools_dir, "sancov")
 if os.path.exists(sancovcc_path):
-  config.available_features.add("has_sancovcc")
-  config.substitutions.append( ("%sancovcc ", sancovcc_path) )
+    config.available_features.add("has_sancovcc")
+    config.substitutions.append(("%sancovcc ", sancovcc_path))
+
 
 def liblto_path():
-  return os.path.join(config.llvm_shlib_dir, 'libLTO.dylib')
+    return os.path.join(config.llvm_shlib_dir, "libLTO.dylib")
+
 
 def is_darwin_lto_supported():
-  return os.path.exists(liblto_path())
+    return os.path.exists(liblto_path())
+
 
 def is_binutils_lto_supported():
-  if not os.path.exists(os.path.join(config.llvm_shlib_dir, 'LLVMgold.so')):
-    return False
+    if not os.path.exists(os.path.join(config.llvm_shlib_dir, "LLVMgold.so")):
+        return False
+
+    # We require both ld.bfd and ld.gold exist and support plugins. They are in
+    # the same repository 'binutils-gdb' and usually built together.
+    for exe in (config.gnu_ld_executable, config.gold_executable):
+        try:
+            ld_cmd = subprocess.Popen(
+                [exe, "--help"], stdout=subprocess.PIPE, env={"LANG": "C"}
+            )
+            ld_out = ld_cmd.stdout.read().decode()
+            ld_cmd.wait()
+        except OSError:
+            return False
+        if not "-plugin" in ld_out:
+            return False
 
-  # We require both ld.bfd and ld.gold exist and support plugins. They are in
-  # the same repository 'binutils-gdb' and usually built together.
-  for exe in (config.gnu_ld_executable, config.gold_executable):
-    try:
-      ld_cmd = subprocess.Popen([exe, '--help'], stdout=subprocess.PIPE, env={'LANG': 'C'})
-      ld_out = ld_cmd.stdout.read().decode()
-      ld_cmd.wait()
-    except OSError:
-      return False
-    if not '-plugin' in ld_out:
-      return False
+    return True
 
-  return True
 
 def is_windows_lto_supported():
-  if not target_is_msvc:
-    return True
-  return os.path.exists(os.path.join(config.llvm_tools_dir, 'lld-link.exe'))
-
-if config.host_os == 'Darwin' and is_darwin_lto_supported():
-  config.lto_supported = True
-  config.lto_flags = [ '-Wl,-lto_library,' + liblto_path() ]
-elif config.host_os in ['Linux', 'FreeBSD', 'NetBSD']:
-  config.lto_supported = False
-  if config.use_lld:
-    config.lto_supported = True
-  if is_binutils_lto_supported():
-    config.available_features.add('binutils_lto')
-    config.lto_supported = True
+    if not target_is_msvc:
+        return True
+    return os.path.exists(os.path.join(config.llvm_tools_dir, "lld-link.exe"))
 
-  if config.lto_supported:
+
+if config.host_os == "Darwin" and is_darwin_lto_supported():
+    config.lto_supported = True
+    config.lto_flags = ["-Wl,-lto_library," + liblto_path()]
+elif config.host_os in ["Linux", "FreeBSD", "NetBSD"]:
+    config.lto_supported = False
     if config.use_lld:
-      config.lto_flags = ["-fuse-ld=lld"]
-    else:
-      config.lto_flags = ["-fuse-ld=gold"]
-elif config.host_os == 'Windows' and is_windows_lto_supported():
-  config.lto_supported = True
-  config.lto_flags = ["-fuse-ld=lld"]
+        config.lto_supported = True
+    if is_binutils_lto_supported():
+        config.available_features.add("binutils_lto")
+        config.lto_supported = True
+
+    if config.lto_supported:
+        if config.use_lld:
+            config.lto_flags = ["-fuse-ld=lld"]
+        else:
+            config.lto_flags = ["-fuse-ld=gold"]
+elif config.host_os == "Windows" and is_windows_lto_supported():
+    config.lto_supported = True
+    config.lto_flags = ["-fuse-ld=lld"]
 else:
-  config.lto_supported = False
+    config.lto_supported = False
 
 if config.lto_supported:
-  config.available_features.add('lto')
-  if config.use_thinlto:
-    config.available_features.add('thinlto')
-    config.lto_flags += ["-flto=thin"]
-  else:
-    config.lto_flags += ["-flto"]
+    config.available_features.add("lto")
+    if config.use_thinlto:
+        config.available_features.add("thinlto")
+        config.lto_flags += ["-flto=thin"]
+    else:
+        config.lto_flags += ["-flto"]
 
 if config.have_rpc_xdr_h:
-  config.available_features.add('sunrpc')
+    config.available_features.add("sunrpc")
 
 # Ask llvm-config about assertion mode.
 try:
-  llvm_config_cmd = subprocess.Popen(
-      [os.path.join(config.llvm_tools_dir, 'llvm-config'), '--assertion-mode'],
-      stdout = subprocess.PIPE,
-      env=config.environment)
+    llvm_config_cmd = subprocess.Popen(
+        [os.path.join(config.llvm_tools_dir, "llvm-config"), "--assertion-mode"],
+        stdout=subprocess.PIPE,
+        env=config.environment,
+    )
 except OSError as e:
-  print("Could not launch llvm-config in " + config.llvm_tools_dir)
-  print("    Failed with error #{0}: {1}".format(e.errno, e.strerror))
-  exit(42)
+    print("Could not launch llvm-config in " + config.llvm_tools_dir)
+    print("    Failed with error #{0}: {1}".format(e.errno, e.strerror))
+    exit(42)
 
-if re.search(r'ON', llvm_config_cmd.stdout.read().decode('ascii')):
-  config.available_features.add('asserts')
+if re.search(r"ON", llvm_config_cmd.stdout.read().decode("ascii")):
+    config.available_features.add("asserts")
 llvm_config_cmd.wait()
 
 # Sanitizer tests tend to be flaky on Windows due to PR24554, so add some
 # retries. We don't do this on otther platforms because it's slower.
-if platform.system() == 'Windows':
-  config.test_retry_attempts = 2
+if platform.system() == "Windows":
+    config.test_retry_attempts = 2
 
 # No throttling on non-Darwin platforms.
-lit_config.parallelism_groups['shadow-memory'] = None
-
-if platform.system() == 'Darwin':
-  ios_device = config.apple_platform != 'osx' and not config.apple_platform.endswith('sim')
-  # Force sequential execution when running tests on iOS devices.
-  if ios_device:
-    lit_config.warning('Forcing sequential execution for iOS device tests')
-    lit_config.parallelism_groups['ios-device'] = 1
-    config.parallelism_group = 'ios-device'
-
-  # Only run up to 3 processes that require shadow memory simultaneously on
-  # 64-bit Darwin. Using more scales badly and hogs the system due to
-  # inefficient handling of large mmap'd regions (terabytes) by the kernel.
-  else:
-    lit_config.warning('Throttling sanitizer tests that require shadow memory on Darwin')
-    lit_config.parallelism_groups['shadow-memory'] = 3
+lit_config.parallelism_groups["shadow-memory"] = None
+
+if platform.system() == "Darwin":
+    ios_device = config.apple_platform != "osx" and not config.apple_platform.endswith(
+        "sim"
+    )
+    # Force sequential execution when running tests on iOS devices.
+    if ios_device:
+        lit_config.warning("Forcing sequential execution for iOS device tests")
+        lit_config.parallelism_groups["ios-device"] = 1
+        config.parallelism_group = "ios-device"
+
+    # Only run up to 3 processes that require shadow memory simultaneously on
+    # 64-bit Darwin. Using more scales badly and hogs the system due to
+    # inefficient handling of large mmap'd regions (terabytes) by the kernel.
+    else:
+        lit_config.warning(
+            "Throttling sanitizer tests that require shadow memory on Darwin"
+        )
+        lit_config.parallelism_groups["shadow-memory"] = 3
 
 # Multiple substitutions are necessary to support multiple shared objects used
 # at once.
 # Note that substitutions with numbers have to be defined first to avoid
 # being subsumed by substitutions with smaller postfix.
 for postfix in ["2", "1", ""]:
-  if config.host_os == 'Darwin':
-  config.substitutions.append( ("%ld_flags_rpath_exe" + postfix, '-Wl,-rpath,@executable_path/ %dynamiclib' + postfix) )
-    config.substitutions.append( ("%ld_flags_rpath_so" + postfix, '-install_name @rpath/`basename %dynamiclib{}`'.format(postfix)) )
-  elif config.host_os in ('FreeBSD', 'NetBSD', 'OpenBSD'):
-    config.substitutions.append( ("%ld_flags_rpath_exe" + postfix, "-Wl,-z,origin -Wl,-rpath,\$ORIGIN -L%T -l%xdynamiclib_namespec" + postfix) )
-    config.substitutions.append( ("%ld_flags_rpath_so" + postfix, '') )
-  elif config.host_os == 'Linux':
-    config.substitutions.append( ("%ld_flags_rpath_exe" + postfix, "-Wl,-rpath,\$ORIGIN -L%T -l%xdynamiclib_namespec" + postfix) )
-    config.substitutions.append( ("%ld_flags_rpath_so" + postfix, '') )
-  elif config.host_os == 'SunOS':
-    config.substitutions.append( ("%ld_flags_rpath_exe" + postfix, "-Wl,-R\$ORIGIN -L%T -l%xdynamiclib_namespec" + postfix) )
-    config.substitutions.append( ("%ld_flags_rpath_so" + postfix, '') )
-
-  # Must be defined after the substitutions that use %dynamiclib.
-  config.substitutions.append( ("%dynamiclib" + postfix, '%T/%xdynamiclib_filename' + postfix) )
-  config.substitutions.append( ("%xdynamiclib_filename" + postfix, 'lib%xdynamiclib_namespec{}.so'.format(postfix)) )
-  config.substitutions.append( ("%xdynamiclib_namespec", '%basename_t.dynamic') )
+    if config.host_os == "Darwin":
+        config.substitutions.append(
+            (
+                "%ld_flags_rpath_exe" + postfix,
+                "-Wl,-rpath,@executable_path/ %dynamiclib" + postfix,
+            )
+        )
+        config.substitutions.append(
+            (
+                "%ld_flags_rpath_so" + postfix,
+                "-install_name @rpath/`basename %dynamiclib{}`".format(postfix),
+            )
+        )
+    elif config.host_os in ("FreeBSD", "NetBSD", "OpenBSD"):
+        config.substitutions.append(
+            (
+                "%ld_flags_rpath_exe" + postfix,
+                "-Wl,-z,origin -Wl,-rpath,\$ORIGIN -L%T -l%xdynamiclib_namespec"
+                + postfix,
+            )
+        )
+        config.substitutions.append(("%ld_flags_rpath_so" + postfix, ""))
+    elif config.host_os == "Linux":
+        config.substitutions.append(
+            (
+                "%ld_flags_rpath_exe" + postfix,
+                "-Wl,-rpath,\$ORIGIN -L%T -l%xdynamiclib_namespec" + postfix,
+            )
+        )
+        config.substitutions.append(("%ld_flags_rpath_so" + postfix, ""))
+    elif config.host_os == "SunOS":
+        config.substitutions.append(
+            (
+                "%ld_flags_rpath_exe" + postfix,
+                "-Wl,-R\$ORIGIN -L%T -l%xdynamiclib_namespec" + postfix,
+            )
+        )
+        config.substitutions.append(("%ld_flags_rpath_so" + postfix, ""))
+
+    # Must be defined after the substitutions that use %dynamiclib.
+    config.substitutions.append(
+        ("%dynamiclib" + postfix, "%T/%xdynamiclib_filename" + postfix)
+    )
+    config.substitutions.append(
+        (
+            "%xdynamiclib_filename" + postfix,
+            "lib%xdynamiclib_namespec{}.so".format(postfix),
+        )
+    )
+    config.substitutions.append(("%xdynamiclib_namespec", "%basename_t.dynamic"))
 
 config.default_sanitizer_opts = []
-if config.host_os == 'Darwin':
-  # On Darwin, we default to `abort_on_error=1`, which would make tests run
-  # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
-  config.default_sanitizer_opts += ['abort_on_error=0']
-  config.default_sanitizer_opts += ['log_to_syslog=0']
-  if lit.util.which('log'):
-    # Querying the log can only done by a privileged user so
-    # so check if we can query the log.
-    exit_code = -1
-    with open('/dev/null', 'r') as f:
-      # Run a `log show` command the should finish fairly quickly and produce very little output.
-      exit_code = subprocess.call(['log', 'show', '--last', '1m', '--predicate', '1 == 0'], stdout=f, stderr=f)
-    if exit_code == 0:
-      config.available_features.add('darwin_log_cmd')
+if config.host_os == "Darwin":
+    # On Darwin, we default to `abort_on_error=1`, which would make tests run
+    # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
+    config.default_sanitizer_opts += ["abort_on_error=0"]
+    config.default_sanitizer_opts += ["log_to_syslog=0"]
+    if lit.util.which("log"):
+        # Querying the log can only done by a privileged user so
+        # so check if we can query the log.
+        exit_code = -1
+        with open("/dev/null", "r") as f:
+            # Run a `log show` command the should finish fairly quickly and produce very little output.
+            exit_code = subprocess.call(
+                ["log", "show", "--last", "1m", "--predicate", "1 == 0"],
+                stdout=f,
+                stderr=f,
+            )
+        if exit_code == 0:
+            config.available_features.add("darwin_log_cmd")
+        else:
+            lit_config.warning("log command found but cannot queried")
     else:
-      lit_config.warning('log command found but cannot queried')
-  else:
-    lit_config.warning('log command not found. Some tests will be skipped.')
+        lit_config.warning("log command not found. Some tests will be skipped.")
 elif config.android:
-  config.default_sanitizer_opts += ['abort_on_error=0']
+    config.default_sanitizer_opts += ["abort_on_error=0"]
 
 # Allow tests to use REQUIRES=stable-runtime.  For use when you cannot use XFAIL
 # because the test hangs or fails on one configuration and not the other.
-if config.android or (config.target_arch not in ['arm', 'armhf', 'aarch64']):
-  config.available_features.add('stable-runtime')
+if config.android or (config.target_arch not in ["arm", "armhf", "aarch64"]):
+    config.available_features.add("stable-runtime")
 
 if config.asan_shadow_scale:
-  config.available_features.add("shadow-scale-%s" % config.asan_shadow_scale)
+    config.available_features.add("shadow-scale-%s" % config.asan_shadow_scale)
 else:
-  config.available_features.add("shadow-scale-3")
+    config.available_features.add("shadow-scale-3")
 
 if config.memprof_shadow_scale:
-  config.available_features.add("memprof-shadow-scale-%s" % config.memprof_shadow_scale)
+    config.available_features.add(
+        "memprof-shadow-scale-%s" % config.memprof_shadow_scale
+    )
 else:
-  config.available_features.add("memprof-shadow-scale-3")
+    config.available_features.add("memprof-shadow-scale-3")
 
 if config.expensive_checks:
-  config.available_features.add("expensive_checks")
+    config.available_features.add("expensive_checks")
 
 # Propagate the LLD/LTO into the clang config option, so nothing else is needed.
 run_wrapper = []
-target_cflags = [getattr(config, 'target_cflags', None)]
+target_cflags = [getattr(config, "target_cflags", None)]
 extra_cflags = []
 
 if config.use_lto and config.lto_supported:
-  extra_cflags += config.lto_flags
+    extra_cflags += config.lto_flags
 elif config.use_lto and (not config.lto_supported):
-  config.unsupported = True
+    config.unsupported = True
 
 if config.use_lld and config.has_lld and not config.use_lto:
-  extra_cflags += ["-fuse-ld=lld"]
+    extra_cflags += ["-fuse-ld=lld"]
 elif config.use_lld and (not config.has_lld):
-  config.unsupported = True
+    config.unsupported = True
 
 # Append any extra flags passed in lit_config
-append_target_cflags = lit_config.params.get('append_target_cflags', None)
+append_target_cflags = lit_config.params.get("append_target_cflags", None)
 if append_target_cflags:
-  lit_config.note('Appending to extra_cflags: "{}"'.format(append_target_cflags))
-  extra_cflags += [append_target_cflags]
+    lit_config.note('Appending to extra_cflags: "{}"'.format(append_target_cflags))
+    extra_cflags += [append_target_cflags]
 
-config.clang = " " + " ".join(run_wrapper + [config.compile_wrapper, config.clang]) + " "
+config.clang = (
+    " " + " ".join(run_wrapper + [config.compile_wrapper, config.clang]) + " "
+)
 config.target_cflags = " " + " ".join(target_cflags + extra_cflags) + " "
 
-if config.host_os == 'Darwin':
-  config.substitutions.append((
-    "%get_pid_from_output",
-    "{} {}/get_pid_from_output.py".format(
-      shlex.quote(config.python_executable),
-      shlex.quote(get_ios_commands_dir())
-    ))
-  )
-  config.substitutions.append(
-    ("%print_crashreport_for_pid",
-    "{} {}/print_crashreport_for_pid.py".format(
-      shlex.quote(config.python_executable),
-      shlex.quote(get_ios_commands_dir())
-    ))
-  )
+if config.host_os == "Darwin":
+    config.substitutions.append(
+        (
+            "%get_pid_from_output",
+            "{} {}/get_pid_from_output.py".format(
+                shlex.quote(config.python_executable),
+                shlex.quote(get_ios_commands_dir()),
+            ),
+        )
+    )
+    config.substitutions.append(
+        (
+            "%print_crashreport_for_pid",
+            "{} {}/print_crashreport_for_pid.py".format(
+                shlex.quote(config.python_executable),
+                shlex.quote(get_ios_commands_dir()),
+            ),
+        )
+    )
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff  --git a/compiler-rt/test/lsan/TestCases/Darwin/lit.local.cfg.py b/compiler-rt/test/lsan/TestCases/Darwin/lit.local.cfg.py
index a85dfcd24c08e..520a963d01198 100644
--- a/compiler-rt/test/lsan/TestCases/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/lsan/TestCases/Darwin/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Darwin']:
-  config.unsupported = True
+if root.host_os not in ["Darwin"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/lsan/TestCases/Linux/lit.local.cfg.py b/compiler-rt/test/lsan/TestCases/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/lsan/TestCases/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/lsan/TestCases/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/lsan/TestCases/Posix/lit.local.cfg.py b/compiler-rt/test/lsan/TestCases/Posix/lit.local.cfg.py
index 60a9460820a62..63240c3962565 100644
--- a/compiler-rt/test/lsan/TestCases/Posix/lit.local.cfg.py
+++ b/compiler-rt/test/lsan/TestCases/Posix/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os in ['Windows']:
-  config.unsupported = True
+if root.host_os in ["Windows"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/lsan/lit.common.cfg.py b/compiler-rt/test/lsan/lit.common.cfg.py
index cec5664f45d35..e9b974955730d 100644
--- a/compiler-rt/test/lsan/lit.common.cfg.py
+++ b/compiler-rt/test/lsan/lit.common.cfg.py
@@ -7,66 +7,70 @@
 
 import lit.util
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Choose between standalone and LSan+(ASan|HWAsan) modes.
-lsan_lit_test_mode = get_required_attr(config, 'lsan_lit_test_mode')
-target_arch = getattr(config, 'target_arch', None)
+lsan_lit_test_mode = get_required_attr(config, "lsan_lit_test_mode")
+target_arch = getattr(config, "target_arch", None)
 
 if lsan_lit_test_mode == "Standalone":
-  config.name = "LeakSanitizer-Standalone"
-  lsan_cflags = ["-fsanitize=leak"]
-  config.available_features.add('lsan-standalone')
+    config.name = "LeakSanitizer-Standalone"
+    lsan_cflags = ["-fsanitize=leak"]
+    config.available_features.add("lsan-standalone")
 elif lsan_lit_test_mode == "AddressSanitizer":
-  config.name = "LeakSanitizer-AddressSanitizer"
-  lsan_cflags = ["-fsanitize=address"]
-  config.available_features.add('asan')
-  if config.host_os == 'NetBSD':
-    config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+    config.name = "LeakSanitizer-AddressSanitizer"
+    lsan_cflags = ["-fsanitize=address"]
+    config.available_features.add("asan")
+    if config.host_os == "NetBSD":
+        config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))
 elif lsan_lit_test_mode == "HWAddressSanitizer":
-  config.name = "LeakSanitizer-HWAddressSanitizer"
-  lsan_cflags = ["-fsanitize=hwaddress", "-fuse-ld=lld"]
-  if target_arch == "x86_64":
-    lsan_cflags = lsan_cflags + [ '-fsanitize-hwaddress-experimental-aliasing']
-  config.available_features.add('hwasan')
-  if config.host_os == 'NetBSD':
-    config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+    config.name = "LeakSanitizer-HWAddressSanitizer"
+    lsan_cflags = ["-fsanitize=hwaddress", "-fuse-ld=lld"]
+    if target_arch == "x86_64":
+        lsan_cflags = lsan_cflags + ["-fsanitize-hwaddress-experimental-aliasing"]
+    config.available_features.add("hwasan")
+    if config.host_os == "NetBSD":
+        config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))
 else:
-  lit_config.fatal("Unknown LSan test mode: %r" % lsan_lit_test_mode)
+    lit_config.fatal("Unknown LSan test mode: %r" % lsan_lit_test_mode)
 config.name += config.name_suffix
 
 # Platform-specific default LSAN_OPTIONS for lit tests.
-default_common_opts_str = ':'.join(list(config.default_sanitizer_opts))
-default_lsan_opts = default_common_opts_str + ':detect_leaks=1'
-if config.host_os == 'Darwin':
-  # On Darwin, we default to `abort_on_error=1`, which would make tests run
-  # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
-  # Also, make sure we do not overwhelm the syslog while testing.
-  default_lsan_opts += ':abort_on_error=0'
-  default_lsan_opts += ':log_to_syslog=0'
+default_common_opts_str = ":".join(list(config.default_sanitizer_opts))
+default_lsan_opts = default_common_opts_str + ":detect_leaks=1"
+if config.host_os == "Darwin":
+    # On Darwin, we default to `abort_on_error=1`, which would make tests run
+    # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
+    # Also, make sure we do not overwhelm the syslog while testing.
+    default_lsan_opts += ":abort_on_error=0"
+    default_lsan_opts += ":log_to_syslog=0"
 
 if default_lsan_opts:
-  config.environment['LSAN_OPTIONS'] = default_lsan_opts
-  default_lsan_opts += ':'
-config.substitutions.append(('%env_lsan_opts=',
-                             'env LSAN_OPTIONS=' + default_lsan_opts))
+    config.environment["LSAN_OPTIONS"] = default_lsan_opts
+    default_lsan_opts += ":"
+config.substitutions.append(
+    ("%env_lsan_opts=", "env LSAN_OPTIONS=" + default_lsan_opts)
+)
 
-if lit.util.which('strace'):
-  config.available_features.add('strace')
+if lit.util.which("strace"):
+    config.available_features.add("strace")
 
 clang_cflags = ["-O0", config.target_cflags] + config.debug_info_flags
 if config.android:
-  clang_cflags = clang_cflags + ["-fno-emulated-tls"]
+    clang_cflags = clang_cflags + ["-fno-emulated-tls"]
 clang_cxxflags = config.cxx_mode_flags + clang_cflags
 lsan_incdir = config.test_source_root + "/../"
 clang_lsan_cflags = clang_cflags + lsan_cflags + ["-I%s" % lsan_incdir]
@@ -75,33 +79,59 @@ def get_required_attr(config, attr_name):
 config.clang_cflags = clang_cflags
 config.clang_cxxflags = clang_cxxflags
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
-config.substitutions.append( ("%clang ", build_invocation(clang_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(clang_cxxflags)) )
-config.substitutions.append( ("%clang_lsan ", build_invocation(clang_lsan_cflags)) )
-config.substitutions.append( ("%clangxx_lsan ", build_invocation(clang_lsan_cxxflags)) )
-config.substitutions.append( ("%clang_hwasan ", build_invocation(clang_lsan_cflags)) )
-config.substitutions.append( ("%clangxx_hwasan ", build_invocation(clang_lsan_cxxflags)) )
+config.substitutions.append(("%clang ", build_invocation(clang_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(clang_cxxflags)))
+config.substitutions.append(("%clang_lsan ", build_invocation(clang_lsan_cflags)))
+config.substitutions.append(("%clangxx_lsan ", build_invocation(clang_lsan_cxxflags)))
+config.substitutions.append(("%clang_hwasan ", build_invocation(clang_lsan_cflags)))
+config.substitutions.append(("%clangxx_hwasan ", build_invocation(clang_lsan_cxxflags)))
 
 
 # LeakSanitizer tests are currently supported on
 # Android{aarch64, x86, x86_64}, x86-64 Linux, PowerPC64 Linux, arm Linux, mips64 Linux, s390x Linux, loongarch64 Linux and x86_64 Darwin.
-supported_android = config.android and config.target_arch in ['x86_64', 'i386', 'aarch64'] and 'android-thread-properties-api' in config.available_features
-supported_linux = (not config.android) and config.host_os == 'Linux' and config.host_arch in ['aarch64', 'x86_64', 'ppc64', 'ppc64le', 'mips64', 'riscv64', 'arm', 'armhf', 'armv7l', 's390x', 'loongarch64']
-supported_darwin = config.host_os == 'Darwin' and config.target_arch in ['x86_64']
-supported_netbsd = config.host_os == 'NetBSD' and config.target_arch in ['x86_64', 'i386']
+supported_android = (
+    config.android
+    and config.target_arch in ["x86_64", "i386", "aarch64"]
+    and "android-thread-properties-api" in config.available_features
+)
+supported_linux = (
+    (not config.android)
+    and config.host_os == "Linux"
+    and config.host_arch
+    in [
+        "aarch64",
+        "x86_64",
+        "ppc64",
+        "ppc64le",
+        "mips64",
+        "riscv64",
+        "arm",
+        "armhf",
+        "armv7l",
+        "s390x",
+        "loongarch64",
+    ]
+)
+supported_darwin = config.host_os == "Darwin" and config.target_arch in ["x86_64"]
+supported_netbsd = config.host_os == "NetBSD" and config.target_arch in [
+    "x86_64",
+    "i386",
+]
 if not (supported_android or supported_linux or supported_darwin or supported_netbsd):
-  config.unsupported = True
+    config.unsupported = True
 
 # Don't support Thumb due to broken fast unwinder
-if re.search('mthumb', config.target_cflags) is not None:
-  config.unsupported = True
+if re.search("mthumb", config.target_cflags) is not None:
+    config.unsupported = True
 
 # HWASAN tests require lld because without D65857, ld.bfd and ld.gold would
 # generate a corrupted binary. Mark them unsupported if lld is not available.
-if 'hwasan' in config.available_features and not config.has_lld:
-  config.unsupported = True
+if "hwasan" in config.available_features and not config.has_lld:
+    config.unsupported = True
 
-config.suffixes = ['.c', '.cpp', '.mm']
+config.suffixes = [".c", ".cpp", ".mm"]

diff --git a/compiler-rt/test/memprof/lit.cfg.py b/compiler-rt/test/memprof/lit.cfg.py
index 80a325a38e40d..4e5d7ba405a20 100644
--- a/compiler-rt/test/memprof/lit.cfg.py
+++ b/compiler-rt/test/memprof/lit.cfg.py
@@ -6,87 +6,108 @@
 
 import lit.formats
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup config name.
-config.name = 'MemProfiler' + config.name_suffix
+config.name = "MemProfiler" + config.name_suffix
 
 # Platform-specific default MEMPROF_OPTIONS for lit tests.
 default_memprof_opts = list(config.default_sanitizer_opts)
 
-default_memprof_opts_str = ':'.join(default_memprof_opts)
+default_memprof_opts_str = ":".join(default_memprof_opts)
 if default_memprof_opts_str:
-  config.environment['MEMPROF_OPTIONS'] = default_memprof_opts_str
-  default_memprof_opts_str += ':'
-config.substitutions.append(('%env_memprof_opts=',
-                             'env MEMPROF_OPTIONS=' + default_memprof_opts_str))
+    config.environment["MEMPROF_OPTIONS"] = default_memprof_opts_str
+    default_memprof_opts_str += ":"
+config.substitutions.append(
+    ("%env_memprof_opts=", "env MEMPROF_OPTIONS=" + default_memprof_opts_str)
+)
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
-libdl_flag = '-ldl'
+libdl_flag = "-ldl"
 
 # Setup default compiler flags used with -fmemory-profile option.
 # FIXME: Review the set of required flags and check if it can be reduced.
-target_cflags = [get_required_attr(config, 'target_cflags')]
+target_cflags = [get_required_attr(config, "target_cflags")]
 target_cxxflags = config.cxx_mode_flags + target_cflags
-clang_memprof_static_cflags = (['-fmemory-profile',
-                            '-mno-omit-leaf-frame-pointer',
-                            '-fno-omit-frame-pointer',
-                            '-fno-optimize-sibling-calls'] +
-                            config.debug_info_flags + target_cflags)
+clang_memprof_static_cflags = (
+    [
+        "-fmemory-profile",
+        "-mno-omit-leaf-frame-pointer",
+        "-fno-omit-frame-pointer",
+        "-fno-optimize-sibling-calls",
+    ]
+    + config.debug_info_flags
+    + target_cflags
+)
 clang_memprof_static_cxxflags = config.cxx_mode_flags + clang_memprof_static_cflags
 
 memprof_dynamic_flags = []
 if config.memprof_dynamic:
-  memprof_dynamic_flags = ['-shared-libsan']
-  config.available_features.add('memprof-dynamic-runtime')
+    memprof_dynamic_flags = ["-shared-libsan"]
+    config.available_features.add("memprof-dynamic-runtime")
 else:
-  config.available_features.add('memprof-static-runtime')
+    config.available_features.add("memprof-static-runtime")
 clang_memprof_cflags = clang_memprof_static_cflags + memprof_dynamic_flags
 clang_memprof_cxxflags = clang_memprof_static_cxxflags + memprof_dynamic_flags
 
+
 def build_invocation(compile_flags):
-  return ' ' + ' '.join([config.clang] + compile_flags) + ' '
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
-config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
-config.substitutions.append( ("%clang_memprof ", build_invocation(clang_memprof_cflags)) )
-config.substitutions.append( ("%clangxx_memprof ", build_invocation(clang_memprof_cxxflags)) )
+config.substitutions.append(("%clang ", build_invocation(target_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(target_cxxflags)))
+config.substitutions.append(("%clang_memprof ", build_invocation(clang_memprof_cflags)))
+config.substitutions.append(
+    ("%clangxx_memprof ", build_invocation(clang_memprof_cxxflags))
+)
 if config.memprof_dynamic:
-  shared_libmemprof_path = os.path.join(config.compiler_rt_libdir, 'libclang_rt.memprof{}.so'.format(config.target_suffix))
-  config.substitutions.append( ("%shared_libmemprof", shared_libmemprof_path) )
-  config.substitutions.append( ("%clang_memprof_static ", build_invocation(clang_memprof_static_cflags)) )
-  config.substitutions.append( ("%clangxx_memprof_static ", build_invocation(clang_memprof_static_cxxflags)) )
+    shared_libmemprof_path = os.path.join(
+        config.compiler_rt_libdir,
+        "libclang_rt.memprof{}.so".format(config.target_suffix),
+    )
+    config.substitutions.append(("%shared_libmemprof", shared_libmemprof_path))
+    config.substitutions.append(
+        ("%clang_memprof_static ", build_invocation(clang_memprof_static_cflags))
+    )
+    config.substitutions.append(
+        ("%clangxx_memprof_static ", build_invocation(clang_memprof_static_cxxflags))
+    )
 
-config.substitutions.append( ("%libdl", libdl_flag) )
+config.substitutions.append(("%libdl", libdl_flag))
 
-config.available_features.add('memprof-' + config.bits + '-bits')
+config.available_features.add("memprof-" + config.bits + "-bits")
 
-config.available_features.add('fast-unwinder-works')
+config.available_features.add("fast-unwinder-works")
 
 # Set LD_LIBRARY_PATH to pick dynamic runtime up properly.
 new_ld_library_path = os.path.pathsep.join(
-  (config.compiler_rt_libdir, config.environment.get('LD_LIBRARY_PATH', '')))
-config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
+    (config.compiler_rt_libdir, config.environment.get("LD_LIBRARY_PATH", ""))
+)
+config.environment["LD_LIBRARY_PATH"] = new_ld_library_path
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
-config.substitutions.append(('%fPIC', '-fPIC'))
-config.substitutions.append(('%fPIE', '-fPIE'))
-config.substitutions.append(('%pie', '-pie'))
+config.substitutions.append(("%fPIC", "-fPIC"))
+config.substitutions.append(("%fPIE", "-fPIE"))
+config.substitutions.append(("%pie", "-pie"))
 
 # Only run the tests on supported OSs.
-if config.host_os not in ['Linux']:
-  config.unsupported = True
+if config.host_os not in ["Linux"]:
+    config.unsupported = True
 
 if not config.parallelism_group:
-  config.parallelism_group = 'shadow-memory'
+    config.parallelism_group = "shadow-memory"

diff --git a/compiler-rt/test/metadata/lit.cfg.py b/compiler-rt/test/metadata/lit.cfg.py
index aefc97f09ed92..a66e01e483d77 100644
--- a/compiler-rt/test/metadata/lit.cfg.py
+++ b/compiler-rt/test/metadata/lit.cfg.py
@@ -1,9 +1,9 @@
 import os
 
-config.name = 'SanitizerBinaryMetadata'
+config.name = "SanitizerBinaryMetadata"
 config.test_source_root = os.path.dirname(__file__)
-config.suffixes = ['.cpp']
+config.suffixes = [".cpp"]
 # Binary metadata is currently emited only for ELF binaries
 # and sizes of stack arguments depend on the arch.
-if config.host_os not in ['Linux'] or config.target_arch not in ['x86_64']:
-   config.unsupported = True
+if config.host_os not in ["Linux"] or config.target_arch not in ["x86_64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/msan/Linux/lit.local.cfg.py b/compiler-rt/test/msan/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/msan/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/msan/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/msan/lit.cfg.py b/compiler-rt/test/msan/lit.cfg.py
index 8ec1614be130f..361be79e2557e 100644
--- a/compiler-rt/test/msan/lit.cfg.py
+++ b/compiler-rt/test/msan/lit.cfg.py
@@ -3,51 +3,57 @@
 import os
 
 # Setup config name.
-config.name = 'MemorySanitizer' + getattr(config, 'name_suffix', 'default')
+config.name = "MemorySanitizer" + getattr(config, "name_suffix", "default")
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Setup default compiler flags used with -fsanitize=memory option.
-clang_msan_cflags = (["-fsanitize=memory",
-                      "-mno-omit-leaf-frame-pointer",
-                      "-fno-omit-frame-pointer",
-                      "-fno-optimize-sibling-calls"] +
-                      [config.target_cflags] +
-                      config.debug_info_flags)
+clang_msan_cflags = (
+    [
+        "-fsanitize=memory",
+        "-mno-omit-leaf-frame-pointer",
+        "-fno-omit-frame-pointer",
+        "-fno-optimize-sibling-calls",
+    ]
+    + [config.target_cflags]
+    + config.debug_info_flags
+)
 # Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD.
-if config.host_os == 'FreeBSD':
-  clang_msan_cflags += ["-lexecinfo", "-fPIC"]
+if config.host_os == "FreeBSD":
+    clang_msan_cflags += ["-lexecinfo", "-fPIC"]
 # On SystemZ we need -mbackchain to make the fast unwinder work.
-if config.target_arch == 's390x':
-  clang_msan_cflags.append("-mbackchain")
+if config.target_arch == "s390x":
+    clang_msan_cflags.append("-mbackchain")
 clang_msan_cxxflags = config.cxx_mode_flags + clang_msan_cflags
 
 # Flags for KMSAN invocation. This is C-only, we're not interested in C++.
-clang_kmsan_cflags = (["-fsanitize=kernel-memory"] +
-                      [config.target_cflags] +
-                      config.debug_info_flags)
+clang_kmsan_cflags = (
+    ["-fsanitize=kernel-memory"] + [config.target_cflags] + config.debug_info_flags
+)
+
 
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
-config.substitutions.append( ("%clang_msan ", build_invocation(clang_msan_cflags)) )
-config.substitutions.append( ("%clangxx_msan ", build_invocation(clang_msan_cxxflags)) )
-config.substitutions.append( ("%clang_kmsan ", build_invocation(clang_kmsan_cflags)) )
+config.substitutions.append(("%clang_msan ", build_invocation(clang_msan_cflags)))
+config.substitutions.append(("%clangxx_msan ", build_invocation(clang_msan_cxxflags)))
+config.substitutions.append(("%clang_kmsan ", build_invocation(clang_kmsan_cflags)))
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
-if config.host_os not in ['Linux', 'NetBSD', 'FreeBSD']:
-  config.unsupported = True
+if config.host_os not in ["Linux", "NetBSD", "FreeBSD"]:
+    config.unsupported = True
 
 # For mips64, mips64el we have forced store_context_size to 1 because these
 # archs use slow unwinder which is not async signal safe. Therefore we only
 # check the first frame since store_context size is 1.
-if config.host_arch in ['mips64', 'mips64el']:
-  config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK'))
+if config.host_arch in ["mips64", "mips64el"]:
+    config.substitutions.append(("CHECK-%short-stack", "CHECK-SHORT-STACK"))
 else:
-  config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK'))
+    config.substitutions.append(("CHECK-%short-stack", "CHECK-FULL-STACK"))
 
-if config.host_os == 'NetBSD':
-  config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+if config.host_os == "NetBSD":
+    config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))

diff --git a/compiler-rt/test/orc/TestCases/Darwin/arm64/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Darwin/arm64/lit.local.cfg.py
index a4b20fc5adf6e..79398ec6738c8 100644
--- a/compiler-rt/test/orc/TestCases/Darwin/arm64/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Darwin/arm64/lit.local.cfg.py
@@ -1,5 +1,5 @@
-if config.root.host_arch not in ['aarch64', 'arm64']:
-  config.unsupported = True
+if config.root.host_arch not in ["aarch64", "arm64"]:
+    config.unsupported = True
 
-if config.target_arch not in ['aarch64', 'arm64']:
-  config.unsupported = True
\ No newline at end of file
+if config.target_arch not in ["aarch64", "arm64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Darwin/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Darwin/lit.local.cfg.py
index ffa8fd83cbf8e..b455a936e7cc1 100644
--- a/compiler-rt/test/orc/TestCases/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Darwin/lit.local.cfg.py
@@ -1,2 +1,2 @@
-if config.root.host_os != 'Darwin':
-  config.unsupported = True
+if config.root.host_os != "Darwin":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Darwin/x86-64/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Darwin/x86-64/lit.local.cfg.py
index c6eb0acca30e1..1a853ffcd61fb 100644
--- a/compiler-rt/test/orc/TestCases/Darwin/x86-64/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Darwin/x86-64/lit.local.cfg.py
@@ -1,5 +1,5 @@
-if config.root.host_arch != 'x86_64':
-  config.unsupported = True
+if config.root.host_arch != "x86_64":
+    config.unsupported = True
 
-if config.target_arch != 'x86_64':
-  config.unsupported = True
\ No newline at end of file
+if config.target_arch != "x86_64":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/FreeBSD/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/FreeBSD/lit.local.cfg.py
index d00a28f9f961d..e9b1b38ccacd1 100644
--- a/compiler-rt/test/orc/TestCases/FreeBSD/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/FreeBSD/lit.local.cfg.py
@@ -1,2 +1,2 @@
-if config.root.host_os != 'FreeBSD':
-  config.unsupported = True
+if config.root.host_os != "FreeBSD":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/FreeBSD/x86-64/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/FreeBSD/x86-64/lit.local.cfg.py
index 117b77e5a3c97..675dc4d2cddd6 100644
--- a/compiler-rt/test/orc/TestCases/FreeBSD/x86-64/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/FreeBSD/x86-64/lit.local.cfg.py
@@ -1,5 +1,5 @@
-if config.root.host_arch not in ['x86_64', 'amd64']:
-  config.unsupported = True
+if config.root.host_arch not in ["x86_64", "amd64"]:
+    config.unsupported = True
 
-if config.target_arch not in ['x86_64', 'amd64']:
-  config.unsupported = True
\ No newline at end of file
+if config.target_arch not in ["x86_64", "amd64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Generic/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Generic/lit.local.cfg.py
index 05be16df78e20..043dabec0112c 100644
--- a/compiler-rt/test/orc/TestCases/Generic/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Generic/lit.local.cfg.py
@@ -1,2 +1,2 @@
 if not config.test_target_is_host_executable:
-  config.unsupported = True
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Linux/aarch64/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Linux/aarch64/lit.local.cfg.py
index ad928a9a56a29..ab47e9203a278 100644
--- a/compiler-rt/test/orc/TestCases/Linux/aarch64/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Linux/aarch64/lit.local.cfg.py
@@ -1,5 +1,5 @@
-if config.root.host_arch != 'aarch64':
-  config.unsupported = True
+if config.root.host_arch != "aarch64":
+    config.unsupported = True
 
-if config.target_arch != 'aarch64':
-  config.unsupported = True
+if config.target_arch != "aarch64":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Linux/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Linux/lit.local.cfg.py
index 6eceeb1e673ef..7d85fa3fce392 100644
--- a/compiler-rt/test/orc/TestCases/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Linux/lit.local.cfg.py
@@ -1,2 +1,2 @@
-if config.root.host_os != 'Linux':
-  config.unsupported = True
+if config.root.host_os != "Linux":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Linux/x86-64/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Linux/x86-64/lit.local.cfg.py
index f5b50ef01c370..1a853ffcd61fb 100644
--- a/compiler-rt/test/orc/TestCases/Linux/x86-64/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Linux/x86-64/lit.local.cfg.py
@@ -1,5 +1,5 @@
-if config.root.host_arch != 'x86_64':
-  config.unsupported = True
+if config.root.host_arch != "x86_64":
+    config.unsupported = True
 
-if config.target_arch != 'x86_64':
-  config.unsupported = True
+if config.target_arch != "x86_64":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Windows/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Windows/lit.local.cfg.py
index 28ddacdd3e99c..6d4e7da813641 100644
--- a/compiler-rt/test/orc/TestCases/Windows/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Windows/lit.local.cfg.py
@@ -1,2 +1,2 @@
-if config.root.host_os != 'Windows':
-  config.unsupported = True
+if config.root.host_os != "Windows":
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/TestCases/Windows/x86-64/lit.local.cfg.py b/compiler-rt/test/orc/TestCases/Windows/x86-64/lit.local.cfg.py
index d04690255869e..4d62696ad8872 100644
--- a/compiler-rt/test/orc/TestCases/Windows/x86-64/lit.local.cfg.py
+++ b/compiler-rt/test/orc/TestCases/Windows/x86-64/lit.local.cfg.py
@@ -1,5 +1,5 @@
-if config.root.host_arch not in ['AMD64','x86_64']:
-  config.unsupported = True
+if config.root.host_arch not in ["AMD64", "x86_64"]:
+    config.unsupported = True
 
-if config.target_arch not in ['AMD64','x86_64']:
-  config.unsupported = True
+if config.target_arch not in ["AMD64", "x86_64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/orc/lit.cfg.py b/compiler-rt/test/orc/lit.cfg.py
index 9b409c42f005c..bd031d79826d3 100644
--- a/compiler-rt/test/orc/lit.cfg.py
+++ b/compiler-rt/test/orc/lit.cfg.py
@@ -3,7 +3,7 @@
 import os
 
 # Setup config name.
-config.name = 'ORC' + config.name_suffix
+config.name = "ORC" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
@@ -12,51 +12,70 @@
 host_arch_compatible = config.target_arch == config.host_arch
 
 if config.host_arch == "x86_64h" and config.target_arch == "x86_64":
-  host_arch_compatible = True
-config.test_target_is_host_executable = config.target_os == config.host_os and host_arch_compatible
+    host_arch_compatible = True
+config.test_target_is_host_executable = (
+    config.target_os == config.host_os and host_arch_compatible
+)
 
 # Assume that llvm-jitlink is in the config.llvm_tools_dir.
-llvm_jitlink = os.path.join(config.llvm_tools_dir, 'llvm-jitlink')
-orc_rt_executor_stem = os.path.join(config.compiler_rt_obj_root, 'lib/orc/tests/tools/orc-rt-executor')
-lli = os.path.join(config.llvm_tools_dir, 'lli')
-if config.host_os == 'Darwin':
-  orc_rt_path = '%s/liborc_rt_osx.a' % config.compiler_rt_libdir
+llvm_jitlink = os.path.join(config.llvm_tools_dir, "llvm-jitlink")
+orc_rt_executor_stem = os.path.join(
+    config.compiler_rt_obj_root, "lib/orc/tests/tools/orc-rt-executor"
+)
+lli = os.path.join(config.llvm_tools_dir, "lli")
+if config.host_os == "Darwin":
+    orc_rt_path = "%s/liborc_rt_osx.a" % config.compiler_rt_libdir
 else:
-  orc_rt_path = '%s/liborc_rt%s.a' % (config.compiler_rt_libdir, config.target_suffix)
+    orc_rt_path = "%s/liborc_rt%s.a" % (config.compiler_rt_libdir, config.target_suffix)
 
 if config.libunwind_shared:
-  config.available_features.add('libunwind-available')
-  shared_libunwind_path = os.path.join(config.libunwind_install_dir, 'libunwind.so')
-  config.substitutions.append( ("%shared_libunwind", shared_libunwind_path) )
+    config.available_features.add("libunwind-available")
+    shared_libunwind_path = os.path.join(config.libunwind_install_dir, "libunwind.so")
+    config.substitutions.append(("%shared_libunwind", shared_libunwind_path))
+
 
 def build_invocation(compile_flags):
-  return ' ' + ' '.join([config.clang] + compile_flags) + ' '
+    return " " + " ".join([config.clang] + compile_flags) + " "
 
+
+config.substitutions.append(("%clang ", build_invocation([config.target_cflags])))
 config.substitutions.append(
-    ('%clang ', build_invocation([config.target_cflags])))
-config.substitutions.append(
-    ('%clangxx ',
-     build_invocation(config.cxx_mode_flags + [config.target_cflags])))
+    ("%clangxx ", build_invocation(config.cxx_mode_flags + [config.target_cflags]))
+)
 config.substitutions.append(
-    ('%clang_cl ',
-     build_invocation(['--driver-mode=cl'] + [config.target_cflags])))
-if config.host_os == 'Windows':
-  config.substitutions.append(
-      ('%llvm_jitlink', (llvm_jitlink + ' -orc-runtime=' +
-       orc_rt_path + ' -no-process-syms=true -slab-allocate=64MB')))
+    ("%clang_cl ", build_invocation(["--driver-mode=cl"] + [config.target_cflags]))
+)
+if config.host_os == "Windows":
+    config.substitutions.append(
+        (
+            "%llvm_jitlink",
+            (
+                llvm_jitlink
+                + " -orc-runtime="
+                + orc_rt_path
+                + " -no-process-syms=true -slab-allocate=64MB"
+            ),
+        )
+    )
 else:
-  config.substitutions.append(
-      ('%llvm_jitlink', (llvm_jitlink + ' -orc-runtime=' + orc_rt_path)))
+    config.substitutions.append(
+        ("%llvm_jitlink", (llvm_jitlink + " -orc-runtime=" + orc_rt_path))
+    )
 config.substitutions.append(
-    ('%orc_rt_executor', orc_rt_executor_stem + "-" + config.host_arch))
+    ("%orc_rt_executor", orc_rt_executor_stem + "-" + config.host_arch)
+)
 config.substitutions.append(
-    ('%lli_orc_jitlink', (lli + ' -jit-kind=orc -jit-linker=jitlink -orc-runtime=' + orc_rt_path)))
+    (
+        "%lli_orc_jitlink",
+        (lli + " -jit-kind=orc -jit-linker=jitlink -orc-runtime=" + orc_rt_path),
+    )
+)
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp', '.S', '.ll', '.test']
+config.suffixes = [".c", ".cpp", ".S", ".ll", ".test"]
 
 # Exclude Inputs directories.
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
-if config.host_os not in ['Darwin', 'FreeBSD', 'Linux', 'Windows']:
-  config.unsupported = True
+if config.host_os not in ["Darwin", "FreeBSD", "Linux", "Windows"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/profile/AIX/lit.local.cfg.py b/compiler-rt/test/profile/AIX/lit.local.cfg.py
index 7ec27bf021e2d..55462708e3b6c 100644
--- a/compiler-rt/test/profile/AIX/lit.local.cfg.py
+++ b/compiler-rt/test/profile/AIX/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['AIX']:
-  config.unsupported = True
+if root.host_os not in ["AIX"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/profile/Darwin/lit.local.cfg.py b/compiler-rt/test/profile/Darwin/lit.local.cfg.py
index a85dfcd24c08e..520a963d01198 100644
--- a/compiler-rt/test/profile/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/profile/Darwin/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Darwin']:
-  config.unsupported = True
+if root.host_os not in ["Darwin"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/profile/Linux/lit.local.cfg.py b/compiler-rt/test/profile/Linux/lit.local.cfg.py
index 9bb92b7f14fe6..c1e89581a1ab9 100644
--- a/compiler-rt/test/profile/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/profile/Linux/lit.local.cfg.py
@@ -1,43 +1,49 @@
 import subprocess
 
+
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
 
 
 def is_gold_linker_available():
 
-  if not config.gold_executable:
-    return False
-  try:
-    ld_cmd = subprocess.Popen([config.gold_executable, '--help'], stdout = subprocess.PIPE)
-    ld_out = ld_cmd.stdout.read().decode()
-    ld_cmd.wait()
-  except:
-    return False
+    if not config.gold_executable:
+        return False
+    try:
+        ld_cmd = subprocess.Popen(
+            [config.gold_executable, "--help"], stdout=subprocess.PIPE
+        )
+        ld_out = ld_cmd.stdout.read().decode()
+        ld_cmd.wait()
+    except:
+        return False
+
+    if not "-plugin" in ld_out:
+        return False
+
+    # config.clang is not guaranteed to be just the executable!
+    clang_cmd = subprocess.Popen(
+        " ".join([config.clang, "-fuse-ld=gold", "-xc", "-"]),
+        shell=True,
+        universal_newlines=True,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    clang_err = clang_cmd.communicate("int main() { return 0; }")[1]
+
+    if not "invalid linker" in clang_err:
+        return True
 
-  if not '-plugin' in ld_out:
     return False
 
-  # config.clang is not guaranteed to be just the executable!
-  clang_cmd = subprocess.Popen(" ".join([config.clang, '-fuse-ld=gold', '-xc', '-']),
-                               shell=True,
-                               universal_newlines = True,
-                               stdin = subprocess.PIPE,
-                               stdout = subprocess.PIPE,
-                               stderr = subprocess.PIPE)
-  clang_err = clang_cmd.communicate('int main() { return 0; }')[1]
-
-  if not 'invalid linker' in clang_err:
-    return True
-
-  return False
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux'] or not is_gold_linker_available():
-  config.unsupported = True
+if root.host_os not in ["Linux"] or not is_gold_linker_available():
+    config.unsupported = True
 
 if config.have_curl:
-    config.available_features.add('curl')
+    config.available_features.add("curl")

diff  --git a/compiler-rt/test/profile/Posix/lit.local.cfg.py b/compiler-rt/test/profile/Posix/lit.local.cfg.py
index b4faa9f5052a0..17a67689192d0 100644
--- a/compiler-rt/test/profile/Posix/lit.local.cfg.py
+++ b/compiler-rt/test/profile/Posix/lit.local.cfg.py
@@ -1,16 +1,17 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os in ['Windows']:
-  config.unsupported = True
+if root.host_os in ["Windows"]:
+    config.unsupported = True
 
 # AIX usually usually makes use of an explicit export list when linking a shared
 # object, since the linker doesn't export anything by default.
-if root.host_os in ['AIX']:
-  config.substitutions.append(('%shared_linker_xopts', '-Wl,-bE:shr.exp'))
+if root.host_os in ["AIX"]:
+    config.substitutions.append(("%shared_linker_xopts", "-Wl,-bE:shr.exp"))
 else:
-  config.substitutions.append(('%shared_linker_xopts', ''))
+    config.substitutions.append(("%shared_linker_xopts", ""))

diff  --git a/compiler-rt/test/profile/Windows/lit.local.cfg.py b/compiler-rt/test/profile/Windows/lit.local.cfg.py
index e924d91c44934..57c0979e60962 100644
--- a/compiler-rt/test/profile/Windows/lit.local.cfg.py
+++ b/compiler-rt/test/profile/Windows/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Windows']:
-  config.unsupported = True
+if root.host_os not in ["Windows"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/profile/lit.cfg.py b/compiler-rt/test/profile/lit.cfg.py
index 9a96f472d40cf..d3ba115731c5d 100644
--- a/compiler-rt/test/profile/lit.cfg.py
+++ b/compiler-rt/test/profile/lit.cfg.py
@@ -3,45 +3,50 @@
 import os
 import re
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup config name.
-config.name = 'Profile-' + config.target_arch
+config.name = "Profile-" + config.target_arch
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Setup executable root.
-if hasattr(config, 'profile_lit_binary_dir') and \
-        config.profile_lit_binary_dir is not None:
+if (
+    hasattr(config, "profile_lit_binary_dir")
+    and config.profile_lit_binary_dir is not None
+):
     config.test_exec_root = os.path.join(config.profile_lit_binary_dir, config.name)
 
-target_is_msvc = bool(re.match(r'.*-windows-msvc$', config.target_triple))
+target_is_msvc = bool(re.match(r".*-windows-msvc$", config.target_triple))
 
-if config.host_os in ['Linux']:
-  extra_link_flags = ["-ldl"]
+if config.host_os in ["Linux"]:
+    extra_link_flags = ["-ldl"]
 elif target_is_msvc:
-  # InstrProf is incompatible with incremental linking. Disable it as a
-  # workaround.
-  extra_link_flags = ["-Wl,-incremental:no"]
+    # InstrProf is incompatible with incremental linking. Disable it as a
+    # workaround.
+    extra_link_flags = ["-Wl,-incremental:no"]
 else:
-  extra_link_flags = []
+    extra_link_flags = []
 
 # Test suffixes.
-config.suffixes = ['.c', '.cpp', '.m', '.mm', '.ll', '.test']
+config.suffixes = [".c", ".cpp", ".m", ".mm", ".ll", ".test"]
 
 # What to exclude.
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
 # Clang flags.
-target_cflags=[get_required_attr(config, "target_cflags")]
+target_cflags = [get_required_attr(config, "target_cflags")]
 clang_cflags = target_cflags + extra_link_flags
 clang_cxxflags = config.cxx_mode_flags + clang_cflags
 
@@ -51,65 +56,126 @@ def get_required_attr(config, attr_name):
 #       We remove -stdlib= from the cflags here to avoid problems, but the interaction between
 #       CMake and compiler-rt's tests should be reworked so that cflags don't contain C++ only
 #       flags.
-clang_cflags = [flag.replace('-stdlib=libc++', '').replace('-stdlib=libstdc++', '') for flag in clang_cflags]
-
-def build_invocation(compile_flags, with_lto = False):
-  lto_flags = []
-  if with_lto and config.lto_supported:
-    lto_flags += config.lto_flags
-  return " " + " ".join([config.clang] + lto_flags + compile_flags) + " "
-
-def exclude_unsupported_files_for_aix(dirname):
-   for filename in os.listdir(dirname):
-       source_path = os.path.join( dirname, filename)
-       if os.path.isdir(source_path):
-           continue
-       f = open(source_path, 'r')
-       try:
-          data = f.read()
-          # -fprofile-instr-generate and rpath are not supported on AIX, exclude all tests with them.
-          if ("%clang_profgen" in data or "%clangxx_profgen" in data or "-rpath" in data):
-            config.excludes += [ filename ]
-       finally:
-          f.close()
-
-# Add clang substitutions.
-config.substitutions.append( ("%clang ", build_invocation(clang_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(clang_cxxflags)) )
-
-config.substitutions.append( ("%clang_profgen ", build_invocation(clang_cflags) + " -fprofile-instr-generate ") )
-config.substitutions.append( ("%clang_profgen=", build_invocation(clang_cflags) + " -fprofile-instr-generate=") )
-config.substitutions.append( ("%clangxx_profgen ", build_invocation(clang_cxxflags) + " -fprofile-instr-generate ") )
-config.substitutions.append( ("%clangxx_profgen=", build_invocation(clang_cxxflags) + " -fprofile-instr-generate=") )
-
-config.substitutions.append( ("%clang_pgogen ", build_invocation(clang_cflags) + " -fprofile-generate ") )
-config.substitutions.append( ("%clang_pgogen=", build_invocation(clang_cflags) + " -fprofile-generate=") )
-config.substitutions.append( ("%clangxx_pgogen ", build_invocation(clang_cxxflags) + " -fprofile-generate ") )
-config.substitutions.append( ("%clangxx_pgogen=", build_invocation(clang_cxxflags) + " -fprofile-generate=") )
+clang_cflags = [
+    flag.replace("-stdlib=libc++", "").replace("-stdlib=libstdc++", "")
+    for flag in clang_cflags
+]
 
-config.substitutions.append( ("%clang_cspgogen ", build_invocation(clang_cflags) + " -fcs-profile-generate ") )
-config.substitutions.append( ("%clang_cspgogen=", build_invocation(clang_cflags) + " -fcs-profile-generate=") )
-config.substitutions.append( ("%clangxx_cspgogen ", build_invocation(clang_cxxflags) + " -fcs-profile-generate ") )
-config.substitutions.append( ("%clangxx_cspgogen=", build_invocation(clang_cxxflags) + " -fcs-profile-generate=") )
 
-config.substitutions.append( ("%clang_profuse=", build_invocation(clang_cflags) + " -fprofile-instr-use=") )
-config.substitutions.append( ("%clangxx_profuse=", build_invocation(clang_cxxflags) + " -fprofile-instr-use=") )
+def build_invocation(compile_flags, with_lto=False):
+    lto_flags = []
+    if with_lto and config.lto_supported:
+        lto_flags += config.lto_flags
+    return " " + " ".join([config.clang] + lto_flags + compile_flags) + " "
 
-config.substitutions.append( ("%clang_pgouse=", build_invocation(clang_cflags) + " -fprofile-use=") )
-config.substitutions.append( ("%clangxx_profuse=", build_invocation(clang_cxxflags) + " -fprofile-instr-use=") )
 
-config.substitutions.append( ("%clang_lto_profgen=", build_invocation(clang_cflags, True) + " -fprofile-instr-generate=") )
-
-if config.host_os not in ['Windows', 'Darwin', 'FreeBSD', 'Linux', 'NetBSD', 'SunOS', 'AIX']:
-  config.unsupported = True
+def exclude_unsupported_files_for_aix(dirname):
+    for filename in os.listdir(dirname):
+        source_path = os.path.join(dirname, filename)
+        if os.path.isdir(source_path):
+            continue
+        f = open(source_path, "r")
+        try:
+            data = f.read()
+            # -fprofile-instr-generate and rpath are not supported on AIX, exclude all tests with them.
+            if (
+                "%clang_profgen" in data
+                or "%clangxx_profgen" in data
+                or "-rpath" in data
+            ):
+                config.excludes += [filename]
+        finally:
+            f.close()
 
-if config.host_os in ['AIX']:
-  config.available_features.add('system-aix')
-  exclude_unsupported_files_for_aix(config.test_source_root)
-  exclude_unsupported_files_for_aix(config.test_source_root + "/Posix")
 
-if config.target_arch in ['armv7l']:
-  config.unsupported = True
+# Add clang substitutions.
+config.substitutions.append(("%clang ", build_invocation(clang_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(clang_cxxflags)))
+
+config.substitutions.append(
+    ("%clang_profgen ", build_invocation(clang_cflags) + " -fprofile-instr-generate ")
+)
+config.substitutions.append(
+    ("%clang_profgen=", build_invocation(clang_cflags) + " -fprofile-instr-generate=")
+)
+config.substitutions.append(
+    (
+        "%clangxx_profgen ",
+        build_invocation(clang_cxxflags) + " -fprofile-instr-generate ",
+    )
+)
+config.substitutions.append(
+    (
+        "%clangxx_profgen=",
+        build_invocation(clang_cxxflags) + " -fprofile-instr-generate=",
+    )
+)
+
+config.substitutions.append(
+    ("%clang_pgogen ", build_invocation(clang_cflags) + " -fprofile-generate ")
+)
+config.substitutions.append(
+    ("%clang_pgogen=", build_invocation(clang_cflags) + " -fprofile-generate=")
+)
+config.substitutions.append(
+    ("%clangxx_pgogen ", build_invocation(clang_cxxflags) + " -fprofile-generate ")
+)
+config.substitutions.append(
+    ("%clangxx_pgogen=", build_invocation(clang_cxxflags) + " -fprofile-generate=")
+)
+
+config.substitutions.append(
+    ("%clang_cspgogen ", build_invocation(clang_cflags) + " -fcs-profile-generate ")
+)
+config.substitutions.append(
+    ("%clang_cspgogen=", build_invocation(clang_cflags) + " -fcs-profile-generate=")
+)
+config.substitutions.append(
+    ("%clangxx_cspgogen ", build_invocation(clang_cxxflags) + " -fcs-profile-generate ")
+)
+config.substitutions.append(
+    ("%clangxx_cspgogen=", build_invocation(clang_cxxflags) + " -fcs-profile-generate=")
+)
+
+config.substitutions.append(
+    ("%clang_profuse=", build_invocation(clang_cflags) + " -fprofile-instr-use=")
+)
+config.substitutions.append(
+    ("%clangxx_profuse=", build_invocation(clang_cxxflags) + " -fprofile-instr-use=")
+)
+
+config.substitutions.append(
+    ("%clang_pgouse=", build_invocation(clang_cflags) + " -fprofile-use=")
+)
+config.substitutions.append(
+    ("%clangxx_profuse=", build_invocation(clang_cxxflags) + " -fprofile-instr-use=")
+)
+
+config.substitutions.append(
+    (
+        "%clang_lto_profgen=",
+        build_invocation(clang_cflags, True) + " -fprofile-instr-generate=",
+    )
+)
+
+if config.host_os not in [
+    "Windows",
+    "Darwin",
+    "FreeBSD",
+    "Linux",
+    "NetBSD",
+    "SunOS",
+    "AIX",
+]:
+    config.unsupported = True
+
+if config.host_os in ["AIX"]:
+    config.available_features.add("system-aix")
+    exclude_unsupported_files_for_aix(config.test_source_root)
+    exclude_unsupported_files_for_aix(config.test_source_root + "/Posix")
+
+if config.target_arch in ["armv7l"]:
+    config.unsupported = True
 
 if config.android:
-  config.unsupported = True
+    config.unsupported = True

diff  --git a/compiler-rt/test/safestack/lit.cfg.py b/compiler-rt/test/safestack/lit.cfg.py
index b3cf928e4114f..adf27a0d7e5ea 100644
--- a/compiler-rt/test/safestack/lit.cfg.py
+++ b/compiler-rt/test/safestack/lit.cfg.py
@@ -3,20 +3,29 @@
 import os
 
 # Setup config name.
-config.name = 'SafeStack'
+config.name = "SafeStack"
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Test suffixes.
-config.suffixes = ['.c', '.cpp', '.m', '.mm', '.ll', '.test']
+config.suffixes = [".c", ".cpp", ".m", ".mm", ".ll", ".test"]
 
 # Add clang substitutions.
-config.substitutions.append( ("%clang_nosafestack ", config.clang + " -O0 -fno-sanitize=safe-stack ") )
-config.substitutions.append( ("%clang_safestack ", config.clang + " -O0 -fsanitize=safe-stack ") )
+config.substitutions.append(
+    ("%clang_nosafestack ", config.clang + " -O0 -fno-sanitize=safe-stack ")
+)
+config.substitutions.append(
+    ("%clang_safestack ", config.clang + " -O0 -fsanitize=safe-stack ")
+)
 
 if config.lto_supported:
-  config.substitutions.append((r"%clang_lto_safestack ", ' '.join([config.clang] + config.lto_flags + ['-fsanitize=safe-stack '])))
+    config.substitutions.append(
+        (
+            r"%clang_lto_safestack ",
+            " ".join([config.clang] + config.lto_flags + ["-fsanitize=safe-stack "]),
+        )
+    )
 
-if config.host_os not in ['Linux', 'FreeBSD', 'NetBSD']:
-   config.unsupported = True
+if config.host_os not in ["Linux", "FreeBSD", "NetBSD"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/sanitizer_common/TestCases/Darwin/lit.local.cfg.py b/compiler-rt/test/sanitizer_common/TestCases/Darwin/lit.local.cfg.py
index a85dfcd24c08e..520a963d01198 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/sanitizer_common/TestCases/Darwin/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Darwin']:
-  config.unsupported = True
+if root.host_os not in ["Darwin"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/sanitizer_common/TestCases/FreeBSD/lit.local.cfg.py b/compiler-rt/test/sanitizer_common/TestCases/FreeBSD/lit.local.cfg.py
index 6f2f4280f978b..0102001660cf1 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/FreeBSD/lit.local.cfg.py
+++ b/compiler-rt/test/sanitizer_common/TestCases/FreeBSD/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['FreeBSD']:
-  config.unsupported = True
+if root.host_os not in ["FreeBSD"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/sanitizer_common/TestCases/Linux/lit.local.cfg.py b/compiler-rt/test/sanitizer_common/TestCases/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/sanitizer_common/TestCases/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/sanitizer_common/TestCases/NetBSD/lit.local.cfg.py b/compiler-rt/test/sanitizer_common/TestCases/NetBSD/lit.local.cfg.py
index 94023561ffe30..3cd1aa667343c 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/NetBSD/lit.local.cfg.py
+++ b/compiler-rt/test/sanitizer_common/TestCases/NetBSD/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['NetBSD']:
-  config.unsupported = True
+if root.host_os not in ["NetBSD"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/sanitizer_common/TestCases/Posix/lit.local.cfg.py b/compiler-rt/test/sanitizer_common/TestCases/Posix/lit.local.cfg.py
index 60a9460820a62..63240c3962565 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/Posix/lit.local.cfg.py
+++ b/compiler-rt/test/sanitizer_common/TestCases/Posix/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os in ['Windows']:
-  config.unsupported = True
+if root.host_os in ["Windows"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/sanitizer_common/android_commands/android_common.py b/compiler-rt/test/sanitizer_common/android_commands/android_common.py
index 81caae8741c2d..4e7d15c9ebc23 100644
--- a/compiler-rt/test/sanitizer_common/android_commands/android_common.py
+++ b/compiler-rt/test/sanitizer_common/android_commands/android_common.py
@@ -1,44 +1,52 @@
 import os, sys, subprocess, tempfile
 import time
 
-ANDROID_TMPDIR = '/data/local/tmp/Output'
-ADB = os.environ.get('ADB', 'adb')
+ANDROID_TMPDIR = "/data/local/tmp/Output"
+ADB = os.environ.get("ADB", "adb")
 
 verbose = False
-if os.environ.get('ANDROID_RUN_VERBOSE') == '1':
+if os.environ.get("ANDROID_RUN_VERBOSE") == "1":
     verbose = True
 
+
 def host_to_device_path(path):
     rel = os.path.relpath(path, "/")
     dev = os.path.join(ANDROID_TMPDIR, rel)
     return dev
 
-def adb(args, attempts = 1, timeout_sec = 600):
+
+def adb(args, attempts=1, timeout_sec=600):
     if verbose:
         print(args)
     tmpname = tempfile.mktemp()
-    out = open(tmpname, 'w')
+    out = open(tmpname, "w")
     ret = 255
     while attempts > 0 and ret != 0:
-      attempts -= 1
-      ret = subprocess.call(['timeout', str(timeout_sec), ADB] + args, stdout=out, stderr=subprocess.STDOUT)
+        attempts -= 1
+        ret = subprocess.call(
+            ["timeout", str(timeout_sec), ADB] + args,
+            stdout=out,
+            stderr=subprocess.STDOUT,
+        )
     if ret != 0:
-      print("adb command failed", args)
-      print(tmpname)
-      out.close()
-      out = open(tmpname, 'r')
-      print(out.read())
+        print("adb command failed", args)
+        print(tmpname)
+        out.close()
+        out = open(tmpname, "r")
+        print(out.read())
     out.close()
     os.unlink(tmpname)
     return ret
 
+
 def pull_from_device(path):
     tmp = tempfile.mktemp()
-    adb(['pull', path, tmp], 5, 60)
-    text = open(tmp, 'r').read()
+    adb(["pull", path, tmp], 5, 60)
+    text = open(tmp, "r").read()
     os.unlink(tmp)
     return text
 
+
 def push_to_device(path):
     dst_path = host_to_device_path(path)
-    adb(['push', path, dst_path], 5, 60)
+    adb(["push", path, dst_path], 5, 60)

diff  --git a/compiler-rt/test/sanitizer_common/android_commands/android_compile.py b/compiler-rt/test/sanitizer_common/android_commands/android_compile.py
index 8f657d32be562..f86831ffef899 100755
--- a/compiler-rt/test/sanitizer_common/android_commands/android_compile.py
+++ b/compiler-rt/test/sanitizer_common/android_commands/android_compile.py
@@ -5,19 +5,19 @@
 
 
 here = os.path.abspath(os.path.dirname(sys.argv[0]))
-android_run = os.path.join(here, 'android_run.py')
+android_run = os.path.join(here, "android_run.py")
 
 output = None
-output_type = 'executable'
+output_type = "executable"
 
 args = sys.argv[1:]
 while args:
     arg = args.pop(0)
-    if arg == '-shared':
-        output_type = 'shared'
-    elif arg == '-c':
-        output_type = 'object'
-    elif arg == '-o':
+    if arg == "-shared":
+        output_type = "shared"
+    elif arg == "-c":
+        output_type = "object"
+    elif arg == "-o":
         output = args.pop(0)
 
 if output == None:
@@ -28,9 +28,9 @@
 if ret != 0:
     sys.exit(ret)
 
-if output_type in ['executable', 'shared']:
+if output_type in ["executable", "shared"]:
     push_to_device(output)
 
-if output_type == 'executable':
-    os.rename(output, output + '.real')
+if output_type == "executable":
+    os.rename(output, output + ".real")
     os.symlink(android_run, output)

diff  --git a/compiler-rt/test/sanitizer_common/android_commands/android_run.py b/compiler-rt/test/sanitizer_common/android_commands/android_run.py
index 6f454ab3a4d38..22c88aadce043 100755
--- a/compiler-rt/test/sanitizer_common/android_commands/android_run.py
+++ b/compiler-rt/test/sanitizer_common/android_commands/android_run.py
@@ -3,29 +3,47 @@
 import os, signal, sys, subprocess, tempfile
 from android_common import *
 
-ANDROID_TMPDIR = '/data/local/tmp/Output'
+ANDROID_TMPDIR = "/data/local/tmp/Output"
 
 device_binary = host_to_device_path(sys.argv[0])
 
+
 def build_env():
     args = []
     # Android linker ignores RPATH. Set LD_LIBRARY_PATH to Output dir.
-    args.append('LD_LIBRARY_PATH=%s' % (ANDROID_TMPDIR,))
+    args.append("LD_LIBRARY_PATH=%s" % (ANDROID_TMPDIR,))
     for (key, value) in list(os.environ.items()):
-        if key in ['ASAN_ACTIVATION_OPTIONS', 'SCUDO_OPTIONS'] or key.endswith('SAN_OPTIONS'):
+        if key in ["ASAN_ACTIVATION_OPTIONS", "SCUDO_OPTIONS"] or key.endswith(
+            "SAN_OPTIONS"
+        ):
             args.append('%s="%s"' % (key, value.replace('"', '\\"')))
-    return ' '.join(args)
+    return " ".join(args)
+
 
-is_64bit = str(subprocess.check_output(['file', sys.argv[0] + '.real'])).find('64-bit') != -1
+is_64bit = (
+    str(subprocess.check_output(["file", sys.argv[0] + ".real"])).find("64-bit") != -1
+)
 
 device_env = build_env()
-device_args = ' '.join(sys.argv[1:]) # FIXME: escape?
-device_stdout = device_binary + '.stdout'
-device_stderr = device_binary + '.stderr'
-device_exitcode = device_binary + '.exitcode'
-ret = adb(['shell', 'cd %s && %s %s %s >%s 2>%s ; echo $? >%s' %
-           (ANDROID_TMPDIR, device_env, device_binary, device_args,
-            device_stdout, device_stderr, device_exitcode)])
+device_args = " ".join(sys.argv[1:])  # FIXME: escape?
+device_stdout = device_binary + ".stdout"
+device_stderr = device_binary + ".stderr"
+device_exitcode = device_binary + ".exitcode"
+ret = adb(
+    [
+        "shell",
+        "cd %s && %s %s %s >%s 2>%s ; echo $? >%s"
+        % (
+            ANDROID_TMPDIR,
+            device_env,
+            device_binary,
+            device_args,
+            device_stdout,
+            device_stderr,
+            device_exitcode,
+        ),
+    ]
+)
 if ret != 0:
     sys.exit(ret)
 
@@ -35,5 +53,5 @@ def build_env():
 # If the device process died with a signal, do abort().
 # Not exactly the same, but good enough to fool "not --crash".
 if retcode > 128:
-  os.kill(os.getpid(), signal.SIGABRT)
+    os.kill(os.getpid(), signal.SIGABRT)
 sys.exit(retcode)

diff  --git a/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py b/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py
index 4b0dbae4cc6a8..6569762a5026c 100644
--- a/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py
+++ b/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py
@@ -3,10 +3,23 @@
 """
 import sys, argparse, re
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help='The sanitizer output to get the pid from')
-    parser.add_argument('--outfile', nargs='?', type=argparse.FileType('r'), default=sys.stdout, help='Where to write the result')
+    parser.add_argument(
+        "--infile",
+        nargs="?",
+        type=argparse.FileType("r"),
+        default=sys.stdin,
+        help="The sanitizer output to get the pid from",
+    )
+    parser.add_argument(
+        "--outfile",
+        nargs="?",
+        type=argparse.FileType("r"),
+        default=sys.stdout,
+        help="Where to write the result",
+    )
     args = parser.parse_args()
 
     pid = process_file(args.infile)
@@ -15,15 +28,14 @@ def main():
     args.outfile.close()
 
 
-
 def process_file(infile):
     # check first line is just ==== divider
-    first_line_pattern = re.compile(r'=*')
+    first_line_pattern = re.compile(r"=*")
     assert first_line_pattern.match(infile.readline())
 
-    # parse out pid from 2nd line 
+    # parse out pid from 2nd line
     # `==PID==ERROR: SanitizerName: error-type on address...`
-    pid_pattern = re.compile(r'==([0-9]*)==ERROR:')
+    pid_pattern = re.compile(r"==([0-9]*)==ERROR:")
     pid = pid_pattern.search(infile.readline()).group(1)
 
     # ignore the rest
@@ -32,5 +44,6 @@ def process_file(infile):
 
     return pid
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

diff  --git a/compiler-rt/test/sanitizer_common/ios_commands/iossim_compile.py b/compiler-rt/test/sanitizer_common/ios_commands/iossim_compile.py
index 9a5da6482e381..39b211b093c92 100755
--- a/compiler-rt/test/sanitizer_common/ios_commands/iossim_compile.py
+++ b/compiler-rt/test/sanitizer_common/ios_commands/iossim_compile.py
@@ -3,20 +3,20 @@
 import os, sys, subprocess
 
 output = None
-output_type = 'executable'
+output_type = "executable"
 
 args = sys.argv[1:]
 while args:
     arg = args.pop(0)
-    if arg == '-shared':
-        output_type = 'shared'
-    elif arg == '-dynamiclib':
-        output_type = 'dylib'
-    elif arg == '-c':
-        output_type = 'object'
-    elif arg == '-S':
-        output_type = 'assembly'
-    elif arg == '-o':
+    if arg == "-shared":
+        output_type = "shared"
+    elif arg == "-dynamiclib":
+        output_type = "dylib"
+    elif arg == "-c":
+        output_type = "object"
+    elif arg == "-S":
+        output_type = "assembly"
+    elif arg == "-o":
         output = args.pop(0)
 
 if output == None:
@@ -28,5 +28,5 @@
     sys.exit(ret)
 
 # If we produce a dylib, ad-hoc sign it.
-if output_type in ['shared', 'dylib']:
+if output_type in ["shared", "dylib"]:
     ret = subprocess.call(["codesign", "-s", "-", output])

diff  --git a/compiler-rt/test/sanitizer_common/ios_commands/iossim_run.py b/compiler-rt/test/sanitizer_common/ios_commands/iossim_run.py
index 47e7048f96e44..5e977ea5ed908 100755
--- a/compiler-rt/test/sanitizer_common/ios_commands/iossim_run.py
+++ b/compiler-rt/test/sanitizer_common/ios_commands/iossim_run.py
@@ -3,65 +3,76 @@
 import glob, os, pipes, sys, subprocess
 
 
-device_id = os.environ.get('SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER')
-iossim_run_verbose = os.environ.get('SANITIZER_IOSSIM_RUN_VERBOSE')
-wait_for_debug = os.environ.get('SANITIZER_IOSSIM_RUN_WAIT_FOR_DEBUGGER')
+device_id = os.environ.get("SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER")
+iossim_run_verbose = os.environ.get("SANITIZER_IOSSIM_RUN_VERBOSE")
+wait_for_debug = os.environ.get("SANITIZER_IOSSIM_RUN_WAIT_FOR_DEBUGGER")
 
 if not device_id:
-  raise EnvironmentError("Specify SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER to select which simulator to use.")
+    raise EnvironmentError(
+        "Specify SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER to select which simulator to use."
+    )
 
 for e in [
-  "ASAN_OPTIONS",
-  "TSAN_OPTIONS",
-  "UBSAN_OPTIONS",
-  "LSAN_OPTIONS",
-  "APPLE_ASAN_INIT_FOR_DLOPEN",
-  "ASAN_ACTIVATION_OPTIONS",
-  "MallocNanoZone",
+    "ASAN_OPTIONS",
+    "TSAN_OPTIONS",
+    "UBSAN_OPTIONS",
+    "LSAN_OPTIONS",
+    "APPLE_ASAN_INIT_FOR_DLOPEN",
+    "ASAN_ACTIVATION_OPTIONS",
+    "MallocNanoZone",
 ]:
-  if e in os.environ:
-    os.environ["SIMCTL_CHILD_" + e] = os.environ[e]
+    if e in os.environ:
+        os.environ["SIMCTL_CHILD_" + e] = os.environ[e]
 
-find_atos_cmd = 'xcrun -sdk iphonesimulator -f atos'
-atos_path = subprocess.run(find_atos_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True).stdout.decode().strip()
-for san in ['ASAN', 'TSAN', 'UBSAN', 'LSAN']:
-  os.environ[f'SIMCTL_CHILD_{san}_SYMBOLIZER_PATH'] = atos_path
+find_atos_cmd = "xcrun -sdk iphonesimulator -f atos"
+atos_path = (
+    subprocess.run(
+        find_atos_cmd.split(),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        check=True,
+    )
+    .stdout.decode()
+    .strip()
+)
+for san in ["ASAN", "TSAN", "UBSAN", "LSAN"]:
+    os.environ[f"SIMCTL_CHILD_{san}_SYMBOLIZER_PATH"] = atos_path
 
 prog = sys.argv[1]
 exit_code = None
-if prog == 'rm':
-  # The simulator and host actually share the same file system so we can just
-  # execute directly on the host.
-  rm_args = []
-  for arg in sys.argv[2:]:
-    if '*' in arg or '?' in arg:
-      # Don't quote glob pattern
-      rm_args.append(arg)
-    else:
-      # FIXME(dliew): pipes.quote() is deprecated
-      rm_args.append(pipes.quote(arg))
-  rm_cmd_line = ["/bin/rm"] + rm_args
-  rm_cmd_line_str = ' '.join(rm_cmd_line)
-  # We use `shell=True` so that any wildcard globs get expanded by the shell.
+if prog == "rm":
+    # The simulator and host actually share the same file system so we can just
+    # execute directly on the host.
+    rm_args = []
+    for arg in sys.argv[2:]:
+        if "*" in arg or "?" in arg:
+            # Don't quote glob pattern
+            rm_args.append(arg)
+        else:
+            # FIXME(dliew): pipes.quote() is deprecated
+            rm_args.append(pipes.quote(arg))
+    rm_cmd_line = ["/bin/rm"] + rm_args
+    rm_cmd_line_str = " ".join(rm_cmd_line)
+    # We use `shell=True` so that any wildcard globs get expanded by the shell.
 
-  if iossim_run_verbose:
-    print("RUNNING: \t{}".format(rm_cmd_line_str), flush=True)
+    if iossim_run_verbose:
+        print("RUNNING: \t{}".format(rm_cmd_line_str), flush=True)
 
-  exitcode = subprocess.call(rm_cmd_line_str, shell=True)
+    exitcode = subprocess.call(rm_cmd_line_str, shell=True)
 
 else:
-  cmd = ["xcrun", "simctl", "spawn", "--standalone"]
+    cmd = ["xcrun", "simctl", "spawn", "--standalone"]
 
-  if wait_for_debug:
-    cmd.append("--wait-for-debugger")
+    if wait_for_debug:
+        cmd.append("--wait-for-debugger")
 
-  cmd.append(device_id)
-  cmd += sys.argv[1:]
+    cmd.append(device_id)
+    cmd += sys.argv[1:]
 
-  if iossim_run_verbose:
-    print("RUNNING: \t{}".format(" ".join(cmd)), flush=True)
+    if iossim_run_verbose:
+        print("RUNNING: \t{}".format(" ".join(cmd)), flush=True)
 
-  exitcode = subprocess.call(cmd)
+    exitcode = subprocess.call(cmd)
 if exitcode > 125:
-  exitcode = 126
+    exitcode = 126
 sys.exit(exitcode)

diff  --git a/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py b/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py
index 9e3983bc704a6..de2e565d4ff1e 100644
--- a/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py
+++ b/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py
@@ -6,44 +6,91 @@
 """
 import sys, os, argparse, re, glob, shutil, time
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--pid', type=str, required=True, help='The process id of the process that crashed')
-    parser.add_argument('--binary-filename', type=str, required=True, help='The name of the file that crashed')
-    parser.add_argument('--retry-count', type=int, nargs='?', default=10, help='The number of retries to make')
-    parser.add_argument('--max-wait-time', type=float, nargs='?', default=5.0, help='The max amount of seconds to wait between tries')
+    parser.add_argument(
+        "--pid",
+        type=str,
+        required=True,
+        help="The process id of the process that crashed",
+    )
+    parser.add_argument(
+        "--binary-filename",
+        type=str,
+        required=True,
+        help="The name of the file that crashed",
+    )
+    parser.add_argument(
+        "--retry-count",
+        type=int,
+        nargs="?",
+        default=10,
+        help="The number of retries to make",
+    )
+    parser.add_argument(
+        "--max-wait-time",
+        type=float,
+        nargs="?",
+        default=5.0,
+        help="The max amount of seconds to wait between tries",
+    )
 
-    parser.add_argument('--dir', nargs='?', type=str, default="~/Library/Logs/DiagnosticReports", help='The directory to look for the crash report')
-    parser.add_argument('--outfile', nargs='?', type=argparse.FileType('r'), default=sys.stdout, help='Where to write the result')
+    parser.add_argument(
+        "--dir",
+        nargs="?",
+        type=str,
+        default="~/Library/Logs/DiagnosticReports",
+        help="The directory to look for the crash report",
+    )
+    parser.add_argument(
+        "--outfile",
+        nargs="?",
+        type=argparse.FileType("r"),
+        default=sys.stdout,
+        help="Where to write the result",
+    )
     args = parser.parse_args()
 
     assert args.pid, "pid can't be empty"
     assert args.binary_filename, "binary-filename can't be empty"
 
     os.chdir(os.path.expanduser(args.dir))
-    output_report_with_retries(args.outfile, args.pid.strip(), args.binary_filename, args.retry_count, args.max_wait_time)
+    output_report_with_retries(
+        args.outfile,
+        args.pid.strip(),
+        args.binary_filename,
+        args.retry_count,
+        args.max_wait_time,
+    )
+
 
-def output_report_with_retries(outfile, pid, filename, attempts_remaining, max_wait_time):
+def output_report_with_retries(
+    outfile, pid, filename, attempts_remaining, max_wait_time
+):
     report_name = find_report_in_cur_dir(pid, filename)
     if report_name:
         with open(report_name, "r") as f:
             shutil.copyfileobj(f, outfile)
         return
-    elif(attempts_remaining > 0):
+    elif attempts_remaining > 0:
         # As the number of attempts remaining decreases, increase the number of seconds waited
         # if the max wait time is 2s and there are 10 attempts remaining, wait .2 seconds.
-        # if the max wait time is 2s and there are 2 attempts remaining, wait 1 second. 
+        # if the max wait time is 2s and there are 2 attempts remaining, wait 1 second.
         time.sleep(max_wait_time / attempts_remaining)
-        output_report_with_retries(outfile, pid, filename, attempts_remaining - 1, max_wait_time)
+        output_report_with_retries(
+            outfile, pid, filename, attempts_remaining - 1, max_wait_time
+        )
     else:
         raise RuntimeError("Report not found for ({}, {}).".format(filename, pid))
 
+
 def find_report_in_cur_dir(pid, filename):
     for report_name in sorted(glob.glob("{}_*.crash".format(filename)), reverse=True):
         # parse out pid from first line of report
         # `Process:               filename [pid]``
         with open(report_name) as cur_report:
-            pattern = re.compile(r'Process: *{} \[([0-9]*)\]'.format(filename))
+            pattern = re.compile(r"Process: *{} \[([0-9]*)\]".format(filename))
             cur_report_pid = pattern.search(cur_report.readline()).group(1)
 
         assert cur_report_pid and cur_report_pid.isdigit()
@@ -52,7 +99,7 @@ def find_report_in_cur_dir(pid, filename):
 
     # did not find the crash report
     return None
-        
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

diff  --git a/compiler-rt/test/sanitizer_common/lit.common.cfg.py b/compiler-rt/test/sanitizer_common/lit.common.cfg.py
index a35884cab91da..04af4816eb6e7 100644
--- a/compiler-rt/test/sanitizer_common/lit.common.cfg.py
+++ b/compiler-rt/test/sanitizer_common/lit.common.cfg.py
@@ -8,85 +8,92 @@
 default_tool_options = []
 collect_stack_traces = ""
 if config.tool_name == "asan":
-  tool_cflags = ["-fsanitize=address"]
-  tool_options = "ASAN_OPTIONS"
+    tool_cflags = ["-fsanitize=address"]
+    tool_options = "ASAN_OPTIONS"
 elif config.tool_name == "hwasan":
-  tool_cflags = ["-fsanitize=hwaddress", "-fuse-ld=lld"]
-  if config.target_arch == "x86_64":
-    tool_cflags += ["-fsanitize-hwaddress-experimental-aliasing"]
-    config.available_features.add("hwasan-aliasing")
-  tool_options = "HWASAN_OPTIONS"
-  if not config.has_lld:
-    config.unsupported = True
+    tool_cflags = ["-fsanitize=hwaddress", "-fuse-ld=lld"]
+    if config.target_arch == "x86_64":
+        tool_cflags += ["-fsanitize-hwaddress-experimental-aliasing"]
+        config.available_features.add("hwasan-aliasing")
+    tool_options = "HWASAN_OPTIONS"
+    if not config.has_lld:
+        config.unsupported = True
 elif config.tool_name == "tsan":
-  tool_cflags = ["-fsanitize=thread"]
-  tool_options = "TSAN_OPTIONS"
+    tool_cflags = ["-fsanitize=thread"]
+    tool_options = "TSAN_OPTIONS"
 elif config.tool_name == "msan":
-  tool_cflags = ["-fsanitize=memory"]
-  tool_options = "MSAN_OPTIONS"
-  collect_stack_traces = "-fsanitize-memory-track-origins"
+    tool_cflags = ["-fsanitize=memory"]
+    tool_options = "MSAN_OPTIONS"
+    collect_stack_traces = "-fsanitize-memory-track-origins"
 elif config.tool_name == "lsan":
-  tool_cflags = ["-fsanitize=leak"]
-  tool_options = "LSAN_OPTIONS"
+    tool_cflags = ["-fsanitize=leak"]
+    tool_options = "LSAN_OPTIONS"
 elif config.tool_name == "ubsan":
-  tool_cflags = ["-fsanitize=undefined"]
-  tool_options = "UBSAN_OPTIONS"
+    tool_cflags = ["-fsanitize=undefined"]
+    tool_options = "UBSAN_OPTIONS"
 else:
-  lit_config.fatal("Unknown tool for sanitizer_common tests: %r" % config.tool_name)
+    lit_config.fatal("Unknown tool for sanitizer_common tests: %r" % config.tool_name)
 
 config.available_features.add(config.tool_name)
 
-if config.host_os == 'Linux' and config.tool_name == "lsan" and config.target_arch == 'i386':
-  config.available_features.add("lsan-x86")
+if (
+    config.host_os == "Linux"
+    and config.tool_name == "lsan"
+    and config.target_arch == "i386"
+):
+    config.available_features.add("lsan-x86")
 
 if config.arm_thumb:
-  config.available_features.add('thumb')
-
-if config.host_os == 'Darwin':
-  # On Darwin, we default to `abort_on_error=1`, which would make tests run
-  # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
-  default_tool_options += ['abort_on_error=0']
-  if config.tool_name == "tsan":
-    default_tool_options += ['ignore_interceptors_accesses=0']
+    config.available_features.add("thumb")
+
+if config.host_os == "Darwin":
+    # On Darwin, we default to `abort_on_error=1`, which would make tests run
+    # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
+    default_tool_options += ["abort_on_error=0"]
+    if config.tool_name == "tsan":
+        default_tool_options += ["ignore_interceptors_accesses=0"]
 elif config.android:
-  # The same as on Darwin, we default to "abort_on_error=1" which slows down
-  # testing. Also, all existing tests are using "not" instead of "not --crash"
-  # which does not work for abort()-terminated programs.
-  default_tool_options += ['abort_on_error=0']
+    # The same as on Darwin, we default to "abort_on_error=1" which slows down
+    # testing. Also, all existing tests are using "not" instead of "not --crash"
+    # which does not work for abort()-terminated programs.
+    default_tool_options += ["abort_on_error=0"]
 
-default_tool_options_str = ':'.join(default_tool_options)
+default_tool_options_str = ":".join(default_tool_options)
 if default_tool_options_str:
-  config.environment[tool_options] = default_tool_options_str
-  default_tool_options_str += ':'
+    config.environment[tool_options] = default_tool_options_str
+    default_tool_options_str += ":"
 
 extra_link_flags = []
 
-if config.host_os in ['Linux']:
-  extra_link_flags += ["-ldl"]
+if config.host_os in ["Linux"]:
+    extra_link_flags += ["-ldl"]
 
 clang_cflags = config.debug_info_flags + tool_cflags + [config.target_cflags]
 clang_cflags += ["-I%s" % os.path.dirname(os.path.dirname(__file__))]
 clang_cflags += extra_link_flags
 clang_cxxflags = config.cxx_mode_flags + clang_cflags
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
 
-config.substitutions.append( ("%clang ", build_invocation(clang_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(clang_cxxflags)) )
-config.substitutions.append( ("%collect_stack_traces", collect_stack_traces) )
-config.substitutions.append( ("%tool_name", config.tool_name) )
-config.substitutions.append( ("%tool_options", tool_options) )
-config.substitutions.append( ('%env_tool_opts=',
-                              'env ' + tool_options + '=' + default_tool_options_str))
 
-config.suffixes = ['.c', '.cpp']
+config.substitutions.append(("%clang ", build_invocation(clang_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(clang_cxxflags)))
+config.substitutions.append(("%collect_stack_traces", collect_stack_traces))
+config.substitutions.append(("%tool_name", config.tool_name))
+config.substitutions.append(("%tool_options", tool_options))
+config.substitutions.append(
+    ("%env_tool_opts=", "env " + tool_options + "=" + default_tool_options_str)
+)
 
-if config.host_os not in ['Linux', 'Darwin', 'NetBSD', 'FreeBSD', 'SunOS']:
-  config.unsupported = True
+config.suffixes = [".c", ".cpp"]
+
+if config.host_os not in ["Linux", "Darwin", "NetBSD", "FreeBSD", "SunOS"]:
+    config.unsupported = True
 
 if not config.parallelism_group:
-  config.parallelism_group = 'shadow-memory'
+    config.parallelism_group = "shadow-memory"
 
-if config.host_os == 'NetBSD':
-  config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+if config.host_os == "NetBSD":
+    config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))

diff  --git a/compiler-rt/test/scudo/lit.cfg.py b/compiler-rt/test/scudo/lit.cfg.py
index 236d645f3b940..5d45bd99804c7 100644
--- a/compiler-rt/test/scudo/lit.cfg.py
+++ b/compiler-rt/test/scudo/lit.cfg.py
@@ -3,62 +3,72 @@
 import os
 
 # Setup config name.
-config.name = 'Scudo' + config.name_suffix
+config.name = "Scudo" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Path to the shared library
-shared_libscudo = os.path.join(config.compiler_rt_libdir, "libclang_rt.scudo%s.so" % config.target_suffix)
-shared_minlibscudo = os.path.join(config.compiler_rt_libdir, "libclang_rt.scudo_minimal%s.so" % config.target_suffix)
+shared_libscudo = os.path.join(
+    config.compiler_rt_libdir, "libclang_rt.scudo%s.so" % config.target_suffix
+)
+shared_minlibscudo = os.path.join(
+    config.compiler_rt_libdir, "libclang_rt.scudo_minimal%s.so" % config.target_suffix
+)
 
 # Test suffixes.
-config.suffixes = ['.c', '.cpp', '.test']
+config.suffixes = [".c", ".cpp", ".test"]
 
 # C & CXX flags.
-c_flags = ([config.target_cflags] +
-           ["-pthread",
-           "-fPIE",
-           "-pie",
-           "-O0",
-           "-UNDEBUG",
-           "-ldl",
-           "-Wl,--gc-sections"])
+c_flags = [config.target_cflags] + [
+    "-pthread",
+    "-fPIE",
+    "-pie",
+    "-O0",
+    "-UNDEBUG",
+    "-ldl",
+    "-Wl,--gc-sections",
+]
 
 # Android doesn't want -lrt.
 if not config.android:
-  c_flags += ["-lrt"]
+    c_flags += ["-lrt"]
 
-cxx_flags = (c_flags + config.cxx_mode_flags + ["-std=c++11"])
+cxx_flags = c_flags + config.cxx_mode_flags + ["-std=c++11"]
 
 scudo_flags = ["-fsanitize=scudo"]
 
+
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
 # Add substitutions.
 config.substitutions.append(("%clang ", build_invocation(c_flags)))
 config.substitutions.append(("%clang_scudo ", build_invocation(c_flags + scudo_flags)))
-config.substitutions.append(("%clangxx_scudo ", build_invocation(cxx_flags + scudo_flags)))
+config.substitutions.append(
+    ("%clangxx_scudo ", build_invocation(cxx_flags + scudo_flags))
+)
 config.substitutions.append(("%shared_libscudo", shared_libscudo))
 config.substitutions.append(("%shared_minlibscudo", shared_minlibscudo))
 
 # Platform-specific default SCUDO_OPTIONS for lit tests.
-default_scudo_opts = ''
+default_scudo_opts = ""
 if config.android:
-  # Android defaults to abort_on_error=1, which doesn't work for us.
-  default_scudo_opts = 'abort_on_error=0'
+    # Android defaults to abort_on_error=1, which doesn't work for us.
+    default_scudo_opts = "abort_on_error=0"
 
 # Disable GWP-ASan for scudo internal tests.
 if config.gwp_asan:
-  config.environment['GWP_ASAN_OPTIONS'] = 'Enabled=0'
+    config.environment["GWP_ASAN_OPTIONS"] = "Enabled=0"
 
 if default_scudo_opts:
-  config.environment['SCUDO_OPTIONS'] = default_scudo_opts
-  default_scudo_opts += ':'
-config.substitutions.append(('%env_scudo_opts=',
-                             'env SCUDO_OPTIONS=' + default_scudo_opts))
+    config.environment["SCUDO_OPTIONS"] = default_scudo_opts
+    default_scudo_opts += ":"
+config.substitutions.append(
+    ("%env_scudo_opts=", "env SCUDO_OPTIONS=" + default_scudo_opts)
+)
 
 # Hardened Allocator tests are currently supported on Linux only.
-if config.host_os not in ['Linux']:
-   config.unsupported = True
+if config.host_os not in ["Linux"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/shadowcallstack/lit.cfg.py b/compiler-rt/test/shadowcallstack/lit.cfg.py
index 062ce83ea15d7..70a6b16174c4b 100644
--- a/compiler-rt/test/shadowcallstack/lit.cfg.py
+++ b/compiler-rt/test/shadowcallstack/lit.cfg.py
@@ -3,21 +3,34 @@
 import os
 
 # Setup config name.
-config.name = 'ShadowCallStack'
+config.name = "ShadowCallStack"
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Test suffixes.
-config.suffixes = ['.c', '.cpp', '.m', '.mm', '.ll', '.test']
+config.suffixes = [".c", ".cpp", ".m", ".mm", ".ll", ".test"]
 
 # Add clang substitutions.
-config.substitutions.append( ("%clang_noscs ", config.clang + ' -O0 -fno-sanitize=shadow-call-stack ' + config.target_cflags + ' ') )
+config.substitutions.append(
+    (
+        "%clang_noscs ",
+        config.clang
+        + " -O0 -fno-sanitize=shadow-call-stack "
+        + config.target_cflags
+        + " ",
+    )
+)
 
 scs_arch_cflags = config.target_cflags
-if config.target_arch == 'aarch64':
-  scs_arch_cflags += ' -ffixed-x18 '
-config.substitutions.append( ("%clang_scs ", config.clang + ' -O0 -fsanitize=shadow-call-stack ' + scs_arch_cflags + ' ') )
+if config.target_arch == "aarch64":
+    scs_arch_cflags += " -ffixed-x18 "
+config.substitutions.append(
+    (
+        "%clang_scs ",
+        config.clang + " -O0 -fsanitize=shadow-call-stack " + scs_arch_cflags + " ",
+    )
+)
 
-if config.host_os not in ['Linux'] or config.target_arch not in ['aarch64','riscv64']:
-   config.unsupported = True
+if config.host_os not in ["Linux"] or config.target_arch not in ["aarch64", "riscv64"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/tsan/Darwin/lit.local.cfg.py b/compiler-rt/test/tsan/Darwin/lit.local.cfg.py
index e74e82dbf4743..7bf80ac5e1375 100644
--- a/compiler-rt/test/tsan/Darwin/lit.local.cfg.py
+++ b/compiler-rt/test/tsan/Darwin/lit.local.cfg.py
@@ -1,11 +1,12 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Darwin']:
-  config.unsupported = True
+if root.host_os not in ["Darwin"]:
+    config.unsupported = True
 
-config.environment['TSAN_OPTIONS'] += ':ignore_noninstrumented_modules=1'
+config.environment["TSAN_OPTIONS"] += ":ignore_noninstrumented_modules=1"

diff  --git a/compiler-rt/test/tsan/Linux/lit.local.cfg.py b/compiler-rt/test/tsan/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/tsan/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/tsan/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/tsan/libcxx/lit.local.cfg.py b/compiler-rt/test/tsan/libcxx/lit.local.cfg.py
index 3ee705736e6fe..f4820dccb0109 100644
--- a/compiler-rt/test/tsan/libcxx/lit.local.cfg.py
+++ b/compiler-rt/test/tsan/libcxx/lit.local.cfg.py
@@ -1,12 +1,12 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
 # Only run if we have an instrumented libcxx.  On Darwin, run always (we have
 # interceptors to support the system-provided libcxx).
-if not root.has_libcxx and root.host_os != 'Darwin':
-  config.unsupported = True
-
+if not root.has_libcxx and root.host_os != "Darwin":
+    config.unsupported = True

diff  --git a/compiler-rt/test/tsan/libdispatch/lit.local.cfg.py b/compiler-rt/test/tsan/libdispatch/lit.local.cfg.py
index 9d3cf75489ad2..a7653f4305952 100644
--- a/compiler-rt/test/tsan/libdispatch/lit.local.cfg.py
+++ b/compiler-rt/test/tsan/libdispatch/lit.local.cfg.py
@@ -1,17 +1,18 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if 'libdispatch' in root.available_features:
-  additional_cflags = ' -fblocks '
-  for index, (template, replacement) in enumerate(config.substitutions):
-    if template in ['%clang_tsan ', '%clangxx_tsan ']:
-      config.substitutions[index] = (template, replacement + additional_cflags)
+if "libdispatch" in root.available_features:
+    additional_cflags = " -fblocks "
+    for index, (template, replacement) in enumerate(config.substitutions):
+        if template in ["%clang_tsan ", "%clangxx_tsan "]:
+            config.substitutions[index] = (template, replacement + additional_cflags)
 else:
-  config.unsupported = True
+    config.unsupported = True
 
-if config.host_os == 'Darwin':
-  config.environment['TSAN_OPTIONS'] += ':ignore_noninstrumented_modules=1'
+if config.host_os == "Darwin":
+    config.environment["TSAN_OPTIONS"] += ":ignore_noninstrumented_modules=1"

diff  --git a/compiler-rt/test/tsan/lit.cfg.py b/compiler-rt/test/tsan/lit.cfg.py
index 4295514f0a5e3..a93333e2e593d 100644
--- a/compiler-rt/test/tsan/lit.cfg.py
+++ b/compiler-rt/test/tsan/lit.cfg.py
@@ -2,17 +2,20 @@
 
 import os
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if not attr_value:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if not attr_value:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup config name.
-config.name = 'ThreadSanitizer' + config.name_suffix
+config.name = "ThreadSanitizer" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
@@ -20,75 +23,92 @@ def get_required_attr(config, attr_name):
 # Setup environment variables for running ThreadSanitizer.
 default_tsan_opts = "atexit_sleep_ms=0"
 
-if config.host_os == 'Darwin':
-  # On Darwin, we default to `abort_on_error=1`, which would make tests run
-  # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
-  default_tsan_opts += ':abort_on_error=0'
-  # On Darwin, we default to ignore_noninstrumented_modules=1, which also
-  # suppresses some races the tests are supposed to find. Let's run without this
-  # setting, but turn it back on for Darwin tests (see Darwin/lit.local.cfg.py).
-  default_tsan_opts += ':ignore_noninstrumented_modules=0'
-  default_tsan_opts += ':ignore_interceptors_accesses=0'
+if config.host_os == "Darwin":
+    # On Darwin, we default to `abort_on_error=1`, which would make tests run
+    # much slower. Let's override this and run lit tests with 'abort_on_error=0'.
+    default_tsan_opts += ":abort_on_error=0"
+    # On Darwin, we default to ignore_noninstrumented_modules=1, which also
+    # suppresses some races the tests are supposed to find. Let's run without this
+    # setting, but turn it back on for Darwin tests (see Darwin/lit.local.cfg.py).
+    default_tsan_opts += ":ignore_noninstrumented_modules=0"
+    default_tsan_opts += ":ignore_interceptors_accesses=0"
 
 # Platform-specific default TSAN_OPTIONS for lit tests.
 if default_tsan_opts:
-  config.environment['TSAN_OPTIONS'] = default_tsan_opts
-  default_tsan_opts += ':'
-config.substitutions.append(('%env_tsan_opts=',
-                             'env TSAN_OPTIONS=' + default_tsan_opts))
+    config.environment["TSAN_OPTIONS"] = default_tsan_opts
+    default_tsan_opts += ":"
+config.substitutions.append(
+    ("%env_tsan_opts=", "env TSAN_OPTIONS=" + default_tsan_opts)
+)
 
 # GCC driver doesn't add necessary compile/link flags with -fsanitize=thread.
-if config.compiler_id == 'GNU':
-  extra_cflags = ["-fPIE", "-pthread", "-ldl", "-lrt", "-pie"]
+if config.compiler_id == "GNU":
+    extra_cflags = ["-fPIE", "-pthread", "-ldl", "-lrt", "-pie"]
 else:
-  extra_cflags = []
+    extra_cflags = []
 
 tsan_incdir = config.test_source_root + "/../"
 # Setup default compiler flags used with -fsanitize=thread option.
-clang_tsan_cflags = (["-fsanitize=thread",
-                      "-Wall"] +
-                      [config.target_cflags] +
-                      config.debug_info_flags +
-                      extra_cflags +
-                      ["-I%s" % tsan_incdir])
-clang_tsan_cxxflags = config.cxx_mode_flags + clang_tsan_cflags + ["-std=c++11"] + ["-I%s" % tsan_incdir]
+clang_tsan_cflags = (
+    ["-fsanitize=thread", "-Wall"]
+    + [config.target_cflags]
+    + config.debug_info_flags
+    + extra_cflags
+    + ["-I%s" % tsan_incdir]
+)
+clang_tsan_cxxflags = (
+    config.cxx_mode_flags + clang_tsan_cflags + ["-std=c++11"] + ["-I%s" % tsan_incdir]
+)
 # Add additional flags if we're using instrumented libc++.
 # Instrumented libcxx currently not supported on Darwin.
-if config.has_libcxx and config.host_os != 'Darwin':
-  # FIXME: Dehardcode this path somehow.
-  libcxx_path = os.path.join(config.compiler_rt_obj_root, "lib",
-                             "tsan", "libcxx_tsan_%s" % config.target_arch)
-  libcxx_incdir = os.path.join(libcxx_path, "include", "c++", "v1")
-  libcxx_libdir = os.path.join(libcxx_path, "lib")
-  libcxx_a = os.path.join(libcxx_libdir, "libc++.a")
-  clang_tsan_cxxflags += ["-nostdinc++",
-                          "-I%s" % libcxx_incdir]
-  config.substitutions.append( ("%link_libcxx_tsan", libcxx_a) )
+if config.has_libcxx and config.host_os != "Darwin":
+    # FIXME: Dehardcode this path somehow.
+    libcxx_path = os.path.join(
+        config.compiler_rt_obj_root,
+        "lib",
+        "tsan",
+        "libcxx_tsan_%s" % config.target_arch,
+    )
+    libcxx_incdir = os.path.join(libcxx_path, "include", "c++", "v1")
+    libcxx_libdir = os.path.join(libcxx_path, "lib")
+    libcxx_a = os.path.join(libcxx_libdir, "libc++.a")
+    clang_tsan_cxxflags += ["-nostdinc++", "-I%s" % libcxx_incdir]
+    config.substitutions.append(("%link_libcxx_tsan", libcxx_a))
 else:
-  config.substitutions.append( ("%link_libcxx_tsan", "") )
+    config.substitutions.append(("%link_libcxx_tsan", ""))
+
 
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
 
-config.substitutions.append( ("%clang_tsan ", build_invocation(clang_tsan_cflags)) )
-config.substitutions.append( ("%clangxx_tsan ", build_invocation(clang_tsan_cxxflags)) )
 
-# Define CHECK-%os to check for OS-dependent output.
-config.substitutions.append( ('CHECK-%os', ("CHECK-" + config.host_os)))
+config.substitutions.append(("%clang_tsan ", build_invocation(clang_tsan_cflags)))
+config.substitutions.append(("%clangxx_tsan ", build_invocation(clang_tsan_cxxflags)))
 
-config.substitutions.append( ("%deflake ", os.path.join(os.path.dirname(__file__), "deflake.bash") + " " + config.deflake_threshold + " "))
+# Define CHECK-%os to check for OS-dependent output.
+config.substitutions.append(("CHECK-%os", ("CHECK-" + config.host_os)))
+
+config.substitutions.append(
+    (
+        "%deflake ",
+        os.path.join(os.path.dirname(__file__), "deflake.bash")
+        + " "
+        + config.deflake_threshold
+        + " ",
+    )
+)
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp', '.m', '.mm']
+config.suffixes = [".c", ".cpp", ".m", ".mm"]
 
-if config.host_os not in ['FreeBSD', 'Linux', 'Darwin', 'NetBSD']:
-  config.unsupported = True
+if config.host_os not in ["FreeBSD", "Linux", "Darwin", "NetBSD"]:
+    config.unsupported = True
 
 if config.android:
-  config.unsupported = True
+    config.unsupported = True
 
 if not config.parallelism_group:
-  config.parallelism_group = 'shadow-memory'
+    config.parallelism_group = "shadow-memory"
 
-if config.host_os == 'NetBSD':
-  config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+if config.host_os == "NetBSD":
+    config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))

diff  --git a/compiler-rt/test/ubsan/TestCases/Misc/Linux/lit.local.cfg.py b/compiler-rt/test/ubsan/TestCases/Misc/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/ubsan/TestCases/Misc/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/ubsan/TestCases/Misc/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff  --git a/compiler-rt/test/ubsan/TestCases/TypeCheck/Function/lit.local.cfg.py b/compiler-rt/test/ubsan/TestCases/TypeCheck/Function/lit.local.cfg.py
index b5524bf1fd180..e69d15f5b141c 100644
--- a/compiler-rt/test/ubsan/TestCases/TypeCheck/Function/lit.local.cfg.py
+++ b/compiler-rt/test/ubsan/TestCases/TypeCheck/Function/lit.local.cfg.py
@@ -1,8 +1,8 @@
-if config.host_os not in ['Darwin', 'FreeBSD', 'Linux', 'NetBSD']:
-  config.unsupported = True
+if config.host_os not in ["Darwin", "FreeBSD", "Linux", "NetBSD"]:
+    config.unsupported = True
 # Work around "Cannot represent a difference across sections"
-if config.target_arch == 'powerpc64':
-  config.unsupported = True
+if config.target_arch == "powerpc64":
+    config.unsupported = True
 # Work around "library ... not found: needed by main executable" in qemu.
-if config.android and config.target_arch not in ['x86', 'x86_64']:
-  config.unsupported = True
+if config.android and config.target_arch not in ["x86", "x86_64"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/ubsan/TestCases/TypeCheck/Linux/lit.local.cfg.py b/compiler-rt/test/ubsan/TestCases/TypeCheck/Linux/lit.local.cfg.py
index 57271b8078a49..603ca0365068f 100644
--- a/compiler-rt/test/ubsan/TestCases/TypeCheck/Linux/lit.local.cfg.py
+++ b/compiler-rt/test/ubsan/TestCases/TypeCheck/Linux/lit.local.cfg.py
@@ -1,9 +1,10 @@
 def getRoot(config):
-  if not config.parent:
-    return config
-  return getRoot(config.parent)
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
 
 root = getRoot(config)
 
-if root.host_os not in ['Linux']:
-  config.unsupported = True
+if root.host_os not in ["Linux"]:
+    config.unsupported = True

diff --git a/compiler-rt/test/ubsan/lit.common.cfg.py b/compiler-rt/test/ubsan/lit.common.cfg.py
index 7108561a92c4d..bc31913478c67 100644
--- a/compiler-rt/test/ubsan/lit.common.cfg.py
+++ b/compiler-rt/test/ubsan/lit.common.cfg.py
@@ -2,81 +2,95 @@
 
 import os
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup config name.
-config.name = 'UBSan-' + config.name_suffix
+config.name = "UBSan-" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 default_ubsan_opts = list(config.default_sanitizer_opts)
 # Choose between standalone and UBSan+ASan modes.
-ubsan_lit_test_mode = get_required_attr(config, 'ubsan_lit_test_mode')
+ubsan_lit_test_mode = get_required_attr(config, "ubsan_lit_test_mode")
 if ubsan_lit_test_mode == "Standalone":
-  config.available_features.add("ubsan-standalone")
-  clang_ubsan_cflags = []
+    config.available_features.add("ubsan-standalone")
+    clang_ubsan_cflags = []
 elif ubsan_lit_test_mode == "StandaloneStatic":
-  config.available_features.add("ubsan-standalone-static")
-  clang_ubsan_cflags = ['-static-libsan']
+    config.available_features.add("ubsan-standalone-static")
+    clang_ubsan_cflags = ["-static-libsan"]
 elif ubsan_lit_test_mode == "AddressSanitizer":
-  config.available_features.add("ubsan-asan")
-  clang_ubsan_cflags = ["-fsanitize=address"]
-  default_ubsan_opts += ['detect_leaks=0']
+    config.available_features.add("ubsan-asan")
+    clang_ubsan_cflags = ["-fsanitize=address"]
+    default_ubsan_opts += ["detect_leaks=0"]
 elif ubsan_lit_test_mode == "MemorySanitizer":
-  config.available_features.add("ubsan-msan")
-  clang_ubsan_cflags = ["-fsanitize=memory"]
+    config.available_features.add("ubsan-msan")
+    clang_ubsan_cflags = ["-fsanitize=memory"]
 elif ubsan_lit_test_mode == "ThreadSanitizer":
-  config.available_features.add("ubsan-tsan")
-  clang_ubsan_cflags = ["-fsanitize=thread"]
+    config.available_features.add("ubsan-tsan")
+    clang_ubsan_cflags = ["-fsanitize=thread"]
 else:
-  lit_config.fatal("Unknown UBSan test mode: %r" % ubsan_lit_test_mode)
+    lit_config.fatal("Unknown UBSan test mode: %r" % ubsan_lit_test_mode)
 
 # Platform-specific default for lit tests.
-if config.target_arch == 's390x':
-  # On SystemZ we need -mbackchain to make the fast unwinder work.
-  clang_ubsan_cflags.append("-mbackchain")
+if config.target_arch == "s390x":
+    # On SystemZ we need -mbackchain to make the fast unwinder work.
+    clang_ubsan_cflags.append("-mbackchain")
 
-default_ubsan_opts_str = ':'.join(default_ubsan_opts)
+default_ubsan_opts_str = ":".join(default_ubsan_opts)
 if default_ubsan_opts_str:
-  config.environment['UBSAN_OPTIONS'] = default_ubsan_opts_str
-  default_ubsan_opts_str += ':'
+    config.environment["UBSAN_OPTIONS"] = default_ubsan_opts_str
+    default_ubsan_opts_str += ":"
 # Substitution to setup UBSAN_OPTIONS in portable way.
-config.substitutions.append(('%env_ubsan_opts=',
-                             'env UBSAN_OPTIONS=' + default_ubsan_opts_str))
+config.substitutions.append(
+    ("%env_ubsan_opts=", "env UBSAN_OPTIONS=" + default_ubsan_opts_str)
+)
+
 
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
 target_cflags = [get_required_attr(config, "target_cflags")]
 clang_ubsan_cflags += target_cflags
 clang_ubsan_cxxflags = config.cxx_mode_flags + clang_ubsan_cflags
 
 # Define %clang and %clangxx substitutions to use in test RUN lines.
-config.substitutions.append( ("%clang ", build_invocation(clang_ubsan_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(clang_ubsan_cxxflags)) )
-config.substitutions.append( ("%gmlt ", " ".join(config.debug_info_flags) + " ") )
+config.substitutions.append(("%clang ", build_invocation(clang_ubsan_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(clang_ubsan_cxxflags)))
+config.substitutions.append(("%gmlt ", " ".join(config.debug_info_flags) + " "))
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp', '.m']
+config.suffixes = [".c", ".cpp", ".m"]
 
 # Check that the host supports UndefinedBehaviorSanitizer tests
-if config.host_os not in ['Linux', 'Darwin', 'FreeBSD', 'Windows', 'NetBSD', 'SunOS', 'OpenBSD']:
-  config.unsupported = True
+if config.host_os not in [
+    "Linux",
+    "Darwin",
+    "FreeBSD",
+    "Windows",
+    "NetBSD",
+    "SunOS",
+    "OpenBSD",
+]:
+    config.unsupported = True
 
-config.available_features.add('arch=' + config.target_arch)
+config.available_features.add("arch=" + config.target_arch)
 
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
-if ubsan_lit_test_mode in ['AddressSanitizer', 'MemorySanitizer', 'ThreadSanitizer']:
-  if not config.parallelism_group:
-    config.parallelism_group = 'shadow-memory'
-  if config.host_os == 'NetBSD':
-    config.substitutions.insert(0, ('%run', config.netbsd_noaslr_prefix))
+if ubsan_lit_test_mode in ["AddressSanitizer", "MemorySanitizer", "ThreadSanitizer"]:
+    if not config.parallelism_group:
+        config.parallelism_group = "shadow-memory"
+    if config.host_os == "NetBSD":
+        config.substitutions.insert(0, ("%run", config.netbsd_noaslr_prefix))

diff --git a/compiler-rt/test/ubsan_minimal/lit.common.cfg.py b/compiler-rt/test/ubsan_minimal/lit.common.cfg.py
index 39800c968dbf2..ea3666f0bcc92 100644
--- a/compiler-rt/test/ubsan_minimal/lit.common.cfg.py
+++ b/compiler-rt/test/ubsan_minimal/lit.common.cfg.py
@@ -2,39 +2,51 @@
 
 import os
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg.py " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg.py " % attr_name
+        )
+    return attr_value
+
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
-config.name = 'UBSan-Minimal-' + config.target_arch
+config.name = "UBSan-Minimal-" + config.target_arch
+
 
 def build_invocation(compile_flags):
-  return " " + " ".join([config.clang] + compile_flags) + " "
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
 target_cflags = [get_required_attr(config, "target_cflags")]
 clang_ubsan_cflags = ["-fsanitize-minimal-runtime"] + target_cflags
 clang_ubsan_cxxflags = config.cxx_mode_flags + clang_ubsan_cflags
 
 # Define %clang and %clangxx substitutions to use in test RUN lines.
-config.substitutions.append( ("%clang ", build_invocation(clang_ubsan_cflags)) )
-config.substitutions.append( ("%clangxx ", build_invocation(clang_ubsan_cxxflags)) )
+config.substitutions.append(("%clang ", build_invocation(clang_ubsan_cflags)))
+config.substitutions.append(("%clangxx ", build_invocation(clang_ubsan_cxxflags)))
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
 # Check that the host supports UndefinedBehaviorSanitizerMinimal tests
-if config.host_os not in ['Linux', 'FreeBSD', 'NetBSD', 'Darwin', 'OpenBSD', 'SunOS']: # TODO: Windows
-  config.unsupported = True
+if config.host_os not in [
+    "Linux",
+    "FreeBSD",
+    "NetBSD",
+    "Darwin",
+    "OpenBSD",
+    "SunOS",
+]:  # TODO: Windows
+    config.unsupported = True
 
 # Don't target x86_64h if the test machine can't execute x86_64h binaries.
-if '-arch x86_64h' in target_cflags and 'x86_64h' not in config.available_features:
-  config.unsupported = True
+if "-arch x86_64h" in target_cflags and "x86_64h" not in config.available_features:
+    config.unsupported = True
 
-config.available_features.add('arch=' + config.target_arch)
+config.available_features.add("arch=" + config.target_arch)

diff --git a/compiler-rt/test/xray/lit.cfg.py b/compiler-rt/test/xray/lit.cfg.py
index 1b41ca6058c1a..f73ae3acd7715 100644
--- a/compiler-rt/test/xray/lit.cfg.py
+++ b/compiler-rt/test/xray/lit.cfg.py
@@ -3,64 +3,67 @@
 import os
 
 # Setup config name.
-config.name = 'XRay' + config.name_suffix
+config.name = "XRay" + config.name_suffix
 
 # Setup source root.
 config.test_source_root = os.path.dirname(__file__)
 
 # Setup default compiler flags use with -fxray-instrument option.
-clang_xray_cflags = (['-fxray-instrument', config.target_cflags])
+clang_xray_cflags = ["-fxray-instrument", config.target_cflags]
 
 # If libc++ was used to build XRAY libraries, libc++ is needed. Fix applied
 # to Linux only since -rpath may not be portable. This can be extended to
 # other platforms.
 if config.libcxx_used == "1" and config.host_os == "Linux":
-  clang_xray_cflags = clang_xray_cflags + (['-L%s -lc++ -Wl,-rpath=%s'
-                                          % (config.llvm_shlib_dir,
-                                             config.llvm_shlib_dir)])
+    clang_xray_cflags = clang_xray_cflags + (
+        ["-L%s -lc++ -Wl,-rpath=%s" % (config.llvm_shlib_dir, config.llvm_shlib_dir)]
+    )
 
 clang_xray_cxxflags = config.cxx_mode_flags + clang_xray_cflags
 
+
 def build_invocation(compile_flags):
-  return ' ' + ' '.join([config.clang] + compile_flags) + ' '
+    return " " + " ".join([config.clang] + compile_flags) + " "
+
 
 # Assume that llvm-xray is in the config.llvm_tools_dir.
-llvm_xray = os.path.join(config.llvm_tools_dir, 'llvm-xray')
+llvm_xray = os.path.join(config.llvm_tools_dir, "llvm-xray")
 
 # Setup substitutions.
 if config.host_os == "Linux":
-  libdl_flag = "-ldl"
+    libdl_flag = "-ldl"
 else:
-  libdl_flag = ""
+    libdl_flag = ""
 
+config.substitutions.append(("%clang ", build_invocation([config.target_cflags])))
 config.substitutions.append(
-    ('%clang ', build_invocation([config.target_cflags])))
-config.substitutions.append(
-    ('%clangxx ',
-     build_invocation(config.cxx_mode_flags + [config.target_cflags])))
-config.substitutions.append(
-    ('%clang_xray ', build_invocation(clang_xray_cflags)))
-config.substitutions.append(
-    ('%clangxx_xray', build_invocation(clang_xray_cxxflags)))
-config.substitutions.append(
-    ('%llvm_xray', llvm_xray))
+    ("%clangxx ", build_invocation(config.cxx_mode_flags + [config.target_cflags]))
+)
+config.substitutions.append(("%clang_xray ", build_invocation(clang_xray_cflags)))
+config.substitutions.append(("%clangxx_xray", build_invocation(clang_xray_cxxflags)))
+config.substitutions.append(("%llvm_xray", llvm_xray))
 config.substitutions.append(
-    ('%xraylib',
-        ('-lm -lpthread %s -lrt -L%s '
-         '-Wl,-whole-archive -lclang_rt.xray%s -Wl,-no-whole-archive')
-        % (libdl_flag, config.compiler_rt_libdir, config.target_suffix)))
+    (
+        "%xraylib",
+        (
+            "-lm -lpthread %s -lrt -L%s "
+            "-Wl,-whole-archive -lclang_rt.xray%s -Wl,-no-whole-archive"
+        )
+        % (libdl_flag, config.compiler_rt_libdir, config.target_suffix),
+    )
+)
 
 # Default test suffixes.
-config.suffixes = ['.c', '.cpp']
+config.suffixes = [".c", ".cpp"]
 
-if config.host_os not in ['FreeBSD', 'Linux', 'NetBSD', 'OpenBSD']:
-  config.unsupported = True
-elif '64' not in config.host_arch:
-  if 'arm' in config.host_arch:
-    if '-mthumb' in config.target_cflags:
-      config.unsupported = True
-  else:
+if config.host_os not in ["FreeBSD", "Linux", "NetBSD", "OpenBSD"]:
     config.unsupported = True
+elif "64" not in config.host_arch:
+    if "arm" in config.host_arch:
+        if "-mthumb" in config.target_cflags:
+            config.unsupported = True
+    else:
+        config.unsupported = True
 
-if config.host_os == 'NetBSD':
-  config.substitutions.insert(0, ('%run', config.netbsd_nomprotect_prefix))
+if config.host_os == "NetBSD":
+    config.substitutions.insert(0, ("%run", config.netbsd_nomprotect_prefix))

diff --git a/compiler-rt/unittests/lit.common.unit.cfg.py b/compiler-rt/unittests/lit.common.unit.cfg.py
index dd6b5bab5b65d..557a42893ec15 100644
--- a/compiler-rt/unittests/lit.common.unit.cfg.py
+++ b/compiler-rt/unittests/lit.common.unit.cfg.py
@@ -18,7 +18,8 @@ def get_lit_conf(name, default=None):
             val = default
     return val
 
-emulator = get_lit_conf('emulator', None)
+
+emulator = get_lit_conf("emulator", None)
 
 # Setup test format
 llvm_build_mode = getattr(config, "llvm_build_mode", "Debug")
@@ -30,28 +31,28 @@ def get_lit_conf(name, default=None):
 # Tweak PATH to include llvm tools dir.
 llvm_tools_dir = config.llvm_tools_dir
 if (not llvm_tools_dir) or (not os.path.exists(llvm_tools_dir)):
-  lit_config.fatal("Invalid llvm_tools_dir config attribute: %r" % llvm_tools_dir)
-path = os.path.pathsep.join((llvm_tools_dir, config.environment['PATH']))
-config.environment['PATH'] = path
+    lit_config.fatal("Invalid llvm_tools_dir config attribute: %r" % llvm_tools_dir)
+path = os.path.pathsep.join((llvm_tools_dir, config.environment["PATH"]))
+config.environment["PATH"] = path
 
 # Propagate the temp directory. Windows requires this because it uses \Windows\
 # if none of these are present.
-if 'TMP' in os.environ:
-    config.environment['TMP'] = os.environ['TMP']
-if 'TEMP' in os.environ:
-    config.environment['TEMP'] = os.environ['TEMP']
-
-if config.host_os == 'Darwin':
-  # Only run up to 3 processes that require shadow memory simultaneously on
-  # 64-bit Darwin. Using more scales badly and hogs the system due to
-  # inefficient handling of large mmap'd regions (terabytes) by the kernel.
-  lit_config.parallelism_groups["shadow-memory"] = 3
-
-  # Disable libmalloc nano allocator due to crashes running on macOS 12.0.
-  # rdar://80086125
-  config.environment['MallocNanoZone'] = '0'
-
-  # We crash when we set DYLD_INSERT_LIBRARIES for unit tests, so interceptors
-  # don't work.
-  config.environment['ASAN_OPTIONS'] = 'verify_interceptors=0'
-  config.environment['TSAN_OPTIONS'] = 'verify_interceptors=0'
+if "TMP" in os.environ:
+    config.environment["TMP"] = os.environ["TMP"]
+if "TEMP" in os.environ:
+    config.environment["TEMP"] = os.environ["TEMP"]
+
+if config.host_os == "Darwin":
+    # Only run up to 3 processes that require shadow memory simultaneously on
+    # 64-bit Darwin. Using more scales badly and hogs the system due to
+    # inefficient handling of large mmap'd regions (terabytes) by the kernel.
+    lit_config.parallelism_groups["shadow-memory"] = 3
+
+    # Disable libmalloc nano allocator due to crashes running on macOS 12.0.
+    # rdar://80086125
+    config.environment["MallocNanoZone"] = "0"
+
+    # We crash when we set DYLD_INSERT_LIBRARIES for unit tests, so interceptors
+    # don't work.
+    config.environment["ASAN_OPTIONS"] = "verify_interceptors=0"
+    config.environment["TSAN_OPTIONS"] = "verify_interceptors=0"

diff --git a/cross-project-tests/amdgpu/lit.local.cfg b/cross-project-tests/amdgpu/lit.local.cfg
index 231c8aecb0692..23bb1a5dbf890 100644
--- a/cross-project-tests/amdgpu/lit.local.cfg
+++ b/cross-project-tests/amdgpu/lit.local.cfg
@@ -1,2 +1,2 @@
-if 'clang' not in config.available_features or 'AMDGPU' not in config.targets_to_build:
+if "clang" not in config.available_features or "AMDGPU" not in config.targets_to_build:
     config.unsupported = True

diff --git a/cross-project-tests/debuginfo-tests/clang_llvm_roundtrip/lit.local.cfg b/cross-project-tests/debuginfo-tests/clang_llvm_roundtrip/lit.local.cfg
index 3ce1bcf85c2e3..2f7d525fa3ca5 100644
--- a/cross-project-tests/debuginfo-tests/clang_llvm_roundtrip/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/clang_llvm_roundtrip/lit.local.cfg
@@ -1,3 +1,3 @@
 # In MSVC mode DWARF isn't produced & is needed for these tests
-if 'native' not in config.available_features or config.is_msvc:
+if "native" not in config.available_features or config.is_msvc:
     config.unsupported = True

diff --git a/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg b/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg
index a081e8e87daf7..bace385c23f7c 100644
--- a/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg
@@ -1,2 +1,2 @@
-if 'dexter' not in config.available_features:
+if "dexter" not in config.available_features:
     config.unsupported = True

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/__init__.py b/cross-project-tests/debuginfo-tests/dexter/dex/__init__.py
index d2a290b0ee065..e79a8b3cad61a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/__init__.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/__init__.py
@@ -5,4 +5,4 @@
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-__version__ = '1.0.0'
+__version__ = "1.0.0"

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/builder/Builder.py b/cross-project-tests/debuginfo-tests/dexter/dex/builder/Builder.py
index 153c7685a2315..1d6487696423f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/builder/Builder.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/builder/Builder.py
@@ -16,40 +16,44 @@
 
 
 def _quotify(text):
-    if '"' in text or ' ' not in text:
+    if '"' in text or " " not in text:
         return text
     return '"{}"'.format(text)
 
 
-def _get_script_environment(source_files, compiler_options,
-                            linker_options, executable_file):
+def _get_script_environment(
+    source_files, compiler_options, linker_options, executable_file
+):
 
     source_files = [_quotify(f) for f in source_files]
-    object_files = [
-        _quotify('{}.o'.format(os.path.basename(f))) for f in source_files
-    ]
-    source_indexes = ['{:02d}'.format(i + 1) for i in range(len(source_files))]
+    object_files = [_quotify("{}.o".format(os.path.basename(f))) for f in source_files]
+    source_indexes = ["{:02d}".format(i + 1) for i in range(len(source_files))]
 
     env_variables = {}
-    env_variables['SOURCE_INDEXES'] = ' '.join(source_indexes)
-    env_variables['SOURCE_FILES'] = ' '.join(source_files)
-    env_variables['OBJECT_FILES'] = ' '.join(object_files)
-    env_variables['LINKER_OPTIONS'] = linker_options
+    env_variables["SOURCE_INDEXES"] = " ".join(source_indexes)
+    env_variables["SOURCE_FILES"] = " ".join(source_files)
+    env_variables["OBJECT_FILES"] = " ".join(object_files)
+    env_variables["LINKER_OPTIONS"] = linker_options
 
     for i, _ in enumerate(source_files):
         index = source_indexes[i]
-        env_variables['SOURCE_FILE_{}'.format(index)] = source_files[i]
-        env_variables['OBJECT_FILE_{}'.format(index)] = object_files[i]
-        env_variables['COMPILER_OPTIONS_{}'.format(index)] = compiler_options[i]
+        env_variables["SOURCE_FILE_{}".format(index)] = source_files[i]
+        env_variables["OBJECT_FILE_{}".format(index)] = object_files[i]
+        env_variables["COMPILER_OPTIONS_{}".format(index)] = compiler_options[i]
 
-    env_variables['EXECUTABLE_FILE'] = executable_file
+    env_variables["EXECUTABLE_FILE"] = executable_file
 
     return env_variables
 
 
-def run_external_build_script(context, script_path, source_files,
-                              compiler_options, linker_options,
-                              executable_file):
+def run_external_build_script(
+    context,
+    script_path,
+    source_files,
+    compiler_options,
+    linker_options,
+    executable_file,
+):
     """Build an executable using a builder script.
 
     The executable is saved to `context.working_directory.path`.
@@ -63,57 +67,61 @@ def run_external_build_script(context, script_path, source_files,
         cflags=compiler_options,
         ldflags=linker_options,
     )
-    assert len(source_files) == len(compiler_options), (source_files,
-                                                        compiler_options)
+    assert len(source_files) == len(compiler_options), (source_files, compiler_options)
 
-    script_environ = _get_script_environment(source_files, compiler_options,
-                                             linker_options, executable_file)
+    script_environ = _get_script_environment(
+        source_files, compiler_options, linker_options, executable_file
+    )
     env = dict(os.environ)
     env.update(script_environ)
     try:
-        with Timer('running build script'):
+        with Timer("running build script"):
             process = subprocess.Popen(
                 [script_path],
                 cwd=context.working_directory.path,
                 env=env,
                 stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE)
+                stderr=subprocess.PIPE,
+            )
             out, err = process.communicate()
             returncode = process.returncode
-        out = out.decode('utf-8')
-        err = err.decode('utf-8')
+        out = out.decode("utf-8")
+        err = err.decode("utf-8")
         if returncode != 0:
             raise BuildScriptException(
-                '{}: failed with returncode {}.\nstdout:\n{}\n\nstderr:\n{}\n'.
-                format(script_path, returncode, out, err),
-                script_error=err)
+                "{}: failed with returncode {}.\nstdout:\n{}\n\nstderr:\n{}\n".format(
+                    script_path, returncode, out, err
+                ),
+                script_error=err,
+            )
         return out, err, builderIR
     except OSError as e:
-        raise BuildScriptException('{}: {}'.format(e.strerror, script_path))
+        raise BuildScriptException("{}: {}".format(e.strerror, script_path))
 
 
 class TestBuilder(unittest.TestCase):
     def test_get_script_environment(self):
-        source_files = ['a.a', 'b.b']
-        compiler_options = ['-option1 value1', '-option2 value2']
-        linker_options = '-optionX valueX'
-        executable_file = 'exe.exe'
-        env = _get_script_environment(source_files, compiler_options,
-                                      linker_options, executable_file)
+        source_files = ["a.a", "b.b"]
+        compiler_options = ["-option1 value1", "-option2 value2"]
+        linker_options = "-optionX valueX"
+        executable_file = "exe.exe"
+        env = _get_script_environment(
+            source_files, compiler_options, linker_options, executable_file
+        )
 
-        assert env['SOURCE_FILES'] == 'a.a b.b'
-        assert env['OBJECT_FILES'] == 'a.a.o b.b.o'
+        assert env["SOURCE_FILES"] == "a.a b.b"
+        assert env["OBJECT_FILES"] == "a.a.o b.b.o"
 
-        assert env['SOURCE_INDEXES'] == '01 02'
-        assert env['LINKER_OPTIONS'] == '-optionX valueX'
+        assert env["SOURCE_INDEXES"] == "01 02"
+        assert env["LINKER_OPTIONS"] == "-optionX valueX"
 
-        assert env['SOURCE_FILE_01'] == 'a.a'
-        assert env['SOURCE_FILE_02'] == 'b.b'
+        assert env["SOURCE_FILE_01"] == "a.a"
+        assert env["SOURCE_FILE_02"] == "b.b"
 
-        assert env['OBJECT_FILE_01'] == 'a.a.o'
-        assert env['OBJECT_FILE_02'] == 'b.b.o'
+        assert env["OBJECT_FILE_01"] == "a.a.o"
+        assert env["OBJECT_FILE_02"] == "b.b.o"
 
-        assert env['EXECUTABLE_FILE'] == 'exe.exe'
+        assert env["EXECUTABLE_FILE"] == "exe.exe"
 
-        assert env['COMPILER_OPTIONS_01'] == '-option1 value1'
-        assert env['COMPILER_OPTIONS_02'] == '-option2 value2'
+        assert env["COMPILER_OPTIONS_01"] == "-option1 value1"
+        assert env["COMPILER_OPTIONS_02"] == "-option2 value2"

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/builder/ParserOptions.py b/cross-project-tests/debuginfo-tests/dexter/dex/builder/ParserOptions.py
index 27683f4c418b3..14162439f87ce 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/builder/ParserOptions.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/builder/ParserOptions.py
@@ -21,17 +21,18 @@ def _find_build_scripts():
     try:
         return _find_build_scripts.cached
     except AttributeError:
-        scripts_directory = os.path.join(os.path.dirname(__file__), 'scripts')
+        scripts_directory = os.path.join(os.path.dirname(__file__), "scripts")
         if is_native_windows():
-            scripts_directory = os.path.join(scripts_directory, 'windows')
+            scripts_directory = os.path.join(scripts_directory, "windows")
         else:
-            scripts_directory = os.path.join(scripts_directory, 'posix')
+            scripts_directory = os.path.join(scripts_directory, "posix")
         assert os.path.isdir(scripts_directory), scripts_directory
         results = {}
 
         for f in os.listdir(scripts_directory):
             results[os.path.splitext(f)[0]] = os.path.abspath(
-                os.path.join(scripts_directory, f))
+                os.path.join(scripts_directory, f)
+            )
 
         _find_build_scripts.cached = results
         return results
@@ -39,20 +40,23 @@ def _find_build_scripts():
 
 def add_builder_tool_arguments(parser):
     build_group = parser.add_mutually_exclusive_group(required=True)
-    build_group.add_argument('--binary',
-                             metavar="<file>",
-                             help='provide binary file instead of --builder')
+    build_group.add_argument(
+        "--binary", metavar="<file>", help="provide binary file instead of --builder"
+    )
 
     build_group.add_argument(
-        '--builder',
+        "--builder",
         type=str,
         choices=sorted(_find_build_scripts().keys()),
-        help='test builder to use')
-    build_group.add_argument('--vs-solution', metavar="<file>",
-        help='provide a path to an already existing visual studio solution.')
-    parser.add_argument(
-        '--cflags', type=str, default='', help='compiler flags')
-    parser.add_argument('--ldflags', type=str, default='', help='linker flags')
+        help="test builder to use",
+    )
+    build_group.add_argument(
+        "--vs-solution",
+        metavar="<file>",
+        help="provide a path to an already existing visual studio solution.",
+    )
+    parser.add_argument("--cflags", type=str, default="", help="compiler flags")
+    parser.add_argument("--ldflags", type=str, default="", help="linker flags")
 
 
 def handle_builder_tool_options(context: Context) -> str:

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/CommandBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/CommandBase.py
index fdeb97f71f294..b52544dfbf078 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/CommandBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/CommandBase.py
@@ -13,13 +13,14 @@
 from collections import namedtuple
 from typing import List
 
-StepExpectInfo = namedtuple('StepExpectInfo', 'expression, path, frame_idx, line_range')
+StepExpectInfo = namedtuple("StepExpectInfo", "expression, path, frame_idx, line_range")
+
 
 class CommandBase(object, metaclass=abc.ABCMeta):
     def __init__(self):
         self.path = None
         self.lineno = None
-        self.raw_text = ''
+        self.raw_text = ""
 
     def get_label_args(self):
         return list()

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/ParseCommand.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/ParseCommand.py
index 1027ba0d9beb4..5afefb1142fc7 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/ParseCommand.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/ParseCommand.py
@@ -26,7 +26,10 @@
 from dex.command.commands.DexExpectStepOrder import DexExpectStepOrder
 from dex.command.commands.DexExpectWatchType import DexExpectWatchType
 from dex.command.commands.DexExpectWatchValue import DexExpectWatchValue
-from dex.command.commands.DexExpectWatchBase import AddressExpression, DexExpectWatchBase
+from dex.command.commands.DexExpectWatchBase import (
+    AddressExpression,
+    DexExpectWatchBase,
+)
 from dex.command.commands.DexLabel import DexLabel
 from dex.command.commands.DexLimitSteps import DexLimitSteps
 from dex.command.commands.DexFinishTest import DexFinishTest
@@ -35,6 +38,7 @@
 from dex.utils import Timer
 from dex.utils.Exceptions import CommandParseError, DebuggerException
 
+
 def _get_valid_commands():
     """Return all top level DExTer test commands.
 
@@ -42,19 +46,19 @@ def _get_valid_commands():
         { name (str): command (class) }
     """
     return {
-      DexCommandLine.get_name() : DexCommandLine,
-      DexDeclareAddress.get_name() : DexDeclareAddress,
-      DexDeclareFile.get_name() : DexDeclareFile,
-      DexExpectProgramState.get_name() : DexExpectProgramState,
-      DexExpectStepKind.get_name() : DexExpectStepKind,
-      DexExpectStepOrder.get_name() : DexExpectStepOrder,
-      DexExpectWatchType.get_name() : DexExpectWatchType,
-      DexExpectWatchValue.get_name() : DexExpectWatchValue,
-      DexLabel.get_name() : DexLabel,
-      DexLimitSteps.get_name() : DexLimitSteps,
-      DexFinishTest.get_name() : DexFinishTest,
-      DexUnreachable.get_name() : DexUnreachable,
-      DexWatch.get_name() : DexWatch
+        DexCommandLine.get_name(): DexCommandLine,
+        DexDeclareAddress.get_name(): DexDeclareAddress,
+        DexDeclareFile.get_name(): DexDeclareFile,
+        DexExpectProgramState.get_name(): DexExpectProgramState,
+        DexExpectStepKind.get_name(): DexExpectStepKind,
+        DexExpectStepOrder.get_name(): DexExpectStepOrder,
+        DexExpectWatchType.get_name(): DexExpectWatchType,
+        DexExpectWatchValue.get_name(): DexExpectWatchValue,
+        DexLabel.get_name(): DexLabel,
+        DexLimitSteps.get_name(): DexLimitSteps,
+        DexFinishTest.get_name(): DexFinishTest,
+        DexUnreachable.get_name(): DexUnreachable,
+        DexWatch.get_name(): DexWatch,
     }
 
 
@@ -63,7 +67,7 @@ def _get_command_name(command_raw: str) -> str:
     command_raw on the first opening paranthesis and further stripping
     any potential leading or trailing whitespace.
     """
-    return command_raw.split('(', 1)[0].rstrip()
+    return command_raw.split("(", 1)[0].rstrip()
 
 
 def _merge_subcommands(command_name: str, valid_commands: dict) -> dict:
@@ -74,11 +78,13 @@ def _merge_subcommands(command_name: str, valid_commands: dict) -> dict:
     """
     subcommands = valid_commands[command_name].get_subcommands()
     if subcommands:
-        return { **valid_commands, **subcommands }
+        return {**valid_commands, **subcommands}
     return valid_commands
 
 
-def _build_command(command_type, labels, addresses, raw_text: str, path: str, lineno: str) -> CommandBase:
+def _build_command(
+    command_type, labels, addresses, raw_text: str, path: str, lineno: str
+) -> CommandBase:
     """Build a command object from raw text.
 
     This function will call eval().
@@ -89,23 +95,26 @@ def _build_command(command_type, labels, addresses, raw_text: str, path: str, li
     Returns:
         A dexter command object.
     """
+
     def label_to_line(label_name: str) -> int:
         line = labels.get(label_name, None)
         if line != None:
             return line
         raise format_unresolved_label_err(label_name, raw_text, path, lineno)
 
-    def get_address_object(address_name: str, offset: int=0):
+    def get_address_object(address_name: str, offset: int = 0):
         if address_name not in addresses:
             raise format_undeclared_address_err(address_name, raw_text, path, lineno)
         return AddressExpression(address_name, offset)
 
     valid_commands = _merge_subcommands(
-        command_type.get_name(), {
-            'ref': label_to_line,
-            'address': get_address_object,
+        command_type.get_name(),
+        {
+            "ref": label_to_line,
+            "address": get_address_object,
             command_type.get_name(): command_type,
-        })
+        },
+    )
 
     # pylint: disable=eval-used
     command = eval(raw_text, valid_commands)
@@ -130,7 +139,7 @@ def _search_line_for_cmd_start(line: str, start: int, valid_commands: dict) -> i
         idx = line.find(command, start)
         if idx != -1:
             # Ignore escaped '\' commands.
-            if idx > 0 and line[idx - 1] == '\\':
+            if idx > 0 and line[idx - 1] == "\\":
                 continue
             return idx
     return -1
@@ -158,17 +167,17 @@ def _search_line_for_cmd_end(line: str, start: int, paren_balance: int) -> (int,
     """
     for end in range(start, len(line)):
         ch = line[end]
-        if ch == '(':
+        if ch == "(":
             paren_balance += 1
-        elif ch == ')':
-            paren_balance -=1
+        elif ch == ")":
+            paren_balance -= 1
         if paren_balance == 0:
             break
     end += 1
     return (end, paren_balance)
 
 
-class TextPoint():
+class TextPoint:
     def __init__(self, line, char):
         self.line = line
         self.char = char
@@ -180,37 +189,45 @@ def get_column(self):
         return self.char + 1
 
 
-def format_unresolved_label_err(label: str, src: str, filename: str, lineno) -> CommandParseError:
+def format_unresolved_label_err(
+    label: str, src: str, filename: str, lineno
+) -> CommandParseError:
     err = CommandParseError()
     err.src = src
-    err.caret = '' # Don't bother trying to point to the bad label.
+    err.caret = ""  # Don't bother trying to point to the bad label.
     err.filename = filename
     err.lineno = lineno
-    err.info = f'Unresolved label: \'{label}\''
+    err.info = f"Unresolved label: '{label}'"
     return err
 
-def format_undeclared_address_err(address: str, src: str, filename: str, lineno) -> CommandParseError:
+
+def format_undeclared_address_err(
+    address: str, src: str, filename: str, lineno
+) -> CommandParseError:
     err = CommandParseError()
     err.src = src
-    err.caret = '' # Don't bother trying to point to the bad address.
+    err.caret = ""  # Don't bother trying to point to the bad address.
     err.filename = filename
     err.lineno = lineno
-    err.info = f'Undeclared address: \'{address}\''
+    err.info = f"Undeclared address: '{address}'"
     return err
 
-def format_parse_err(msg: str, path: str, lines: list, point: TextPoint) -> CommandParseError:
+
+def format_parse_err(
+    msg: str, path: str, lines: list, point: TextPoint
+) -> CommandParseError:
     err = CommandParseError()
     err.filename = path
     err.src = lines[point.line].rstrip()
     err.lineno = point.get_lineno()
     err.info = msg
-    err.caret = '{}<r>^</>'.format(' ' * (point.char))
+    err.caret = "{}<r>^</>".format(" " * (point.char))
     return err
 
 
 def skip_horizontal_whitespace(line, point):
-    for idx, char in enumerate(line[point.char:]):
-        if char not in ' \t':
+    for idx, char in enumerate(line[point.char :]):
+        if char not in " \t":
             point.char += idx
             return
 
@@ -219,34 +236,36 @@ def add_line_label(labels, label, cmd_path, cmd_lineno):
     # Enforce unique line labels.
     if label.eval() in labels:
         err = CommandParseError()
-        err.info = f'Found duplicate line label: \'{label.eval()}\''
+        err.info = f"Found duplicate line label: '{label.eval()}'"
         err.lineno = cmd_lineno
         err.filename = cmd_path
         err.src = label.raw_text
         # Don't both trying to point to it since we're only printing the raw
         # command, which isn't much text.
-        err.caret = ''
+        err.caret = ""
         raise err
     labels[label.eval()] = label.get_line()
 
+
 def add_address(addresses, address, cmd_path, cmd_lineno):
     # Enforce unique address variables.
     address_name = address.get_address_name()
     if address_name in addresses:
         err = CommandParseError()
-        err.info = f'Found duplicate address: \'{address_name}\''
+        err.info = f"Found duplicate address: '{address_name}'"
         err.lineno = cmd_lineno
         err.filename = cmd_path
         err.src = address.raw_text
         # Don't both trying to point to it since we're only printing the raw
         # command, which isn't much text.
-        err.caret = ''
+        err.caret = ""
         raise err
     addresses.append(address_name)
 
+
 def _find_all_commands_in_file(path, file_lines, valid_commands, source_root_dir):
-    labels = {} # dict of {name: line}.
-    addresses = [] # list of addresses.
+    labels = {}  # dict of {name: line}.
+    addresses = []  # list of addresses.
     address_resolutions = {}
     cmd_path = path
     declared_files = set()
@@ -262,23 +281,30 @@ def _find_all_commands_in_file(path, file_lines, valid_commands, source_root_dir
         while True:
             # If parens are currently balanced we can look for a new command.
             if paren_balance == 0:
-                region_start.char = _search_line_for_cmd_start(line, region_start.char, valid_commands)
+                region_start.char = _search_line_for_cmd_start(
+                    line, region_start.char, valid_commands
+                )
                 if region_start.char == -1:
-                    break # Read next line.
+                    break  # Read next line.
 
-                command_name = _get_command_name(line[region_start.char:])
+                command_name = _get_command_name(line[region_start.char :])
                 cmd_point = copy(region_start)
                 cmd_text_list = [command_name]
 
-                region_start.char += len(command_name) # Start searching for parens after cmd.
+                region_start.char += len(
+                    command_name
+                )  # Start searching for parens after cmd.
                 skip_horizontal_whitespace(line, region_start)
-                if region_start.char >= len(line) or line[region_start.char] != '(':
+                if region_start.char >= len(line) or line[region_start.char] != "(":
                     raise format_parse_err(
-                        "Missing open parenthesis", path, file_lines, region_start)
+                        "Missing open parenthesis", path, file_lines, region_start
+                    )
 
-            end, paren_balance = _search_line_for_cmd_end(line, region_start.char, paren_balance)
+            end, paren_balance = _search_line_for_cmd_end(
+                line, region_start.char, paren_balance
+            )
             # Add this text blob to the command.
-            cmd_text_list.append(line[region_start.char:end])
+            cmd_text_list.append(line[region_start.char : end])
             # Move parse ptr to end of line or parens.
             region_start.char = end
 
@@ -302,8 +328,8 @@ def _find_all_commands_in_file(path, file_lines, valid_commands, source_root_dir
                 # This err should point to the problem line.
                 err_point = copy(cmd_point)
                 # To e the command start is the absolute start, so use as offset.
-                err_point.line += e.lineno - 1 # e.lineno is a position, not index.
-                err_point.char += e.offset - 1 # e.offset is a position, not index.
+                err_point.line += e.lineno - 1  # e.lineno is a position, not index.
+                err_point.char += e.offset - 1  # e.offset is a position, not index.
                 raise format_parse_err(e.msg, path, file_lines, err_point)
             except TypeError as e:
                 # This err should always point to the end of the command name.
@@ -322,18 +348,23 @@ def _find_all_commands_in_file(path, file_lines, valid_commands, source_root_dir
                 elif type(command) is DexDeclareFile:
                     cmd_path = command.declared_file
                     if not os.path.isabs(cmd_path):
-                        source_dir = (source_root_dir if source_root_dir else
-                                      os.path.dirname(path))
+                        source_dir = (
+                            source_root_dir
+                            if source_root_dir
+                            else os.path.dirname(path)
+                        )
                         cmd_path = os.path.join(source_dir, cmd_path)
                     # TODO: keep stored paths as PurePaths for 'longer'.
                     cmd_path = str(PurePath(cmd_path))
                     declared_files.add(cmd_path)
-                elif type(command) is DexCommandLine and 'DexCommandLine' in commands:
+                elif type(command) is DexCommandLine and "DexCommandLine" in commands:
                     msg = "More than one DexCommandLine in file"
                     raise format_parse_err(msg, path, file_lines, err_point)
 
                 assert (path, cmd_point) not in commands[command_name], (
-                    command_name, commands[command_name])
+                    command_name,
+                    commands[command_name],
+                )
                 commands[command_name][path, cmd_point] = command
 
     if paren_balance != 0:
@@ -344,6 +375,7 @@ def _find_all_commands_in_file(path, file_lines, valid_commands, source_root_dir
         raise format_parse_err(msg, path, file_lines, err_point)
     return dict(commands), declared_files
 
+
 def _find_all_commands(test_files, source_root_dir):
     commands = defaultdict(dict)
     valid_commands = _get_valid_commands()
@@ -352,28 +384,32 @@ def _find_all_commands(test_files, source_root_dir):
         with open(test_file) as fp:
             lines = fp.readlines()
         file_commands, declared_files = _find_all_commands_in_file(
-            test_file, lines, valid_commands, source_root_dir)
+            test_file, lines, valid_commands, source_root_dir
+        )
         for command_name in file_commands:
             commands[command_name].update(file_commands[command_name])
         new_source_files |= declared_files
 
     return dict(commands), new_source_files
 
+
 def get_command_infos(test_files, source_root_dir):
-  with Timer('parsing commands'):
-      try:
-          commands, new_source_files = _find_all_commands(test_files, source_root_dir)
-          command_infos = OrderedDict()
-          for command_type in commands:
-              for command in commands[command_type].values():
-                  if command_type not in command_infos:
-                      command_infos[command_type] = []
-                  command_infos[command_type].append(command)
-          return OrderedDict(command_infos), new_source_files
-      except CommandParseError as e:
-          msg = 'parser error: <d>{}({}):</> {}\n{}\n{}\n'.format(
-                e.filename, e.lineno, e.info, e.src, e.caret)
-          raise DebuggerException(msg)
+    with Timer("parsing commands"):
+        try:
+            commands, new_source_files = _find_all_commands(test_files, source_root_dir)
+            command_infos = OrderedDict()
+            for command_type in commands:
+                for command in commands[command_type].values():
+                    if command_type not in command_infos:
+                        command_infos[command_type] = []
+                    command_infos[command_type].append(command)
+            return OrderedDict(command_infos), new_source_files
+        except CommandParseError as e:
+            msg = "parser error: <d>{}({}):</> {}\n{}\n{}\n".format(
+                e.filename, e.lineno, e.info, e.src, e.caret
+            )
+            raise DebuggerException(msg)
+
 
 class TestParseCommand(unittest.TestCase):
     class MockCmd(CommandBase):
@@ -384,7 +420,7 @@ class MockCmd(CommandBase):
         """
 
         def __init__(self, *args):
-           self.value = args[0]
+            self.value = args[0]
 
         def get_name():
             return __class__.__name__
@@ -392,25 +428,24 @@ def get_name():
         def eval(this):
             pass
 
-
     def __init__(self, *args):
         super().__init__(*args)
 
         self.valid_commands = {
-            TestParseCommand.MockCmd.get_name() : TestParseCommand.MockCmd
+            TestParseCommand.MockCmd.get_name(): TestParseCommand.MockCmd
         }
 
-
     def _find_all_commands_in_lines(self, lines):
         """Use DExTer parsing methods to find all the mock commands in lines.
 
         Returns:
             { cmd_name: { (path, line): command_obj } }
         """
-        cmds, declared_files = _find_all_commands_in_file(__file__, lines, self.valid_commands, None)
+        cmds, declared_files = _find_all_commands_in_file(
+            __file__, lines, self.valid_commands, None
+        )
         return cmds
 
-
     def _find_all_mock_values_in_lines(self, lines):
         """Use DExTer parsing methods to find all mock command values in lines.
 
@@ -421,36 +456,34 @@ def _find_all_mock_values_in_lines(self, lines):
         mocks = cmds.get(TestParseCommand.MockCmd.get_name(), None)
         return [v.value for v in mocks.values()] if mocks else []
 
-
     def test_parse_inline(self):
         """Commands can be embedded in other text."""
 
         lines = [
             'MockCmd("START") Lorem ipsum dolor sit amet, consectetur\n',
             'adipiscing elit, MockCmd("EMBEDDED") sed doeiusmod tempor,\n',
-            'incididunt ut labore et dolore magna aliqua.\n'
+            "incididunt ut labore et dolore magna aliqua.\n",
         ]
 
         values = self._find_all_mock_values_in_lines(lines)
 
-        self.assertTrue('START' in values)
-        self.assertTrue('EMBEDDED' in values)
-
+        self.assertTrue("START" in values)
+        self.assertTrue("EMBEDDED" in values)
 
     def test_parse_multi_line_comment(self):
         """Multi-line commands can embed comments."""
 
         lines = [
-            'Lorem ipsum dolor sit amet, consectetur\n',
-            'adipiscing elit, sed doeiusmod tempor,\n',
-            'incididunt ut labore et MockCmd(\n',
+            "Lorem ipsum dolor sit amet, consectetur\n",
+            "adipiscing elit, sed doeiusmod tempor,\n",
+            "incididunt ut labore et MockCmd(\n",
             '    "WITH_COMMENT" # THIS IS A COMMENT\n',
-            ') dolore magna aliqua. Ut enim ad minim\n',
+            ") dolore magna aliqua. Ut enim ad minim\n",
         ]
 
         values = self._find_all_mock_values_in_lines(lines)
 
-        self.assertTrue('WITH_COMMENT' in values)
+        self.assertTrue("WITH_COMMENT" in values)
 
     def test_parse_empty(self):
         """Empty files are silently ignored."""
@@ -462,7 +495,7 @@ def test_parse_empty(self):
     def test_parse_bad_whitespace(self):
         """Throw exception when parsing badly formed whitespace."""
         lines = [
-            'MockCmd\n',
+            "MockCmd\n",
             '("XFAIL_CMD_LF_PAREN")\n',
         ]
 
@@ -478,42 +511,38 @@ def test_parse_good_whitespace(self):
             'MockCmd\t\t("TABS")\n',
             'MockCmd(    "ARG_SPACE"    )\n',
             'MockCmd(\t\t"ARG_TABS"\t\t)\n',
-            'MockCmd(\n',
+            "MockCmd(\n",
             '"CMD_PAREN_LF")\n',
         ]
 
         values = self._find_all_mock_values_in_lines(lines)
 
-        self.assertTrue('NONE' in values)
-        self.assertTrue('SPACE' in values)
-        self.assertTrue('TABS' in values)
-        self.assertTrue('ARG_SPACE' in values)
-        self.assertTrue('ARG_TABS' in values)
-        self.assertTrue('CMD_PAREN_LF' in values)
-
+        self.assertTrue("NONE" in values)
+        self.assertTrue("SPACE" in values)
+        self.assertTrue("TABS" in values)
+        self.assertTrue("ARG_SPACE" in values)
+        self.assertTrue("ARG_TABS" in values)
+        self.assertTrue("CMD_PAREN_LF" in values)
 
     def test_parse_share_line(self):
         """More than one command can appear on one line."""
 
         lines = [
             'MockCmd("START") MockCmd("CONSECUTIVE") words '
-                'MockCmd("EMBEDDED") more words\n'
+            'MockCmd("EMBEDDED") more words\n'
         ]
 
         values = self._find_all_mock_values_in_lines(lines)
 
-        self.assertTrue('START' in values)
-        self.assertTrue('CONSECUTIVE' in values)
-        self.assertTrue('EMBEDDED' in values)
-
+        self.assertTrue("START" in values)
+        self.assertTrue("CONSECUTIVE" in values)
+        self.assertTrue("EMBEDDED" in values)
 
     def test_parse_escaped(self):
         """Escaped commands are ignored."""
 
-        lines = [
-            'words \MockCmd("IGNORED") words words words\n'
-        ]
+        lines = ['words \MockCmd("IGNORED") words words words\n']
 
         values = self._find_all_mock_values_in_lines(lines)
 
-        self.assertFalse('IGNORED' in values)
+        self.assertFalse("IGNORED" in values)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/StepValueInfo.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/StepValueInfo.py
index afcb9c5d0c800..92cd576533fa2 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/StepValueInfo.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/StepValueInfo.py
@@ -13,11 +13,15 @@ def __init__(self, step_index, watch_info, expected_value):
         self.expected_value = expected_value
 
     def __str__(self):
-        return '{}:{}: expected value:{}'.format(self.step_index, self.watch_info, self.expected_value)
+        return "{}:{}: expected value:{}".format(
+            self.step_index, self.watch_info, self.expected_value
+        )
 
     def __eq__(self, other):
-        return (self.watch_info.expression == other.watch_info.expression
-                and self.expected_value == other.expected_value)
+        return (
+            self.watch_info.expression == other.watch_info.expression
+            and self.expected_value == other.expected_value
+        )
 
     def __hash__(self):
         return hash(self.watch_info.expression, self.expected_value)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexCommandLine.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexCommandLine.py
index 76b2ae5196c54..77615e7845f55 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexCommandLine.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexCommandLine.py
@@ -9,18 +9,21 @@
 
 from dex.command.CommandBase import CommandBase
 
+
 class DexCommandLine(CommandBase):
     def __init__(self, the_cmdline):
         if type(the_cmdline) is not list:
-            raise TypeError('Expected list, got {}'.format(type(the_cmdline)))
+            raise TypeError("Expected list, got {}".format(type(the_cmdline)))
         for x in the_cmdline:
-          if type(x) is not str:
-              raise TypeError('Command line element "{}" has type {}'.format(x, type(x)))
+            if type(x) is not str:
+                raise TypeError(
+                    'Command line element "{}" has type {}'.format(x, type(x))
+                )
         self.the_cmdline = the_cmdline
         super(DexCommandLine, self).__init__()
 
     def eval(self):
-        raise NotImplementedError('DexCommandLine commands cannot be evaled.')
+        raise NotImplementedError("DexCommandLine commands cannot be evaled.")
 
     @staticmethod
     def get_name():

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py
index 29d2dc7610742..dcf5aea4b157a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py
@@ -12,16 +12,17 @@
 
 from dex.command.CommandBase import CommandBase, StepExpectInfo
 
+
 class DexDeclareAddress(CommandBase):
     def __init__(self, addr_name, expression, **kwargs):
 
         if not isinstance(addr_name, str):
-            raise TypeError('invalid argument type')
+            raise TypeError("invalid argument type")
 
         self.addr_name = addr_name
         self.expression = expression
-        self.on_line = kwargs.pop('on_line')
-        self.hit_count = kwargs.pop('hit_count', 0)
+        self.on_line = kwargs.pop("on_line")
+        self.hit_count = kwargs.pop("hit_count", 0)
 
         self.address_resolutions = None
 
@@ -32,7 +33,11 @@ def get_name():
         return __class__.__name__
 
     def get_watches(self):
-        return [StepExpectInfo(self.expression, self.path, 0, range(self.on_line, self.on_line + 1))]
+        return [
+            StepExpectInfo(
+                self.expression, self.path, 0, range(self.on_line, self.on_line + 1)
+            )
+        ]
 
     def get_address_name(self):
         return self.addr_name
@@ -42,9 +47,12 @@ def eval(self, step_collection):
         for step in step_collection.steps:
             loc = step.current_location
 
-            if (loc.path and self.path and
-                PurePath(loc.path) == PurePath(self.path) and
-                loc.lineno == self.on_line):
+            if (
+                loc.path
+                and self.path
+                and PurePath(loc.path) == PurePath(self.path)
+                and loc.lineno == self.on_line
+            ):
                 if self.hit_count > 0:
                     self.hit_count -= 1
                     continue

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py
index c40c854575d97..1377408c18099 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py
@@ -16,7 +16,7 @@ class DexDeclareFile(CommandBase):
     def __init__(self, declared_file):
 
         if not isinstance(declared_file, str):
-            raise TypeError('invalid argument type')
+            raise TypeError("invalid argument type")
 
         # Use PurePath to create a cannonical platform path.
         # TODO: keep paths as PurePath objects for 'longer'

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectProgramState.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectProgramState.py
index 24b760ae9146f..54c62a1be8d00 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectProgramState.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectProgramState.py
@@ -13,18 +13,21 @@
 from dex.command.CommandBase import CommandBase, StepExpectInfo
 from dex.dextIR import ProgramState, SourceLocation, StackFrame, DextIR
 
+
 def frame_from_dict(source: dict) -> StackFrame:
-    if 'location' in source:
-        assert isinstance(source['location'], dict)
-        source['location'] = SourceLocation(**source['location'])
+    if "location" in source:
+        assert isinstance(source["location"], dict)
+        source["location"] = SourceLocation(**source["location"])
     return StackFrame(**source)
 
+
 def state_from_dict(source: dict) -> ProgramState:
-    if 'frames' in source:
-        assert isinstance(source['frames'], list)
-        source['frames'] = list(map(frame_from_dict, source['frames']))
+    if "frames" in source:
+        assert isinstance(source["frames"], list)
+        source["frames"] = list(map(frame_from_dict, source["frames"]))
     return ProgramState(**source)
 
+
 class DexExpectProgramState(CommandBase):
     """Expect to see a given program `state` a certain numer of `times`.
 
@@ -35,16 +38,15 @@ class DexExpectProgramState(CommandBase):
 
     def __init__(self, *args, **kwargs):
         if len(args) != 1:
-            raise TypeError('expected exactly one unnamed arg')
+            raise TypeError("expected exactly one unnamed arg")
 
         self.program_state_text = str(args[0])
 
         self.expected_program_state = state_from_dict(args[0])
 
-        self.times = kwargs.pop('times', -1)
+        self.times = kwargs.pop("times", -1)
         if kwargs:
-            raise TypeError('unexpected named args: {}'.format(
-                ', '.join(kwargs)))
+            raise TypeError("unexpected named args: {}".format(", ".join(kwargs)))
 
         # Step indices at which the expected program state was encountered.
         self.encounters = []
@@ -58,18 +60,23 @@ def get_name():
     def get_watches(self):
         frame_expects = set()
         for idx, frame in enumerate(self.expected_program_state.frames):
-            path = (frame.location.path if
-                    frame.location and frame.location.path else self.path)
+            path = (
+                frame.location.path
+                if frame.location and frame.location.path
+                else self.path
+            )
             line_range = (
                 range(frame.location.lineno, frame.location.lineno + 1)
-                if frame.location and frame.location.lineno else None)
+                if frame.location and frame.location.lineno
+                else None
+            )
             for watch in frame.watches:
                 frame_expects.add(
                     StepExpectInfo(
                         expression=watch,
                         path=path,
                         frame_idx=idx,
-                        line_range=line_range
+                        line_range=line_range,
                     )
                 )
         return frame_expects
@@ -79,4 +86,6 @@ def eval(self, step_collection: DextIR) -> bool:
             if self.expected_program_state.match(step.program_state):
                 self.encounters.append(step.step_index)
 
-        return self.times < 0 < len(self.encounters) or len(self.encounters) == self.times
+        return (
+            self.times < 0 < len(self.encounters) or len(self.encounters) == self.times
+        )

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepKind.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepKind.py
index 6370f5d32c7d4..333b765459e51 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepKind.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepKind.py
@@ -21,13 +21,16 @@ class DexExpectStepKind(CommandBase):
 
     def __init__(self, *args):
         if len(args) != 2:
-            raise TypeError('expected two args')
+            raise TypeError("expected two args")
 
         try:
             step_kind = StepKind[args[0]]
         except KeyError:
-            raise TypeError('expected arg 0 to be one of {}'.format(
-                [kind for kind, _ in StepKind.__members__.items()]))
+            raise TypeError(
+                "expected arg 0 to be one of {}".format(
+                    [kind for kind, _ in StepKind.__members__.items()]
+                )
+            )
 
         self.name = step_kind
         self.count = args[1]

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepOrder.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepOrder.py
index d5cfc3c82f415..cb5579b523dcf 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepOrder.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectStepOrder.py
@@ -9,6 +9,7 @@
 from dex.dextIR import LocIR
 from dex.dextIR import ValueIR
 
+
 class DexExpectStepOrder(CommandBase):
     """Expect the line every `DexExpectStepOrder` is found on to be stepped on
     in `order`. Each instance must have a set of unique ascending indicies.
@@ -20,14 +21,16 @@ class DexExpectStepOrder(CommandBase):
 
     def __init__(self, *args, **kwargs):
         if not args:
-            raise TypeError('Need at least one order number')
+            raise TypeError("Need at least one order number")
 
-        if 'on_line' in kwargs:
+        if "on_line" in kwargs:
             try:
-                on_line = kwargs.pop('on_line')
+                on_line = kwargs.pop("on_line")
                 self.on_line = int(on_line)
             except ValueError:
-                raise ValueError('on_line value \'{0}\' cannot be parsed to an integer'.format(on_line))
+                raise ValueError(
+                    "on_line value '{0}' cannot be parsed to an integer".format(on_line)
+                )
         self.sequence = [int(x) for x in args]
         super(DexExpectStepOrder, self).__init__()
 
@@ -36,12 +39,17 @@ def get_name():
         return __class__.__name__
 
     def get_line(self):
-        return self.on_line if hasattr(self, 'on_line') else self.lineno
+        return self.on_line if hasattr(self, "on_line") else self.lineno
 
     def eval(self, step_info):
-        return {'DexExpectStepOrder': ValueIR(expression=str(step_info.current_location.lineno),
-                      value=str(step_info.step_index), type_name=None,
-                      error_string=None,
-                      could_evaluate=True,
-                      is_optimized_away=True,
-                      is_irretrievable=False)}
+        return {
+            "DexExpectStepOrder": ValueIR(
+                expression=str(step_info.current_location.lineno),
+                value=str(step_info.step_index),
+                type_name=None,
+                error_string=None,
+                could_evaluate=True,
+                is_optimized_away=True,
+                is_irretrievable=False,
+            )
+        }

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py
index e79b1bb711e67..21f1b719c6817 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py
@@ -20,6 +20,7 @@
 from dex.command.StepValueInfo import StepValueInfo
 from dex.utils.Exceptions import NonFloatValueInCommand
 
+
 class AddressExpression(object):
     def __init__(self, name, offset=0):
         self.name = name
@@ -36,37 +37,47 @@ def resolved_value(self, resolutions):
         # Technically we should fill(8) if we're debugging on a 32bit architecture?
         return format_address(resolutions[self.name] + self.offset)
 
+
 def format_address(value, address_width=64):
-    return "0x" + hex(value)[2:].zfill(math.ceil(address_width/4))
+    return "0x" + hex(value)[2:].zfill(math.ceil(address_width / 4))
+
 
 def resolved_value(value, resolutions):
-    return value.resolved_value(resolutions) if isinstance(value, AddressExpression) else value
+    return (
+        value.resolved_value(resolutions)
+        if isinstance(value, AddressExpression)
+        else value
+    )
+
 
 class DexExpectWatchBase(CommandBase):
     def __init__(self, *args, **kwargs):
         if len(args) < 2:
-            raise TypeError('expected at least two args')
+            raise TypeError("expected at least two args")
 
         self.expression = args[0]
-        self.values = [arg if isinstance(arg, AddressExpression) else str(arg) for arg in args[1:]]
+        self.values = [
+            arg if isinstance(arg, AddressExpression) else str(arg) for arg in args[1:]
+        ]
         try:
-            on_line = kwargs.pop('on_line')
+            on_line = kwargs.pop("on_line")
             self._from_line = on_line
             self._to_line = on_line
         except KeyError:
-            self._from_line = kwargs.pop('from_line', 1)
-            self._to_line = kwargs.pop('to_line', 999999)
-        self._require_in_order = kwargs.pop('require_in_order', True)
-        self.float_range = kwargs.pop('float_range', None)
+            self._from_line = kwargs.pop("from_line", 1)
+            self._to_line = kwargs.pop("to_line", 999999)
+        self._require_in_order = kwargs.pop("require_in_order", True)
+        self.float_range = kwargs.pop("float_range", None)
         if self.float_range is not None:
             for value in self.values:
                 try:
                     float(value)
                 except ValueError:
-                    raise NonFloatValueInCommand(f'Non-float value \'{value}\' when float_range arg provided')
+                    raise NonFloatValueInCommand(
+                        f"Non-float value '{value}' when float_range arg provided"
+                    )
         if kwargs:
-            raise TypeError('unexpected named args: {}'.format(
-                ', '.join(kwargs)))
+            raise TypeError("unexpected named args: {}".format(", ".join(kwargs)))
 
         # Number of times that this watch has been encountered.
         self.times_encountered = 0
@@ -108,7 +119,11 @@ def __init__(self, *args, **kwargs):
         super(DexExpectWatchBase, self).__init__()
 
     def resolve_value(self, value):
-        return value.resolved_value(self.address_resolutions) if isinstance(value, AddressExpression) else value
+        return (
+            value.resolved_value(self.address_resolutions)
+            if isinstance(value, AddressExpression)
+            else value
+        )
 
     def describe_value(self, value):
         if isinstance(value, AddressExpression):
@@ -117,14 +132,18 @@ def describe_value(self, value):
                 offset = f"+{value.offset}"
             elif value.offset < 0:
                 offset = str(value.offset)
-            desc =  f"address '{value.name}'{offset}"
+            desc = f"address '{value.name}'{offset}"
             if self.resolve_value(value) is not None:
                 desc += f" ({self.resolve_value(value)})"
             return desc
         return value
 
     def get_watches(self):
-        return [StepExpectInfo(self.expression, self.path, 0, range(self._from_line, self._to_line + 1))]
+        return [
+            StepExpectInfo(
+                self.expression, self.path, 0, range(self._from_line, self._to_line + 1)
+            )
+        ]
 
     @property
     def line_range(self):
@@ -136,12 +155,18 @@ def missing_values(self):
 
     @property
     def encountered_values(self):
-        return sorted(list(set(self.describe_value(v) for v in set(self.values) - self._missing_values)))
+        return sorted(
+            list(
+                set(
+                    self.describe_value(v)
+                    for v in set(self.values) - self._missing_values
+                )
+            )
+        )
 
     @abc.abstractmethod
     def _get_expected_field(self, watch):
-        """Return a field from watch that this ExpectWatch command is checking.
-        """
+        """Return a field from watch that this ExpectWatch command is checking."""
 
     def _match_expected_floating_point(self, value):
         """Checks to see whether value is a float that falls within the
@@ -155,13 +180,13 @@ def _match_expected_floating_point(self, value):
 
         possible_values = self.values
         for expected in possible_values:
-          try:
-              expected_as_float = float(expected)
-              difference = abs(value_as_float - expected_as_float)
-              if difference <= self.float_range:
-                  return expected
-          except ValueError:
-              pass
+            try:
+                expected_as_float = float(expected)
+                difference = abs(value_as_float - expected_as_float)
+                if difference <= self.float_range:
+                    return expected
+            except ValueError:
+                pass
         return value
 
     def _maybe_fix_float(self, value):
@@ -190,9 +215,11 @@ def _handle_watch(self, step_info):
         # Check to see if this value matches with a resolved address.
         matching_address = None
         for v in self.values:
-            if (isinstance(v, AddressExpression) and
-                    v.name in self.address_resolutions and
-                    self.resolve_value(v) == expected_value):
+            if (
+                isinstance(v, AddressExpression)
+                and v.name in self.address_resolutions
+                and self.resolve_value(v) == expected_value
+            ):
                 matching_address = v
                 break
 
@@ -203,7 +230,9 @@ def _handle_watch(self, step_info):
             return
 
         self.expected_watches.append(step_info)
-        value_to_remove = matching_address if matching_address is not None else expected_value
+        value_to_remove = (
+            matching_address if matching_address is not None else expected_value
+        )
         try:
             self._missing_values.remove(value_to_remove)
         except KeyError:
@@ -214,20 +243,23 @@ def _check_watch_order(self, actual_watches, expected_values):
         or not.
        """
        differences = []
-        actual_values = [self._maybe_fix_float(w.expected_value) for w in actual_watches]
-        value_differences = list(difflib.Differ().compare(actual_values,
-                                                          expected_values))
+        actual_values = [
+            self._maybe_fix_float(w.expected_value) for w in actual_watches
+        ]
+        value_differences = list(
+            difflib.Differ().compare(actual_values, expected_values)
+        )
 
         missing_value = False
         index = 0
        for vd in value_differences:
             kind = vd[0]
-            if kind == '+':
+            if kind == "+":
                 # A value that is encountered in the expected list but not in the
                 # actual list.  We'll keep a note that something is wrong and flag
                 # the next value that matches as misordered.
                 missing_value = True
-            elif kind == ' ':
+            elif kind == " ":
                 # This value is as expected.  It might still be wrong if we've
                 # previously encountered a value that is in the expected list but
                 #  not the actual list.
@@ -235,13 +267,13 @@ def _check_watch_order(self, actual_watches, expected_values):
                    missing_value = False
                    differences.append(actual_watches[index])
                 index += 1
-            elif kind == '-':
+            elif kind == "-":
                 # A value that is encountered in the actual list but not the
                #  expected list.
                differences.append(actual_watches[index])
                 index += 1
             else:
-                assert False, 'unexpected diff:{}'.format(vd)
+                assert False, "unexpected diff:{}".format(vd)
 
        return differences
 
@@ -249,17 +281,19 @@ def eval(self, step_collection):
         for step in step_collection.steps:
             loc = step.current_location
 
-            if (loc.path and self.path and
-                PurePath(loc.path) == PurePath(self.path) and
-                loc.lineno in self.line_range):
+            if (
+                loc.path
+                and self.path
+                and PurePath(loc.path) == PurePath(self.path)
+                and loc.lineno in self.line_range
+            ):
                 try:
                     watch = step.program_state.frames[0].watches[self.expression]
                 except KeyError:
                     pass
                 else:
                     expected_field = self._get_expected_field(watch)
-                    step_info = StepValueInfo(step.step_index, watch, 
-                                              expected_field)
+                    step_info = StepValueInfo(step.step_index, watch, expected_field)
                     self._handle_watch(step_info)
 
         if self._require_in_order:
@@ -276,6 +310,6 @@ def eval(self, step_collection):
 
             resolved_values = [self.resolve_value(v) for v in self.values]
             self.misordered_watches = self._check_watch_order(
-                value_change_watches, [
-                    v for v in resolved_values if v in all_expected_values
-                ])
+                value_change_watches,
+                [v for v in resolved_values if v in all_expected_values],
+            )

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchType.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchType.py
index f2336de482801..0f0c65f49bfee 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchType.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchType.py
@@ -9,6 +9,7 @@
 
 from dex.command.commands.DexExpectWatchBase import DexExpectWatchBase
 
+
 class DexExpectWatchType(DexExpectWatchBase):
     """Expect the expression `expr` to evaluate be evaluated and have each
     evaluation's type checked against the list of `types`.
@@ -18,6 +19,7 @@ class DexExpectWatchType(DexExpectWatchBase):
 
     See Commands.md for more info.
     """
+
     @staticmethod
     def get_name():
         return __class__.__name__

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchValue.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchValue.py
index d6da006ee8cc3..df5bb09c03651 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchValue.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchValue.py
@@ -9,6 +9,7 @@
 
 from dex.command.commands.DexExpectWatchBase import DexExpectWatchBase
 
+
 class DexExpectWatchValue(DexExpectWatchBase):
     """Expect the expression `expr` to evaluate to the list of `values`
     sequentially.

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexFinishTest.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexFinishTest.py
index 5014317c35dd8..7a28f1c175e7a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexFinishTest.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexFinishTest.py
@@ -10,6 +10,7 @@
 
 from dex.command.CommandBase import CommandBase
 
+
 class DexFinishTest(CommandBase):
     def __init__(self, *args, **kwargs):
         if len(args) == 0:
@@ -20,15 +21,14 @@ def __init__(self, *args, **kwargs):
         else:
             self.expression = args[0]
             self.values = [str(arg) for arg in args[1:]]
-        self.on_line = kwargs.pop('on_line')
-        self.hit_count = kwargs.pop('hit_count', 0)
+        self.on_line = kwargs.pop("on_line")
+        self.hit_count = kwargs.pop("hit_count", 0)
         if kwargs:
-            raise TypeError('unexpected named args: {}'.format(
-                ', '.join(kwargs)))
+            raise TypeError("unexpected named args: {}".format(", ".join(kwargs)))
         super(DexFinishTest, self).__init__()
 
     def eval(self):
-        raise NotImplementedError('DexFinishTest commands cannot be evaled.')
+        raise NotImplementedError("DexFinishTest commands cannot be evaled.")
 
     @staticmethod
     def get_name():

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLabel.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLabel.py
index 9f42d42e0ffee..575e5ea9c66dd 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLabel.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLabel.py
@@ -15,10 +15,10 @@ class DexLabel(CommandBase):
     def __init__(self, label, **kwargs):
 
         if not isinstance(label, str):
-            raise TypeError('invalid argument type')
+            raise TypeError("invalid argument type")
 
         try:
-            self.on_line = kwargs.pop('on_line')
+            self.on_line = kwargs.pop("on_line")
         except KeyError:
             # We cannot use self.lineno because it hasn't been set yet.
             pass
@@ -29,7 +29,7 @@ def __init__(self, label, **kwargs):
         super(DexLabel, self).__init__()
 
     def get_line(self):
-        return getattr(self, 'on_line', self.lineno)
+        return getattr(self, "on_line", self.lineno)
 
     def get_as_pair(self):
         return (self._label, self.get_line())

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLimitSteps.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLimitSteps.py
index d779539e07fe4..1c6aee196331a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLimitSteps.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexLimitSteps.py
@@ -10,6 +10,7 @@
 
 from dex.command.CommandBase import CommandBase
 
+
 class DexLimitSteps(CommandBase):
     def __init__(self, *args, **kwargs):
         if len(args) == 0:
@@ -21,20 +22,19 @@ def __init__(self, *args, **kwargs):
             self.expression = args[0]
             self.values = [str(arg) for arg in args[1:]]
         try:
-            on_line = kwargs.pop('on_line')
+            on_line = kwargs.pop("on_line")
             self.from_line = on_line
             self.to_line = on_line
         except KeyError:
-            self.from_line = kwargs.pop('from_line', 1)
-            self.to_line = kwargs.pop('to_line', 999999)
-        self.hit_count = kwargs.pop('hit_count', None)
+            self.from_line = kwargs.pop("from_line", 1)
+            self.to_line = kwargs.pop("to_line", 999999)
+        self.hit_count = kwargs.pop("hit_count", None)
         if kwargs:
-            raise TypeError('unexpected named args: {}'.format(
-                ', '.join(kwargs)))
+            raise TypeError("unexpected named args: {}".format(", ".join(kwargs)))
         super(DexLimitSteps, self).__init__()
 
     def eval(self):
-        raise NotImplementedError('DexLimitSteps commands cannot be evaled.')
+        raise NotImplementedError("DexLimitSteps commands cannot be evaled.")
 
     @staticmethod
     def get_name():

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexUnreachable.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexUnreachable.py
index 5b40ba0814d77..8356ed7a7a0bb 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexUnreachable.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexUnreachable.py
@@ -21,14 +21,14 @@ class DexUnreachable(CommandBase):
     def __init__(self, *args, **kwargs):
         if len(args) != 0:
             raise TypeError("DexUnreachable takes no positional arguments")
-        if 'on_line' in kwargs:
-            on_line = kwargs.pop('on_line')
+        if "on_line" in kwargs:
+            on_line = kwargs.pop("on_line")
             self._from_line = on_line
             self._to_line = on_line
-        elif 'from_line' in kwargs and 'to_line' in kwargs:
-            self._from_line = kwargs.pop('from_line')
-            self._to_line = kwargs.pop('to_line')
-        elif 'from_line' in kwargs or 'to_line' in kwargs:
+        elif "from_line" in kwargs and "to_line" in kwargs:
+            self._from_line = kwargs.pop("from_line")
+            self._to_line = kwargs.pop("to_line")
+        elif "from_line" in kwargs or "to_line" in kwargs:
             raise TypeError("Must provide both from_line and to_line to DexUnreachable")
 
         if len(kwargs) > 0:
@@ -43,10 +43,13 @@ def get_name():
     def eval(self, step_info):
         # If we're ever called, at all, then we're evaluating a line that has
         # been marked as unreachable. Which means a failure.
-        vir = ValueIR(expression="Unreachable",
-                      value="True", type_name=None,
-                      error_string=None,
-                      could_evaluate=True,
-                      is_optimized_away=True,
-                      is_irretrievable=False)
-        return {'DexUnreachable' : vir}
+        vir = ValueIR(
+            expression="Unreachable",
+            value="True",
+            type_name=None,
+            error_string=None,
+            could_evaluate=True,
+            is_optimized_away=True,
+            is_irretrievable=False,
+        )
+        return {"DexUnreachable": vir}

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexWatch.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexWatch.py
index 2dfa3a36fb374..62f12c354e22f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexWatch.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexWatch.py
@@ -22,11 +22,11 @@ class DexWatch(CommandBase):
 
     def __init__(self, *args):
         if not args:
-            raise TypeError('expected some arguments')
+            raise TypeError("expected some arguments")
 
         for arg in args:
             if not isinstance(arg, str):
-                raise TypeError('invalid argument type')
+                raise TypeError("invalid argument type")
 
         self._args = args
         super(DexWatch, self).__init__()

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerBase.py
index 8a18ee0bfcdb7..d4a555085face 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerBase.py
@@ -18,14 +18,14 @@
 from dex.utils.Exceptions import DebuggerException
 from dex.utils.ReturnCode import ReturnCode
 
+
 def watch_is_active(watch_info: StepExpectInfo, path, frame_idx, line_no):
     _, watch_path, watch_frame_idx, watch_line_range = watch_info
     # If this watch should only be active for a specific file...
     if watch_path and os.path.isfile(watch_path):
         # If the current path does not match the expected file, this watch is
         # not active.
-        if not (path and os.path.isfile(path) and
-                os.path.samefile(path, watch_path)):
+        if not (path and os.path.isfile(path) and os.path.samefile(path, watch_path)):
             return False
     if watch_frame_idx != frame_idx:
         return False
@@ -33,6 +33,7 @@ def watch_is_active(watch_info: StepExpectInfo, path, frame_idx, line_no):
         return False
     return True
 
+
 class DebuggerBase(object, metaclass=abc.ABCMeta):
     def __init__(self, context):
         self.context = context
@@ -77,8 +78,7 @@ def is_available(self):
 
     @property
     def loading_error(self):
-        return (str(self._loading_error[1])
-                if self._loading_error is not None else None)
+        return str(self._loading_error[1]) if self._loading_error is not None else None
 
     @property
     def loading_error_trace(self):
@@ -89,13 +89,14 @@ def loading_error_trace(self):
 
         if self._loading_error[1].orig_exception is not None:
             orig_exception = traceback.format_exception(
-                *self._loading_error[1].orig_exception)
+                *self._loading_error[1].orig_exception
+            )
 
-            if ''.join(orig_exception) not in ''.join(tb):
-                tb.extend(['\n'])
+            if "".join(orig_exception) not in "".join(tb):
+                tb.extend(["\n"])
                 tb.extend(orig_exception)
 
-        tb = ''.join(tb).splitlines(True)
+        tb = "".join(tb).splitlines(True)
         return tb
 
     def _sanitize_function_name(self, name):  # pylint: disable=no-self-use
@@ -147,8 +148,7 @@ def add_breakpoint(self, file_, line):
 
     @abc.abstractmethod
     def _add_breakpoint(self, file_, line):
-        """Returns a unique opaque breakpoint id.
-        """
+        """Returns a unique opaque breakpoint id."""
         pass
 
     def add_conditional_breakpoint(self, file_, line, condition):
@@ -158,12 +158,12 @@ def add_conditional_breakpoint(self, file_, line, condition):
         an int.
         """
         return self._add_conditional_breakpoint(
-            self._external_to_debug_path(file_), line, condition)
+            self._external_to_debug_path(file_), line, condition
+        )
 
     @abc.abstractmethod
     def _add_conditional_breakpoint(self, file_, line, condition):
-        """Returns a unique opaque breakpoint id.
-        """
+        """Returns a unique opaque breakpoint id."""
         pass
 
     @abc.abstractmethod
@@ -176,8 +176,7 @@ def delete_breakpoints(self, ids):
 
     @abc.abstractmethod
     def get_triggered_breakpoint_ids(self):
-        """Returns a set of opaque ids for just-triggered breakpoints.
-        """
+        """Returns a set of opaque ids for just-triggered breakpoints."""
         pass
 
     @abc.abstractmethod
@@ -225,7 +224,7 @@ def _external_to_debug_path(self, path):
         if not root_dir or not path:
             return path
         assert path.startswith(root_dir)
-        return path[len(root_dir):].lstrip(os.path.sep)
+        return path[len(root_dir) :].lstrip(os.path.sep)
 
     def _debug_to_external_path(self, path):
         if not self.options.debugger_use_relative_paths:
@@ -237,10 +236,9 @@ def _debug_to_external_path(self, path):
                 return file
         return path
 
-class TestDebuggerBase(unittest.TestCase):
 
+class TestDebuggerBase(unittest.TestCase):
     class MockDebugger(DebuggerBase):
-
         def __init__(self, context, *args):
             super().__init__(context, *args)
             self.step_info = None
@@ -255,8 +253,8 @@ def _get_step_info(self, watches, step_index):
     def __init__(self, *args):
         super().__init__(*args)
         TestDebuggerBase.MockDebugger.__abstractmethods__ = set()
-        self.options = SimpleNamespace(source_root_dir = '', source_files = [])
-        context = SimpleNamespace(options = self.options)
+        self.options = SimpleNamespace(source_root_dir="", source_files=[])
+        context = SimpleNamespace(options=self.options)
         self.dbg = TestDebuggerBase.MockDebugger(context)
 
     def _new_step(self, paths):
@@ -264,7 +262,9 @@ def _new_step(self, paths):
             FrameIR(
                 function=None,
                 is_inlined=False,
-                loc=LocIR(path=path, lineno=0, column=0)) for path in paths
+                loc=LocIR(path=path, lineno=0, column=0),
+            )
+            for path in paths
         ]
         return StepIR(step_index=0, stop_reason=None, frames=frames)
 
@@ -273,47 +273,45 @@ def _step_paths(self, step):
 
     def test_add_breakpoint_no_source_root_dir(self):
         self.options.debugger_use_relative_paths = True
-        self.options.source_root_dir = ''
-        path = os.path.join(os.path.sep + 'root', 'some_file')
+        self.options.source_root_dir = ""
+        path = os.path.join(os.path.sep + "root", "some_file")
         self.dbg.add_breakpoint(path, 12)
         self.assertEqual(path, self.dbg.breakpoint_file)
 
     def test_add_breakpoint_with_source_root_dir(self):
         self.options.debugger_use_relative_paths = True
-        self.options.source_root_dir = os.path.sep + 'my_root'
-        path = os.path.join(self.options.source_root_dir, 'some_file')
+        self.options.source_root_dir = os.path.sep + "my_root"
+        path = os.path.join(self.options.source_root_dir, "some_file")
         self.dbg.add_breakpoint(path, 12)
-        self.assertEqual('some_file', self.dbg.breakpoint_file)
+        self.assertEqual("some_file", self.dbg.breakpoint_file)
 
     def test_add_breakpoint_with_source_root_dir_slash_suffix(self):
         self.options.debugger_use_relative_paths = True
-        self.options.source_root_dir = os.path.sep + 'my_root' + os.path.sep
-        path = os.path.join(self.options.source_root_dir, 'some_file')
+        self.options.source_root_dir = os.path.sep + "my_root" + os.path.sep
+        path = os.path.join(self.options.source_root_dir, "some_file")
         self.dbg.add_breakpoint(path, 12)
-        self.assertEqual('some_file', self.dbg.breakpoint_file)
+        self.assertEqual("some_file", self.dbg.breakpoint_file)
 
     def test_get_step_info_no_source_root_dir(self):
         self.options.debugger_use_relative_paths = True
-        path = os.path.join(os.path.sep + 'root', 'some_file')
+        path = os.path.join(os.path.sep + "root", "some_file")
         self.dbg.step_info = self._new_step([path])
-        self.assertEqual([path],
-            self._step_paths(self.dbg.get_step_info([], 0)))
+        self.assertEqual([path], self._step_paths(self.dbg.get_step_info([], 0)))
 
     def test_get_step_info_no_frames(self):
         self.options.debugger_use_relative_paths = True
-        self.options.source_root_dir = os.path.sep + 'my_root'
+        self.options.source_root_dir = os.path.sep + "my_root"
         self.dbg.step_info = self._new_step([])
-        self.assertEqual([],
-            self._step_paths(self.dbg.get_step_info([], 0)))
+        self.assertEqual([], self._step_paths(self.dbg.get_step_info([], 0)))
 
     def test_get_step_info(self):
         self.options.debugger_use_relative_paths = True
-        self.options.source_root_dir = os.path.sep + 'my_root'
-        path = os.path.join(self.options.source_root_dir, 'some_file')
+        self.options.source_root_dir = os.path.sep + "my_root"
+        path = os.path.join(self.options.source_root_dir, "some_file")
         self.options.source_files = [path]
-        other_path = os.path.join(os.path.sep + 'other', 'file')
-        dbg_path = os.path.join(os.path.sep + 'dbg', 'some_file')
-        self.dbg.step_info = self._new_step(
-            [None, other_path, dbg_path])
-        self.assertEqual([None, other_path, path],
-            self._step_paths(self.dbg.get_step_info([], 0)))
+        other_path = os.path.join(os.path.sep + "other", "file")
+        dbg_path = os.path.join(os.path.sep + "dbg", "some_file")
+        self.dbg.step_info = self._new_step([None, other_path, dbg_path])
+        self.assertEqual(
+            [None, other_path, path], self._step_paths(self.dbg.get_step_info([], 0))
+        )

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ConditionalController.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ConditionalController.py
index c2e97e375d979..fb5536d6a14dc 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ConditionalController.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ConditionalController.py
@@ -12,8 +12,13 @@
 from collections import defaultdict
 from itertools import chain
 
-from dex.debugger.DebuggerControllers.ControllerHelpers import in_source_file, update_step_watches
-from dex.debugger.DebuggerControllers.DebuggerControllerBase import DebuggerControllerBase
+from dex.debugger.DebuggerControllers.ControllerHelpers import (
+    in_source_file,
+    update_step_watches,
+)
+from dex.debugger.DebuggerControllers.DebuggerControllerBase import (
+    DebuggerControllerBase,
+)
 from dex.debugger.DebuggerBase import DebuggerBase
 from dex.utils.Exceptions import DebuggerException
 from dex.utils.Timeout import Timeout
@@ -37,8 +42,16 @@ class BreakpointRange:
                   leading breakpoint is triggered before it is removed.
     """
 
-    def __init__(self, expression: str, path: str, range_from: int, range_to: int,
-                 values: list, hit_count: int, finish_on_remove: bool):
+    def __init__(
+        self,
+        expression: str,
+        path: str,
+        range_from: int,
+        range_to: int,
+        values: list,
+        hit_count: int,
+        finish_on_remove: bool,
+    ):
         self.expression = expression
         self.path = path
         self.range_from = range_from
@@ -55,7 +68,7 @@ def get_conditional_expression_list(self):
         conditional_list = []
         for value in self.conditional_values:
             # (<expression>) == (<value>)
-            conditional_expression = '({}) == ({})'.format(self.expression, value)
+            conditional_expression = "({}) == ({})".format(self.expression, value)
             conditional_list.append(conditional_expression)
         return conditional_list
 
@@ -70,44 +83,48 @@ def should_be_removed(self):
 
 class ConditionalController(DebuggerControllerBase):
     def __init__(self, context, step_collection):
-      self._bp_ranges = None
-      self._watches = set()
-      self._step_index = 0
-      self._pause_between_steps = context.options.pause_between_steps
-      self._max_steps = context.options.max_steps
-      # Map {id: BreakpointRange}
-      self._leading_bp_handles = {}
-      super(ConditionalController, self).__init__(context, step_collection)
-      self._build_bp_ranges()
+        self._bp_ranges = None
+        self._watches = set()
+        self._step_index = 0
+        self._pause_between_steps = context.options.pause_between_steps
+        self._max_steps = context.options.max_steps
+        # Map {id: BreakpointRange}
+        self._leading_bp_handles = {}
+        super(ConditionalController, self).__init__(context, step_collection)
+        self._build_bp_ranges()
 
     def _build_bp_ranges(self):
         commands = self.step_collection.commands
         self._bp_ranges = []
         try:
-            limit_commands = commands['DexLimitSteps']
+            limit_commands = commands["DexLimitSteps"]
             for lc in limit_commands:
                 bpr = BreakpointRange(
-                  lc.expression,
-                  lc.path,
-                  lc.from_line,
-                  lc.to_line,
-                  lc.values,
-                  lc.hit_count,
-                  False)
+                    lc.expression,
+                    lc.path,
+                    lc.from_line,
+                    lc.to_line,
+                    lc.values,
+                    lc.hit_count,
+                    False,
+                )
                 self._bp_ranges.append(bpr)
         except KeyError:
-            raise DebuggerException('Missing DexLimitSteps commands, cannot conditionally step.')
-        if 'DexFinishTest' in commands:
-            finish_commands = commands['DexFinishTest']
+            raise DebuggerException(
+                "Missing DexLimitSteps commands, cannot conditionally step."
+            )
+        if "DexFinishTest" in commands:
+            finish_commands = commands["DexFinishTest"]
             for ic in finish_commands:
                 bpr = BreakpointRange(
-                  ic.expression,
-                  ic.path,
-                  ic.on_line,
-                  ic.on_line,
-                  ic.values,
-                  ic.hit_count + 1,
-                  True)
+                    ic.expression,
+                    ic.path,
+                    ic.on_line,
+                    ic.on_line,
+                    ic.values,
+                    ic.hit_count + 1,
+                    True,
+                )
                 self._bp_ranges.append(bpr)
 
     def _set_leading_bps(self):
@@ -117,9 +134,9 @@ def _set_leading_bps(self):
             if bpr.has_conditions():
                 # Add a conditional breakpoint for each condition.
                 for cond_expr in bpr.get_conditional_expression_list():
-                    id = self.debugger.add_conditional_breakpoint(bpr.path,
-                                                                  bpr.range_from,
-                                                                  cond_expr)
+                    id = self.debugger.add_conditional_breakpoint(
+                        bpr.path, bpr.range_from, cond_expr
+                    )
                     self._leading_bp_handles[id] = bpr
             else:
                 # Add an unconditional breakpoint.
@@ -128,8 +145,10 @@ def _set_leading_bps(self):
 
     def _run_debugger_custom(self, cmdline):
         # TODO: Add conditional and unconditional breakpoint support to dbgeng.
-        if self.debugger.get_name() == 'dbgeng':
-            raise DebuggerException('DexLimitSteps commands are not supported by dbgeng')
+        if self.debugger.get_name() == "dbgeng":
+            raise DebuggerException(
+                "DexLimitSteps commands are not supported by dbgeng"
+            )
 
         self.step_collection.clear_steps()
         self._set_leading_bps()
@@ -150,13 +169,17 @@ def _run_debugger_custom(self, cmdline):
             while self.debugger.is_running and not timed_out:
                 # Check to see whether we've timed out while we're waiting.
                 if total_timeout.timed_out():
-                    self.context.logger.error('Debugger session has been '
-                        f'running for {total_timeout.elapsed}s, timeout reached!')
+                    self.context.logger.error(
+                        "Debugger session has been "
+                        f"running for {total_timeout.elapsed}s, timeout reached!"
+                    )
                     timed_out = True
                 if breakpoint_timeout.timed_out():
-                    self.context.logger.error(f'Debugger session has not '
-                        f'hit a breakpoint for {breakpoint_timeout.elapsed}s, timeout '
-                        'reached!')
+                    self.context.logger.error(
+                        f"Debugger session has not "
+                        f"hit a breakpoint for {breakpoint_timeout.elapsed}s, timeout "
+                        "reached!"
+                    )
                     timed_out = True
 
             if timed_out:
@@ -165,7 +188,9 @@ def _run_debugger_custom(self, cmdline):
             step_info = self.debugger.get_step_info(self._watches, self._step_index)
             if step_info.current_frame:
                 self._step_index += 1
-                update_step_watches(step_info, self._watches, self.step_collection.commands)
+                update_step_watches(
+                    step_info, self._watches, self.step_collection.commands
+                )
                 self.step_collection.new_step(self.context, step_info)
 
             bp_to_delete = []

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ControllerHelpers.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ControllerHelpers.py
index 8044f3982f40e..3e5a7b919d703 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ControllerHelpers.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/ControllerHelpers.py
@@ -8,6 +8,7 @@
 import os
 from itertools import chain
 
+
 def in_source_file(source_files, step_info):
     if not step_info.current_frame:
         return False
@@ -15,31 +16,34 @@ def in_source_file(source_files, step_info):
         return False
     if not os.path.exists(step_info.current_location.path):
         return False
-    return any(os.path.samefile(step_info.current_location.path, f) \
-               for f in source_files)
+    return any(
+        os.path.samefile(step_info.current_location.path, f) for f in source_files
+    )
+
 
 def have_hit_line(watch, loc):
-  if hasattr(watch, 'on_line'):
-    return watch.on_line == loc.lineno
-  elif hasattr(watch, '_from_line'):
-    return watch._from_line <= loc.lineno and watch._to_line >= loc.lineno
-  elif watch.lineno == loc.lineno:
-    return True
-  return False
+    if hasattr(watch, "on_line"):
+        return watch.on_line == loc.lineno
+    elif hasattr(watch, "_from_line"):
+        return watch._from_line <= loc.lineno and watch._to_line >= loc.lineno
+    elif watch.lineno == loc.lineno:
+        return True
+    return False
+
 
 def update_step_watches(step_info, watches, commands):
-    watch_cmds = ['DexUnreachable', 'DexExpectStepOrder']
-    towatch = chain.from_iterable(commands[x]
-                                  for x in watch_cmds
-                                  if x in commands)
+    watch_cmds = ["DexUnreachable", "DexExpectStepOrder"]
+    towatch = chain.from_iterable(commands[x] for x in watch_cmds if x in commands)
     try:
         # Iterate over all watches of the types named in watch_cmds
         for watch in towatch:
             loc = step_info.current_location
-            if (loc.path != None
-                    and os.path.exists(loc.path)
-                    and os.path.samefile(watch.path, loc.path)
-                    and have_hit_line(watch, loc)):
+            if (
+                loc.path != None
+                and os.path.exists(loc.path)
+                and os.path.samefile(watch.path, loc.path)
+                and have_hit_line(watch, loc)
+            ):
                 result = watch.eval(step_info)
                 step_info.watches.update(result)
                 break

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DebuggerControllerBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DebuggerControllerBase.py
index 867345311e14c..4cd44bb9aa223 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DebuggerControllerBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DebuggerControllerBase.py
@@ -8,6 +8,7 @@
 
 import abc
 
+
 class DebuggerControllerBase(object, metaclass=abc.ABCMeta):
     def __init__(self, context, step_collection):
         self.context = context
@@ -21,15 +22,14 @@ def _run_debugger_custom(self):
         pass
 
     def run_debugger(self, debugger):
-        """Responsible for correctly launching and tearing down the debugger.
-        """
+        """Responsible for correctly launching and tearing down the debugger."""
         self.debugger = debugger
 
         # Fetch command line options, if any.
         the_cmdline = []
         commands = self.step_collection.commands
-        if 'DexCommandLine' in commands:
-            cmd_line_objs = commands['DexCommandLine']
+        if "DexCommandLine" in commands:
+            cmd_line_objs = commands["DexCommandLine"]
             assert len(cmd_line_objs) == 1
             cmd_line_obj = cmd_line_objs[0]
             the_cmdline = cmd_line_obj.the_cmdline

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DefaultController.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DefaultController.py
index 1cc2c4410768c..9b0a6ac96eb2f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DefaultController.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DebuggerControllers/DefaultController.py
@@ -10,11 +10,17 @@
 import os
 import time
 
-from dex.debugger.DebuggerControllers.DebuggerControllerBase import DebuggerControllerBase
-from dex.debugger.DebuggerControllers.ControllerHelpers import in_source_file, update_step_watches
+from dex.debugger.DebuggerControllers.DebuggerControllerBase import (
+    DebuggerControllerBase,
+)
+from dex.debugger.DebuggerControllers.ControllerHelpers import (
+    in_source_file,
+    update_step_watches,
+)
 from dex.utils.Exceptions import DebuggerException, LoadDebuggerException
 from dex.utils.Timeout import Timeout
 
+
 class EarlyExitCondition(object):
     def __init__(self, on_line, hit_count, expression, values):
         self.on_line = on_line
@@ -22,6 +28,7 @@ def __init__(self, on_line, hit_count, expression, values):
         self.expression = expression
         self.values = values
 
+
 class DefaultController(DebuggerControllerBase):
     def __init__(self, context, step_collection):
         self.source_files = context.options.source_files
@@ -31,24 +38,26 @@ def __init__(self, context, step_collection):
 
     def _break_point_all_lines(self):
         for s in self.context.options.source_files:
-            with open(s, 'r') as fp:
+            with open(s, "r") as fp:
                 num_lines = len(fp.readlines())
             for line in range(1, num_lines + 1):
                 try:
-                   self.debugger.add_breakpoint(s, line)
+                    self.debugger.add_breakpoint(s, line)
                 except DebuggerException:
-                   raise LoadDebuggerException(DebuggerException.msg)
+                    raise LoadDebuggerException(DebuggerException.msg)
 
     def _get_early_exit_conditions(self):
         commands = self.step_collection.commands
         early_exit_conditions = []
-        if 'DexFinishTest' in commands:
-            finish_commands = commands['DexFinishTest']
+        if "DexFinishTest" in commands:
+            finish_commands = commands["DexFinishTest"]
             for fc in finish_commands:
-                condition = EarlyExitCondition(on_line=fc.on_line,
-                                               hit_count=fc.hit_count,
-                                               expression=fc.expression,
-                                               values=fc.values)
+                condition = EarlyExitCondition(
+                    on_line=fc.on_line,
+                    hit_count=fc.hit_count,
+                    expression=fc.expression,
+                    values=fc.values,
+                )
                 early_exit_conditions.append(condition)
         return early_exit_conditions
 
@@ -61,8 +70,10 @@ def _should_exit(self, early_exit_conditions, line_no):
                     # Conditional Controller, check equality in the debugger
                     # rather than in python (as the two can differ).
                     for value in condition.values:
-                        expr_val = self.debugger.evaluate_expression(f'({condition.expression}) == ({value})')
-                        if expr_val.value == 'true':
+                        expr_val = self.debugger.evaluate_expression(
+                            f"({condition.expression}) == ({value})"
+                        )
+                        if expr_val.value == "true":
                             exit_condition_hit = True
                             break
                 if exit_condition_hit:
@@ -72,7 +83,6 @@ def _should_exit(self, early_exit_conditions, line_no):
                         condition.hit_count -= 1
         return False
 
-
     def _run_debugger_custom(self, cmdline):
         self.step_collection.debugger = self.debugger.debugger_info
         self._break_point_all_lines()
@@ -91,13 +101,17 @@ def _run_debugger_custom(self, cmdline):
             while self.debugger.is_running and not timed_out:
                 # Check to see whether we've timed out while we're waiting.
                 if total_timeout.timed_out():
-                    self.context.logger.error('Debugger session has been '
-                        f'running for {total_timeout.elapsed}s, timeout reached!')
+                    self.context.logger.error(
+                        "Debugger session has been "
+                        f"running for {total_timeout.elapsed}s, timeout reached!"
+                    )
                     timed_out = True
                 if breakpoint_timeout.timed_out():
-                    self.context.logger.error(f'Debugger session has not '
-                        f'hit a breakpoint for {breakpoint_timeout.elapsed}s, timeout '
-                        'reached!')
+                    self.context.logger.error(
+                        f"Debugger session has not "
+                        f"hit a breakpoint for {breakpoint_timeout.elapsed}s, timeout "
+                        "reached!"
+                    )
                     timed_out = True
 
             if timed_out or self.debugger.is_finished:
@@ -107,9 +121,13 @@ def _run_debugger_custom(self, cmdline):
             step_info = self.debugger.get_step_info(self.watches, self.step_index)
 
             if step_info.current_frame:
-                update_step_watches(step_info, self.watches, self.step_collection.commands)
+                update_step_watches(
+                    step_info, self.watches, self.step_collection.commands
+                )
                 self.step_collection.new_step(self.context, step_info)
-                if self._should_exit(early_exit_conditions, step_info.current_frame.loc.lineno):
+                if self._should_exit(
+                    early_exit_conditions, step_info.current_frame.loc.lineno
+                ):
                     break
 
             if in_source_file(self.source_files, step_info):
@@ -120,4 +138,5 @@ def _run_debugger_custom(self, cmdline):
             time.sleep(self.context.options.pause_between_steps)
         else:
             raise DebuggerException(
-                'maximum number of steps reached ({})'.format(max_steps))
+                "maximum number of steps reached ({})".format(max_steps)
+            )

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/Debuggers.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/Debuggers.py
index c85310b37a8f4..bee62c7728145 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/Debuggers.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/Debuggers.py
@@ -39,29 +39,31 @@ def _get_potential_debuggers():  # noqa
         LLDB.get_option_name(): LLDB,
         VisualStudio2015.get_option_name(): VisualStudio2015,
         VisualStudio2017.get_option_name(): VisualStudio2017,
-        VisualStudio2019.get_option_name(): VisualStudio2019
+        VisualStudio2019.get_option_name(): VisualStudio2019,
     }
 
 
 def _warn_meaningless_option(context, option):
-    if hasattr(context.options, 'list_debuggers'):
+    if hasattr(context.options, "list_debuggers"):
         return
 
     context.logger.warning(
-         f'option "{option}" is meaningless with this debugger',
-         enable_prefix=True,
-         flag=f'--debugger={context.options.debugger}')
+        f'option "{option}" is meaningless with this debugger',
+        enable_prefix=True,
+        flag=f"--debugger={context.options.debugger}",
+    )
 
 
 def add_debugger_tool_base_arguments(parser, defaults):
-    defaults.lldb_executable = 'lldb.exe' if is_native_windows() else 'lldb'
+    defaults.lldb_executable = "lldb.exe" if is_native_windows() else "lldb"
     parser.add_argument(
-        '--lldb-executable',
+        "--lldb-executable",
         type=str,
-        metavar='<file>',
+        metavar="<file>",
         default=None,
         display_default=defaults.lldb_executable,
-        help='location of LLDB executable')
+        help="location of LLDB executable",
+    )
 
 
 def add_debugger_tool_arguments(parser, context, defaults):
@@ -71,71 +73,79 @@ def add_debugger_tool_arguments(parser, context, defaults):
     add_debugger_tool_base_arguments(parser, defaults)
 
     parser.add_argument(
-        '--debugger',
+        "--debugger",
         type=str,
         choices=potential_debuggers,
         required=True,
-        help='debugger to use')
+        help="debugger to use",
+    )
     parser.add_argument(
-        '--max-steps',
-        metavar='<int>',
+        "--max-steps",
+        metavar="<int>",
         type=int,
         default=1000,
-        help='maximum number of program steps allowed')
+        help="maximum number of program steps allowed",
+    )
     parser.add_argument(
-        '--pause-between-steps',
-        metavar='<seconds>',
+        "--pause-between-steps",
+        metavar="<seconds>",
         type=float,
         default=0.0,
-        help='number of seconds to pause between steps')
+        help="number of seconds to pause between steps",
+    )
     defaults.show_debugger = False
     parser.add_argument(
-        '--show-debugger',
-        action='store_true',
-        default=None,
-        help='show the debugger')
-    defaults.arch = 'x86_64'
+        "--show-debugger", action="store_true", default=None, help="show the debugger"
+    )
+    defaults.arch = "x86_64"
     parser.add_argument(
-        '--arch',
+        "--arch",
         type=str,
-        metavar='<architecture>',
+        metavar="<architecture>",
         default=None,
         display_default=defaults.arch,
-        help='target architecture')
-    defaults.source_root_dir = ''
+        help="target architecture",
+    )
+    defaults.source_root_dir = ""
     parser.add_argument(
-        '--source-root-dir',
+        "--source-root-dir",
         type=str,
-        metavar='<directory>',
+        metavar="<directory>",
         default=None,
-        help='source root directory')
+        help="source root directory",
+    )
     parser.add_argument(
-        '--debugger-use-relative-paths',
-        action='store_true',
+        "--debugger-use-relative-paths",
+        action="store_true",
         default=False,
-        help='pass the debugger paths relative to --source-root-dir')
+        help="pass the debugger paths relative to --source-root-dir",
+    )
     parser.add_argument(
-        '--target-run-args',
+        "--target-run-args",
         type=str,
-        metavar='<flags>',
-        default='',
-        help='command line arguments for the test program, in addition to any '
-             'provided by DexCommandLine')
+        metavar="<flags>",
+        default="",
+        help="command line arguments for the test program, in addition to any "
+        "provided by DexCommandLine",
+    )
     parser.add_argument(
-        '--timeout-total',
-        metavar='<seconds>',
+        "--timeout-total",
+        metavar="<seconds>",
         type=float,
         default=0.0,
-        help='if >0, debugger session will automatically exit after '
-             'running for <timeout-total> seconds')
+        help="if >0, debugger session will automatically exit after "
+        "running for <timeout-total> seconds",
+    )
     parser.add_argument(
-        '--timeout-breakpoint',
-        metavar='<seconds>',
+        "--timeout-breakpoint",
+        metavar="<seconds>",
         type=float,
         default=0.0,
-        help='if >0, debugger session will automatically exit after '
-             'waiting <timeout-breakpoint> seconds without hitting a '
-             'breakpoint')
+        help="if >0, debugger session will automatically exit after "
+        "waiting <timeout-breakpoint> seconds without hitting a "
+        "breakpoint",
+    )
+
 
 def handle_debugger_tool_base_options(context, defaults):  # noqa
     options = context.options
@@ -143,13 +153,14 @@ def handle_debugger_tool_base_options(context, defaults):  # noqa
     if options.lldb_executable is None:
         options.lldb_executable = defaults.lldb_executable
     else:
-        if getattr(options, 'debugger', 'lldb') != 'lldb':
-            _warn_meaningless_option(context, '--lldb-executable')
+        if getattr(options, "debugger", "lldb") != "lldb":
+            _warn_meaningless_option(context, "--lldb-executable")
 
         options.lldb_executable = os.path.abspath(options.lldb_executable)
         if not os.path.isfile(options.lldb_executable):
-            raise ToolArgumentError('<d>could not find</> <r>"{}"</>'.format(
-                options.lldb_executable))
+            raise ToolArgumentError(
+                '<d>could not find</> <r>"{}"</>'.format(options.lldb_executable)
+            )
 
 
 def handle_debugger_tool_options(context, defaults):  # noqa
@@ -160,53 +171,59 @@ def handle_debugger_tool_options(context, defaults):  # noqa
     if options.arch is None:
         options.arch = defaults.arch
     else:
-        if options.debugger != 'lldb':
-            _warn_meaningless_option(context, '--arch')
+        if options.debugger != "lldb":
+            _warn_meaningless_option(context, "--arch")
 
     if options.show_debugger is None:
         options.show_debugger = defaults.show_debugger
     else:
-        if options.debugger == 'lldb':
-            _warn_meaningless_option(context, '--show-debugger')
+        if options.debugger == "lldb":
+            _warn_meaningless_option(context, "--show-debugger")
 
     if options.source_root_dir != None:
         if not os.path.isabs(options.source_root_dir):
-            raise ToolArgumentError(f'<d>--source-root-dir: expected absolute path, got</> <r>"{options.source_root_dir}"</>')
+            raise ToolArgumentError(
+                f'<d>--source-root-dir: expected absolute path, got</> <r>"{options.source_root_dir}"</>'
+            )
         if not os.path.isdir(options.source_root_dir):
-            raise ToolArgumentError(f'<d>--source-root-dir: could not find directory</> <r>"{options.source_root_dir}"</>')
+            raise ToolArgumentError(
+                f'<d>--source-root-dir: could not find directory</> <r>"{options.source_root_dir}"</>'
+            )
 
     if options.debugger_use_relative_paths:
         if not options.source_root_dir:
-            raise ToolArgumentError(f'<d>--debugger-relative-paths</> <r>requires --source-root-dir</>')
+            raise ToolArgumentError(
+                f"<d>--debugger-relative-paths</> <r>requires --source-root-dir</>"
+            )
+
 
 def run_debugger_subprocess(debugger_controller, working_dir_path):
-    with NamedTemporaryFile(
-            dir=working_dir_path, delete=False, mode='wb') as fp:
+    with NamedTemporaryFile(dir=working_dir_path, delete=False, mode="wb") as fp:
         pickle.dump(debugger_controller, fp, protocol=pickle.HIGHEST_PROTOCOL)
         controller_path = fp.name
 
     dexter_py = os.path.basename(sys.argv[0])
     if not os.path.isfile(dexter_py):
-        dexter_py = os.path.join(get_root_directory(), '..', dexter_py)
+        dexter_py = os.path.join(get_root_directory(), "..", dexter_py)
     assert os.path.isfile(dexter_py)
 
     with NamedTemporaryFile(dir=working_dir_path) as fp:
         args = [
             sys.executable,
             dexter_py,
-            'run-debugger-internal-',
+            "run-debugger-internal-",
             controller_path,
-            '--working-directory={}'.format(working_dir_path),
-            '--unittest=off',
-            '--indent-timer-level={}'.format(Timer.indent + 2)
+            "--working-directory={}".format(working_dir_path),
+            "--unittest=off",
+            "--indent-timer-level={}".format(Timer.indent + 2),
         ]
         try:
-            with Timer('running external debugger process'):
+            with Timer("running external debugger process"):
                 subprocess.check_call(args)
         except subprocess.CalledProcessError as e:
             raise DebuggerException(e)
 
-    with open(controller_path, 'rb') as fp:
+    with open(controller_path, "rb") as fp:
         debugger_controller = pickle.load(fp)
 
     return debugger_controller
@@ -225,7 +242,7 @@ def __init__(self, context):
         self.context = context
 
     def load(self, key):
-        with Timer('load {}'.format(key)):
+        with Timer("load {}".format(key)):
             return Debuggers.potential_debuggers()[key](self.context)
 
     def _populate_debugger_cache(self):
@@ -237,19 +254,19 @@ class LoadedDebugger(object):
                 pass
 
             LoadedDebugger.option_name = key
-            LoadedDebugger.full_name = '[{}]'.format(debugger.name)
+            LoadedDebugger.full_name = "[{}]".format(debugger.name)
             LoadedDebugger.is_available = debugger.is_available
 
             if LoadedDebugger.is_available:
                 try:
                     LoadedDebugger.version = debugger.version.splitlines()
                 except AttributeError:
-                    LoadedDebugger.version = ['']
+                    LoadedDebugger.version = [""]
             else:
                 try:
                     LoadedDebugger.error = debugger.loading_error.splitlines()
                 except AttributeError:
-                    LoadedDebugger.error = ['']
+                    LoadedDebugger.error = [""]
 
                 try:
                     LoadedDebugger.error_trace = debugger.loading_error_trace
@@ -269,29 +286,27 @@ def list(self):
 
         for d in debuggers:
             # Option name, right padded with spaces for alignment
-            option_name = (
-                '{{name: <{}}}'.format(max_o_len).format(name=d.option_name))
+            option_name = "{{name: <{}}}".format(max_o_len).format(name=d.option_name)
 
             # Full name, right padded with spaces for alignment
-            full_name = ('{{name: <{}}}'.format(max_n_len)
-                         .format(name=d.full_name))
+            full_name = "{{name: <{}}}".format(max_n_len).format(name=d.full_name)
 
             if d.is_available:
-                name = '<b>{} {}</>'.format(option_name, full_name)
+                name = "<b>{} {}</>".format(option_name, full_name)
 
                 # If the debugger is available, show the first line of the
                 #  version info.
-                available = '<g>YES</>'
-                info = '<b>({})</>'.format(d.version[0])
+                available = "<g>YES</>"
+                info = "<b>({})</>".format(d.version[0])
             else:
-                name = '<y>{} {}</>'.format(option_name, full_name)
+                name = "<y>{} {}</>".format(option_name, full_name)
 
                 # If the debugger is not available, show the first line of the
                 # error reason.
-                available = '<r>NO</> '
-                info = '<y>({})</>'.format(d.error[0])
+                available = "<r>NO</> "
+                info = "<y>({})</>".format(d.error[0])
 
-            msg = '{} {} {}'.format(name, available, info)
+            msg = "{} {} {}".format(name, available, info)
 
             if self.context.options.verbose:
                 # If verbose mode and there was more version or error output
@@ -300,16 +315,18 @@ def list(self):
                 verbose_info = None
                 if d.is_available:
                     if d.version[1:]:
-                        verbose_info = d.version + ['\n']
+                        verbose_info = d.version + ["\n"]
                 else:
                     # Some of list elems may contain multiple lines, so make
                     # sure each elem is a line of its own.
                     verbose_info = d.error_trace
 
                 if verbose_info:
-                    verbose_info = '\n'.join('        {}'.format(l.rstrip())
-                                             for l in verbose_info) + '\n'
-                    msg = '{}\n\n{}'.format(msg, verbose_info)
+                    verbose_info = (
+                        "\n".join("        {}".format(l.rstrip()) for l in verbose_info)
+                        + "\n"
+                    )
+                    msg = "{}\n\n{}".format(msg, verbose_info)
 
             msgs.append(msg)
-        self.context.o.auto('\n{}\n\n'.format('\n'.join(msgs)))
+        self.context.o.auto("\n{}\n\n".format("\n".join(msgs)))

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/__init__.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/__init__.py
index 394f9f0eca8db..6bd389a1dc906 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/__init__.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/__init__.py
@@ -6,5 +6,7 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 from dex.debugger.Debuggers import Debuggers
-from dex.debugger.DebuggerControllers.DebuggerControllerBase import DebuggerControllerBase
+from dex.debugger.DebuggerControllers.DebuggerControllerBase import (
+    DebuggerControllerBase,
+)
 from dex.debugger.DebuggerControllers.DefaultController import DefaultController

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/__init__.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/__init__.py
index 3c458f955b7a4..78e599de8083f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/__init__.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/__init__.py
@@ -8,12 +8,13 @@
 from . import dbgeng
 
 import platform
-if platform.system() == 'Windows':
-  from . import breakpoint
-  from . import control
-  from . import probe_process
-  from . import setup
-  from . import symbols
-  from . import symgroup
-  from . import sysobjs
-  from . import utils
+
+if platform.system() == "Windows":
+    from . import breakpoint
+    from . import control
+    from . import probe_process
+    from . import setup
+    from . import symbols
+    from . import symgroup
+    from . import sysobjs
+    from . import utils

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/breakpoint.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/breakpoint.py
index c966d8c9c88f9..0bac87328e57e 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/breakpoint.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/breakpoint.py
@@ -11,78 +11,90 @@
 
 from .utils import *
 
+
 class BreakpointTypes(IntEnum):
-  DEBUG_BREAKPOINT_CODE =   0
-  DEBUG_BREAKPOINT_DATA =   1
-  DEBUG_BREAKPOINT_TIME =   2
-  DEBUG_BREAKPOINT_INLINE = 3
+    DEBUG_BREAKPOINT_CODE = 0
+    DEBUG_BREAKPOINT_DATA = 1
+    DEBUG_BREAKPOINT_TIME = 2
+    DEBUG_BREAKPOINT_INLINE = 3
+
 
 class BreakpointFlags(IntFlag):
-  DEBUG_BREAKPOINT_GO_ONLY =    0x00000001
-  DEBUG_BREAKPOINT_DEFERRED =   0x00000002
-  DEBUG_BREAKPOINT_ENABLED =    0x00000004
-  DEBUG_BREAKPOINT_ADDER_ONLY = 0x00000008
-  DEBUG_BREAKPOINT_ONE_SHOT =   0x00000010
+    DEBUG_BREAKPOINT_GO_ONLY = 0x00000001
+    DEBUG_BREAKPOINT_DEFERRED = 0x00000002
+    DEBUG_BREAKPOINT_ENABLED = 0x00000004
+    DEBUG_BREAKPOINT_ADDER_ONLY = 0x00000008
+    DEBUG_BREAKPOINT_ONE_SHOT = 0x00000010
+
+
+DebugBreakpoint2IID = IID(
+    0x1B278D20,
+    0x79F2,
+    0x426E,
+    IID_Data4_Type(0xA3, 0xF9, 0xC1, 0xDD, 0xF3, 0x75, 0xD4, 0x8E),
+)
 
-DebugBreakpoint2IID = IID(0x1b278d20, 0x79f2, 0x426e, IID_Data4_Type(0xa3, 0xf9, 0xc1, 0xdd, 0xf3, 0x75, 0xd4, 0x8e))
 
 class DebugBreakpoint2(Structure):
-  pass
+    pass
+
 
 class DebugBreakpoint2Vtbl(Structure):
-  wrp = partial(WINFUNCTYPE, c_long, POINTER(DebugBreakpoint2))
-  idb_setoffset = wrp(c_ulonglong)
-  idb_setflags = wrp(c_ulong)
-  _fields_ = [
-      ("QueryInterface", c_void_p),
-      ("AddRef", c_void_p),
-      ("Release", c_void_p),
-      ("GetId", c_void_p),
-      ("GetType", c_void_p),
-      ("GetAdder", c_void_p),
-      ("GetFlags", c_void_p),
-      ("AddFlags", c_void_p),
-      ("RemoveFlags", c_void_p),
-      ("SetFlags", idb_setflags),
-      ("GetOffset", c_void_p),
-      ("SetOffset", idb_setoffset),
-      ("GetDataParameters", c_void_p),
-      ("SetDataParameters", c_void_p),
-      ("GetPassCount", c_void_p),
-      ("SetPassCount", c_void_p),
-      ("GetCurrentPassCount", c_void_p),
-      ("GetMatchThreadId", c_void_p),
-      ("SetMatchThreadId", c_void_p),
-      ("GetCommand", c_void_p),
-      ("SetCommand", c_void_p),
-      ("GetOffsetExpression", c_void_p),
-      ("SetOffsetExpression", c_void_p),
-      ("GetParameters", c_void_p),
-      ("GetCommandWide", c_void_p),
-      ("SetCommandWide", c_void_p),
-      ("GetOffsetExpressionWide", c_void_p),
-      ("SetOffsetExpressionWide", c_void_p)
+    wrp = partial(WINFUNCTYPE, c_long, POINTER(DebugBreakpoint2))
+    idb_setoffset = wrp(c_ulonglong)
+    idb_setflags = wrp(c_ulong)
+    _fields_ = [
+        ("QueryInterface", c_void_p),
+        ("AddRef", c_void_p),
+        ("Release", c_void_p),
+        ("GetId", c_void_p),
+        ("GetType", c_void_p),
+        ("GetAdder", c_void_p),
+        ("GetFlags", c_void_p),
+        ("AddFlags", c_void_p),
+        ("RemoveFlags", c_void_p),
+        ("SetFlags", idb_setflags),
+        ("GetOffset", c_void_p),
+        ("SetOffset", idb_setoffset),
+        ("GetDataParameters", c_void_p),
+        ("SetDataParameters", c_void_p),
+        ("GetPassCount", c_void_p),
+        ("SetPassCount", c_void_p),
+        ("GetCurrentPassCount", c_void_p),
+        ("GetMatchThreadId", c_void_p),
+        ("SetMatchThreadId", c_void_p),
+        ("GetCommand", c_void_p),
+        ("SetCommand", c_void_p),
+        ("GetOffsetExpression", c_void_p),
+        ("SetOffsetExpression", c_void_p),
+        ("GetParameters", c_void_p),
+        ("GetCommandWide", c_void_p),
+        ("SetCommandWide", c_void_p),
+        ("GetOffsetExpressionWide", c_void_p),
+        ("SetOffsetExpressionWide", c_void_p),
     ]
 
+
 DebugBreakpoint2._fields_ = [("lpVtbl", POINTER(DebugBreakpoint2Vtbl))]
 
+
 class Breakpoint(object):
-  def __init__(self, breakpoint):
-    self.breakpoint = breakpoint.contents
-    self.vt = self.breakpoint.lpVtbl.contents
+    def __init__(self, breakpoint):
+        self.breakpoint = breakpoint.contents
+        self.vt = self.breakpoint.lpVtbl.contents
 
-  def SetFlags(self, flags):
-    res = self.vt.SetFlags(self.breakpoint, flags)
-    aborter(res, "Breakpoint SetFlags")
+    def SetFlags(self, flags):
+        res = self.vt.SetFlags(self.breakpoint, flags)
+        aborter(res, "Breakpoint SetFlags")
 
-  def SetOffset(self, offs):
-    res = self.vt.SetOffset(self.breakpoint, offs)
-    aborter(res, "Breakpoint SetOffset")
+    def SetOffset(self, offs):
+        res = self.vt.SetOffset(self.breakpoint, offs)
+        aborter(res, "Breakpoint SetOffset")
 
-  def RemoveFlags(self, flags):
-    res = self.vt.RemoveFlags(self.breakpoint, flags)
-    aborter(res, "Breakpoint RemoveFlags")
+    def RemoveFlags(self, flags):
+        res = self.vt.RemoveFlags(self.breakpoint, flags)
+        aborter(res, "Breakpoint RemoveFlags")
 
-  def die(self):
-    self.breakpoint = None
-    self.vt = None
+    def die(self):
+        self.breakpoint = None
+        self.vt = None

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/client.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/client.py
index 22d4652fcac55..d5fb504be73f4 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/client.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/client.py
@@ -14,198 +14,224 @@
 from . import symbols
 from . import sysobjs
 
+
 class DebugAttach(IntFlag):
-  DEBUG_ATTACH_DEFAULT =                      0
-  DEBUG_ATTACH_NONINVASIVE =                  1
-  DEBUG_ATTACH_EXISTING =                     2
-  DEBUG_ATTACH_NONINVASIVE_NO_SUSPEND =       4
-  DEBUG_ATTACH_INVASIVE_NO_INITIAL_BREAK =    8
-  DEBUG_ATTACH_INVASIVE_RESUME_PROCESS =   0x10
-  DEBUG_ATTACH_NONINVASIVE_ALLOW_PARTIAL = 0x20
+    DEBUG_ATTACH_DEFAULT = 0
+    DEBUG_ATTACH_NONINVASIVE = 1
+    DEBUG_ATTACH_EXISTING = 2
+    DEBUG_ATTACH_NONINVASIVE_NO_SUSPEND = 4
+    DEBUG_ATTACH_INVASIVE_NO_INITIAL_BREAK = 8
+    DEBUG_ATTACH_INVASIVE_RESUME_PROCESS = 0x10
+    DEBUG_ATTACH_NONINVASIVE_ALLOW_PARTIAL = 0x20
+
 
 # UUID for DebugClient7 interface.
-DebugClient7IID = IID(0x13586be3, 0x542e, 0x481e, IID_Data4_Type(0xb1, 0xf2, 0x84, 0x97, 0xba, 0x74, 0xf9, 0xa9 ))
+DebugClient7IID = IID(
+    0x13586BE3,
+    0x542E,
+    0x481E,
+    IID_Data4_Type(0xB1, 0xF2, 0x84, 0x97, 0xBA, 0x74, 0xF9, 0xA9),
+)
+
 
 class DEBUG_CREATE_PROCESS_OPTIONS(Structure):
-  _fields_ = [
-    ("CreateFlags", c_ulong),
-    ("EngCreateFlags", c_ulong),
-    ("VerifierFlags", c_ulong),
-    ("Reserved", c_ulong)
-  ]
+    _fields_ = [
+        ("CreateFlags", c_ulong),
+        ("EngCreateFlags", c_ulong),
+        ("VerifierFlags", c_ulong),
+        ("Reserved", c_ulong),
+    ]
+
 
 class IDebugClient7(Structure):
-  pass
+    pass
+
 
 class IDebugClient7Vtbl(Structure):
-  wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugClient7))
-  idc_queryinterface = wrp(POINTER(IID), POINTER(c_void_p))
-  idc_attachprocess = wrp(c_longlong, c_long, c_long)
-  idc_detachprocesses = wrp()
-  idc_terminateprocesses = wrp()
-  idc_createprocessandattach2 = wrp(c_ulonglong, c_char_p, c_void_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong)
-  _fields_ = [
-      ("QueryInterface", idc_queryinterface),
-      ("AddRef", c_void_p),
-      ("Release", c_void_p),
-      ("AttachKernel", c_void_p),
-      ("GetKernelConnectionOptions", c_void_p),
-      ("SetKernelConnectionOptions", c_void_p),
-      ("StartProcessServer", c_void_p),
-      ("ConnectProcessServer", c_void_p),
-      ("DisconnectProcessServer", c_void_p),
-      ("GetRunningProcessSystemIds", c_void_p),
-      ("GetRunningProcessSystemIdsByExecutableName", c_void_p),
-      ("GetRunningProcessDescription", c_void_p),
-      ("AttachProcess", idc_attachprocess),
-      ("CreateProcess", c_void_p),
-      ("CreateProcessAndAttach", c_void_p),
-      ("GetProcessOptions", c_void_p),
-      ("AddProcessOptions", c_void_p),
-      ("RemoveProcessOptions", c_void_p),
-      ("SetProcessOptions", c_void_p),
-      ("OpenDumpFile", c_void_p),
-      ("WriteDumpFile", c_void_p),
-      ("ConnectSession", c_void_p),
-      ("StartServer", c_void_p),
-      ("OutputServers", c_void_p),
-      ("TerminateProcesses", idc_terminateprocesses),
-      ("DetachProcesses", idc_detachprocesses),
-      ("EndSession", c_void_p),
-      ("GetExitCode", c_void_p),
-      ("DispatchCallbacks", c_void_p),
-      ("ExitDispatch", c_void_p),
-      ("CreateClient", c_void_p),
-      ("GetInputCallbacks", c_void_p),
-      ("SetInputCallbacks", c_void_p),
-      ("GetOutputCallbacks", c_void_p),
-      ("SetOutputCallbacks", c_void_p),
-      ("GetOutputMask", c_void_p),
-      ("SetOutputMask", c_void_p),
-      ("GetOtherOutputMask", c_void_p),
-      ("SetOtherOutputMask", c_void_p),
-      ("GetOutputWidth", c_void_p),
-      ("SetOutputWidth", c_void_p),
-      ("GetOutputLinePrefix", c_void_p),
-      ("SetOutputLinePrefix", c_void_p),
-      ("GetIdentity", c_void_p),
-      ("OutputIdentity", c_void_p),
-      ("GetEventCallbacks", c_void_p),
-      ("SetEventCallbacks", c_void_p),
-      ("FlushCallbacks", c_void_p),
-      ("WriteDumpFile2", c_void_p),
-      ("AddDumpInformationFile", c_void_p),
-      ("EndProcessServer", c_void_p),
-      ("WaitForProcessServerEnd", c_void_p),
-      ("IsKernelDebuggerEnabled", c_void_p),
-      ("TerminateCurrentProcess", c_void_p),
-      ("DetachCurrentProcess", c_void_p),
-      ("AbandonCurrentProcess", c_void_p),
-      ("GetRunningProcessSystemIdByExecutableNameWide", c_void_p),
-      ("GetRunningProcessDescriptionWide", c_void_p),
-      ("CreateProcessWide", c_void_p),
-      ("CreateProcessAndAttachWide", c_void_p),
-      ("OpenDumpFileWide", c_void_p),
-      ("WriteDumpFileWide", c_void_p),
-      ("AddDumpInformationFileWide", c_void_p),
-      ("GetNumberDumpFiles", c_void_p),
-      ("GetDumpFile", c_void_p),
-      ("GetDumpFileWide", c_void_p),
-      ("AttachKernelWide", c_void_p),
-      ("GetKernelConnectionOptionsWide", c_void_p),
-      ("SetKernelConnectionOptionsWide", c_void_p),
-      ("StartProcessServerWide", c_void_p),
-      ("ConnectProcessServerWide", c_void_p),
-      ("StartServerWide", c_void_p),
-      ("OutputServerWide", c_void_p),
-      ("GetOutputCallbacksWide", c_void_p),
-      ("SetOutputCallbacksWide", c_void_p),
-      ("GetOutputLinePrefixWide", c_void_p),
-      ("SetOutputLinePrefixWide", c_void_p),
-      ("GetIdentityWide", c_void_p),
-      ("OutputIdentityWide", c_void_p),
-      ("GetEventCallbacksWide", c_void_p),
-      ("SetEventCallbacksWide", c_void_p),
-      ("CreateProcess2", c_void_p),
-      ("CreateProcess2Wide", c_void_p),
-      ("CreateProcessAndAttach2", idc_createprocessandattach2),
-      ("CreateProcessAndAttach2Wide", c_void_p),
-      ("PushOutputLinePrefix", c_void_p),
-      ("PushOutputLinePrefixWide", c_void_p),
-      ("PopOutputLinePrefix", c_void_p),
-      ("GetNumberInputCallbacks", c_void_p),
-      ("GetNumberOutputCallbacks", c_void_p),
-      ("GetNumberEventCallbacks", c_void_p),
-      ("GetQuitLockString", c_void_p),
-      ("SetQuitLockString", c_void_p),
-      ("GetQuitLockStringWide", c_void_p),
-      ("SetQuitLockStringWide", c_void_p),
-      ("SetEventContextCallbacks", c_void_p),
-      ("SetClientContext", c_void_p),
+    wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugClient7))
+    idc_queryinterface = wrp(POINTER(IID), POINTER(c_void_p))
+    idc_attachprocess = wrp(c_longlong, c_long, c_long)
+    idc_detachprocesses = wrp()
+    idc_terminateprocesses = wrp()
+    idc_createprocessandattach2 = wrp(
+        c_ulonglong, c_char_p, c_void_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong
+    )
+    _fields_ = [
+        ("QueryInterface", idc_queryinterface),
+        ("AddRef", c_void_p),
+        ("Release", c_void_p),
+        ("AttachKernel", c_void_p),
+        ("GetKernelConnectionOptions", c_void_p),
+        ("SetKernelConnectionOptions", c_void_p),
+        ("StartProcessServer", c_void_p),
+        ("ConnectProcessServer", c_void_p),
+        ("DisconnectProcessServer", c_void_p),
+        ("GetRunningProcessSystemIds", c_void_p),
+        ("GetRunningProcessSystemIdsByExecutableName", c_void_p),
+        ("GetRunningProcessDescription", c_void_p),
+        ("AttachProcess", idc_attachprocess),
+        ("CreateProcess", c_void_p),
+        ("CreateProcessAndAttach", c_void_p),
+        ("GetProcessOptions", c_void_p),
+        ("AddProcessOptions", c_void_p),
+        ("RemoveProcessOptions", c_void_p),
+        ("SetProcessOptions", c_void_p),
+        ("OpenDumpFile", c_void_p),
+        ("WriteDumpFile", c_void_p),
+        ("ConnectSession", c_void_p),
+        ("StartServer", c_void_p),
+        ("OutputServers", c_void_p),
+        ("TerminateProcesses", idc_terminateprocesses),
+        ("DetachProcesses", idc_detachprocesses),
+        ("EndSession", c_void_p),
+        ("GetExitCode", c_void_p),
+        ("DispatchCallbacks", c_void_p),
+        ("ExitDispatch", c_void_p),
+        ("CreateClient", c_void_p),
+        ("GetInputCallbacks", c_void_p),
+        ("SetInputCallbacks", c_void_p),
+        ("GetOutputCallbacks", c_void_p),
+        ("SetOutputCallbacks", c_void_p),
+        ("GetOutputMask", c_void_p),
+        ("SetOutputMask", c_void_p),
+        ("GetOtherOutputMask", c_void_p),
+        ("SetOtherOutputMask", c_void_p),
+        ("GetOutputWidth", c_void_p),
+        ("SetOutputWidth", c_void_p),
+        ("GetOutputLinePrefix", c_void_p),
+        ("SetOutputLinePrefix", c_void_p),
+        ("GetIdentity", c_void_p),
+        ("OutputIdentity", c_void_p),
+        ("GetEventCallbacks", c_void_p),
+        ("SetEventCallbacks", c_void_p),
+        ("FlushCallbacks", c_void_p),
+        ("WriteDumpFile2", c_void_p),
+        ("AddDumpInformationFile", c_void_p),
+        ("EndProcessServer", c_void_p),
+        ("WaitForProcessServerEnd", c_void_p),
+        ("IsKernelDebuggerEnabled", c_void_p),
+        ("TerminateCurrentProcess", c_void_p),
+        ("DetachCurrentProcess", c_void_p),
+        ("AbandonCurrentProcess", c_void_p),
+        ("GetRunningProcessSystemIdByExecutableNameWide", c_void_p),
+        ("GetRunningProcessDescriptionWide", c_void_p),
+        ("CreateProcessWide", c_void_p),
+        ("CreateProcessAndAttachWide", c_void_p),
+        ("OpenDumpFileWide", c_void_p),
+        ("WriteDumpFileWide", c_void_p),
+        ("AddDumpInformationFileWide", c_void_p),
+        ("GetNumberDumpFiles", c_void_p),
+        ("GetDumpFile", c_void_p),
+        ("GetDumpFileWide", c_void_p),
+        ("AttachKernelWide", c_void_p),
+        ("GetKernelConnectionOptionsWide", c_void_p),
+        ("SetKernelConnectionOptionsWide", c_void_p),
+        ("StartProcessServerWide", c_void_p),
+        ("ConnectProcessServerWide", c_void_p),
+        ("StartServerWide", c_void_p),
+        ("OutputServerWide", c_void_p),
+        ("GetOutputCallbacksWide", c_void_p),
+        ("SetOutputCallbacksWide", c_void_p),
+        ("GetOutputLinePrefixWide", c_void_p),
+        ("SetOutputLinePrefixWide", c_void_p),
+        ("GetIdentityWide", c_void_p),
+        ("OutputIdentityWide", c_void_p),
+        ("GetEventCallbacksWide", c_void_p),
+        ("SetEventCallbacksWide", c_void_p),
+        ("CreateProcess2", c_void_p),
+        ("CreateProcess2Wide", c_void_p),
+        ("CreateProcessAndAttach2", idc_createprocessandattach2),
+        ("CreateProcessAndAttach2Wide", c_void_p),
+        ("PushOutputLinePrefix", c_void_p),
+        ("PushOutputLinePrefixWide", c_void_p),
+        ("PopOutputLinePrefix", c_void_p),
+        ("GetNumberInputCallbacks", c_void_p),
+        ("GetNumberOutputCallbacks", c_void_p),
+        ("GetNumberEventCallbacks", c_void_p),
+        ("GetQuitLockString", c_void_p),
+        ("SetQuitLockString", c_void_p),
+        ("GetQuitLockStringWide", c_void_p),
+        ("SetQuitLockStringWide", c_void_p),
+        ("SetEventContextCallbacks", c_void_p),
+        ("SetClientContext", c_void_p),
     ]
 
+
 IDebugClient7._fields_ = [("lpVtbl", POINTER(IDebugClient7Vtbl))]
 
+
 class Client(object):
-  def __init__(self):
-    DbgEng = WinDLL("DbgEng")
-    DbgEng.DebugCreate.argtypes = [POINTER(IID), POINTER(POINTER(IDebugClient7))]
-    DbgEng.DebugCreate.restype = c_ulong
-
-    # Call DebugCreate to create a new debug client
-    ptr = POINTER(IDebugClient7)()
-    res = DbgEng.DebugCreate(byref(DebugClient7IID), ptr)
-    aborter(res, "DebugCreate")
-    self.client = ptr.contents
-    self.vt = vt = self.client.lpVtbl.contents
-
-    def QI(iface, ptr):
-      return vt.QueryInterface(self.client, byref(iface), byref(ptr))
-
-    # Query for a control object
-    ptr = c_void_p()
-    res = QI(control.DebugControl7IID, ptr)
-    aborter(res, "QueryInterface control")
-    self.control_ptr = cast(ptr, POINTER(control.IDebugControl7))
-    self.Control = control.Control(self.control_ptr)
-
-    # Query for a SystemObjects object
-    ptr = c_void_p()
-    res = QI(sysobjs.DebugSystemObjects4IID, ptr)
-    aborter(res, "QueryInterface sysobjects")
-    self.sysobjects_ptr = cast(ptr, POINTER(sysobjs.IDebugSystemObjects4))
-    self.SysObjects = sysobjs.SysObjects(self.sysobjects_ptr)
-
-    # Query for a Symbols object
-    ptr = c_void_p()
-    res = QI(symbols.DebugSymbols5IID, ptr)
-    aborter(res, "QueryInterface debugsymbosl5")
-    self.symbols_ptr = cast(ptr, POINTER(symbols.IDebugSymbols5))
-    self.Symbols = symbols.Symbols(self.symbols_ptr)
-
-  def AttachProcess(self, pid):
-    # Zero process-server id means no process-server.
-    res = self.vt.AttachProcess(self.client, 0, pid, DebugAttach.DEBUG_ATTACH_DEFAULT)
-    aborter(res, "AttachProcess")
-    return
-
-  def DetachProcesses(self):
-    res = self.vt.DetachProcesses(self.client)
-    aborter(res, "DetachProcesses")
-    return
-
-  def TerminateProcesses(self):
-    res = self.vt.TerminateProcesses(self.client)
-    aborter(res, "TerminateProcesses")
-    return
-
-  def CreateProcessAndAttach2(self, cmdline):
-    options = DEBUG_CREATE_PROCESS_OPTIONS()
-    options.CreateFlags = 0x2 # DEBUG_ONLY_THIS_PROCESS
-    options.EngCreateFlags  = 0
-    options.VerifierFlags = 0
-    options.Reserved = 0
-    attach_flags = 0
-    res = self.vt.CreateProcessAndAttach2(self.client, 0, cmdline.encode("ascii"), byref(options), sizeof(options), None, None, 0, attach_flags)
-    aborter(res, "CreateProcessAndAttach2")
-    return
+    def __init__(self):
+        DbgEng = WinDLL("DbgEng")
+        DbgEng.DebugCreate.argtypes = [POINTER(IID), POINTER(POINTER(IDebugClient7))]
+        DbgEng.DebugCreate.restype = c_ulong
+
+        # Call DebugCreate to create a new debug client
+        ptr = POINTER(IDebugClient7)()
+        res = DbgEng.DebugCreate(byref(DebugClient7IID), ptr)
+        aborter(res, "DebugCreate")
+        self.client = ptr.contents
+        self.vt = vt = self.client.lpVtbl.contents
+
+        def QI(iface, ptr):
+            return vt.QueryInterface(self.client, byref(iface), byref(ptr))
+
+        # Query for a control object
+        ptr = c_void_p()
+        res = QI(control.DebugControl7IID, ptr)
+        aborter(res, "QueryInterface control")
+        self.control_ptr = cast(ptr, POINTER(control.IDebugControl7))
+        self.Control = control.Control(self.control_ptr)
+
+        # Query for a SystemObjects object
+        ptr = c_void_p()
+        res = QI(sysobjs.DebugSystemObjects4IID, ptr)
+        aborter(res, "QueryInterface sysobjects")
+        self.sysobjects_ptr = cast(ptr, POINTER(sysobjs.IDebugSystemObjects4))
+        self.SysObjects = sysobjs.SysObjects(self.sysobjects_ptr)
+
+        # Query for a Symbols object
+        ptr = c_void_p()
+        res = QI(symbols.DebugSymbols5IID, ptr)
+        aborter(res, "QueryInterface debugsymbosl5")
+        self.symbols_ptr = cast(ptr, POINTER(symbols.IDebugSymbols5))
+        self.Symbols = symbols.Symbols(self.symbols_ptr)
+
+    def AttachProcess(self, pid):
+        # Zero process-server id means no process-server.
+        res = self.vt.AttachProcess(
+            self.client, 0, pid, DebugAttach.DEBUG_ATTACH_DEFAULT
+        )
+        aborter(res, "AttachProcess")
+        return
+
+    def DetachProcesses(self):
+        res = self.vt.DetachProcesses(self.client)
+        aborter(res, "DetachProcesses")
+        return
+
+    def TerminateProcesses(self):
+        res = self.vt.TerminateProcesses(self.client)
+        aborter(res, "TerminateProcesses")
+        return
+
+    def CreateProcessAndAttach2(self, cmdline):
+        options = DEBUG_CREATE_PROCESS_OPTIONS()
+        options.CreateFlags = 0x2  # DEBUG_ONLY_THIS_PROCESS
+        options.EngCreateFlags = 0
+        options.VerifierFlags = 0
+        options.Reserved = 0
+        attach_flags = 0
+        res = self.vt.CreateProcessAndAttach2(
+            self.client,
+            0,
+            cmdline.encode("ascii"),
+            byref(options),
+            sizeof(options),
+            None,
+            None,
+            0,
+            attach_flags,
+        )
+        aborter(res, "CreateProcessAndAttach2")
+        return

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/control.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/control.py
index 5f23e2d22d8f0..e370738c3776d 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/control.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/control.py
@@ -11,401 +11,440 @@
 from .utils import *
 from .breakpoint import *
 
+
 class DEBUG_STACK_FRAME_EX(Structure):
-  _fields_ = [
-      ("InstructionOffset", c_ulonglong),
-      ("ReturnOffset", c_ulonglong),
-      ("FrameOffset", c_ulonglong),
-      ("StackOffset", c_ulonglong),
-      ("FuncTableEntry", c_ulonglong),
-      ("Params", c_ulonglong * 4),
-      ("Reserved", c_ulonglong * 6),
-      ("Virtual", c_bool),
-      ("FrameNumber", c_ulong),
-      ("InlineFrameContext", c_ulong),
-      ("Reserved1", c_ulong)
+    _fields_ = [
+        ("InstructionOffset", c_ulonglong),
+        ("ReturnOffset", c_ulonglong),
+        ("FrameOffset", c_ulonglong),
+        ("StackOffset", c_ulonglong),
+        ("FuncTableEntry", c_ulonglong),
+        ("Params", c_ulonglong * 4),
+        ("Reserved", c_ulonglong * 6),
+        ("Virtual", c_bool),
+        ("FrameNumber", c_ulong),
+        ("InlineFrameContext", c_ulong),
+        ("Reserved1", c_ulong),
     ]
+
+
 PDEBUG_STACK_FRAME_EX = POINTER(DEBUG_STACK_FRAME_EX)
 
+
 class DEBUG_VALUE_U(Union):
-  _fields_ = [
-      ("I8", c_byte),
-      ("I16", c_short),
-      ("I32", c_int),
-      ("I64", c_long),
-      ("F32", c_float),
-      ("F64", c_double),
-      ("RawBytes", c_ubyte * 24) # Force length to 24b.
+    _fields_ = [
+        ("I8", c_byte),
+        ("I16", c_short),
+        ("I32", c_int),
+        ("I64", c_long),
+        ("F32", c_float),
+        ("F64", c_double),
+        ("RawBytes", c_ubyte * 24),  # Force length to 24b.
     ]
 
+
 class DEBUG_VALUE(Structure):
-  _fields_ = [
-      ("U", DEBUG_VALUE_U),
-      ("TailOfRawBytes", c_ulong),
-      ("Type", c_ulong)
-    ]
+    _fields_ = [("U", DEBUG_VALUE_U), ("TailOfRawBytes", c_ulong), ("Type", c_ulong)]
+
+
 PDEBUG_VALUE = POINTER(DEBUG_VALUE)
 
+
 class DebugValueType(IntEnum):
-  DEBUG_VALUE_INVALID      = 0
-  DEBUG_VALUE_INT8         = 1
-  DEBUG_VALUE_INT16        = 2
-  DEBUG_VALUE_INT32        = 3
-  DEBUG_VALUE_INT64        = 4
-  DEBUG_VALUE_FLOAT32      = 5
-  DEBUG_VALUE_FLOAT64      = 6
-  DEBUG_VALUE_FLOAT80      = 7
-  DEBUG_VALUE_FLOAT82      = 8
-  DEBUG_VALUE_FLOAT128     = 9
-  DEBUG_VALUE_VECTOR64     = 10
-  DEBUG_VALUE_VECTOR128    = 11
-  DEBUG_VALUE_TYPES        = 12
+    DEBUG_VALUE_INVALID = 0
+    DEBUG_VALUE_INT8 = 1
+    DEBUG_VALUE_INT16 = 2
+    DEBUG_VALUE_INT32 = 3
+    DEBUG_VALUE_INT64 = 4
+    DEBUG_VALUE_FLOAT32 = 5
+    DEBUG_VALUE_FLOAT64 = 6
+    DEBUG_VALUE_FLOAT80 = 7
+    DEBUG_VALUE_FLOAT82 = 8
+    DEBUG_VALUE_FLOAT128 = 9
+    DEBUG_VALUE_VECTOR64 = 10
+    DEBUG_VALUE_VECTOR128 = 11
+    DEBUG_VALUE_TYPES = 12
+
 
 # UUID for DebugControl7 interface.
-DebugControl7IID = IID(0xb86fb3b1, 0x80d4, 0x475b, IID_Data4_Type(0xae, 0xa3, 0xcf, 0x06, 0x53, 0x9c, 0xf6, 0x3a))
+DebugControl7IID = IID(
+    0xB86FB3B1,
+    0x80D4,
+    0x475B,
+    IID_Data4_Type(0xAE, 0xA3, 0xCF, 0x06, 0x53, 0x9C, 0xF6, 0x3A),
+)
+
 
 class IDebugControl7(Structure):
-  pass
+    pass
+
 
 class IDebugControl7Vtbl(Structure):
-  wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugControl7))
-  idc_getnumbereventfilters = wrp(c_ulong_p, c_ulong_p, c_ulong_p)
-  idc_setexceptionfiltersecondcommand = wrp(c_ulong, c_char_p)
-  idc_waitforevent = wrp(c_long, c_long)
-  idc_execute = wrp(c_long, c_char_p, c_long)
-  idc_setexpressionsyntax = wrp(c_ulong)
-  idc_addbreakpoint2 = wrp(c_ulong, c_ulong, POINTER(POINTER(DebugBreakpoint2)))
-  idc_setexecutionstatus = wrp(c_ulong)
-  idc_getexecutionstatus = wrp(c_ulong_p)
-  idc_getstacktraceex = wrp(c_ulonglong, c_ulonglong, c_ulonglong, PDEBUG_STACK_FRAME_EX, c_ulong, c_ulong_p)
-  idc_evaluate = wrp(c_char_p, c_ulong, PDEBUG_VALUE, c_ulong_p)
-  idc_setengineoptions = wrp(c_ulong)
-  _fields_ = [
-      ("QueryInterface", c_void_p),
-      ("AddRef", c_void_p),
-      ("Release", c_void_p),
-      ("GetInterrupt", c_void_p),
-      ("SetInterrupt", c_void_p),
-      ("GetInterruptTimeout", c_void_p),
-      ("SetInterruptTimeout", c_void_p),
-      ("GetLogFile", c_void_p),
-      ("OpenLogFile", c_void_p),
-      ("CloseLogFile", c_void_p),
-      ("GetLogMask", c_void_p),
-      ("SetLogMask", c_void_p),
-      ("Input", c_void_p),
-      ("ReturnInput", c_void_p),
-      ("Output", c_void_p),
-      ("OutputVaList", c_void_p),
-      ("ControlledOutput", c_void_p),
-      ("ControlledOutputVaList", c_void_p),
-      ("OutputPrompt", c_void_p),
-      ("OutputPromptVaList", c_void_p),
-      ("GetPromptText", c_void_p),
-      ("OutputCurrentState", c_void_p),
-      ("OutputVersionInformation", c_void_p),
-      ("GetNotifyEventHandle", c_void_p),
-      ("SetNotifyEventHandle", c_void_p),
-      ("Assemble", c_void_p),
-      ("Disassemble", c_void_p),
-      ("GetDisassembleEffectiveOffset", c_void_p),
-      ("OutputDisassembly", c_void_p),
-      ("OutputDisassemblyLines", c_void_p),
-      ("GetNearInstruction", c_void_p),
-      ("GetStackTrace", c_void_p),
-      ("GetReturnOffset", c_void_p),
-      ("OutputStackTrace", c_void_p),
-      ("GetDebuggeeType", c_void_p),
-      ("GetActualProcessorType", c_void_p),
-      ("GetExecutingProcessorType", c_void_p),
-      ("GetNumberPossibleExecutingProcessorTypes", c_void_p),
-      ("GetPossibleExecutingProcessorTypes", c_void_p),
-      ("GetNumberProcessors", c_void_p),
-      ("GetSystemVersion", c_void_p),
-      ("GetPageSize", c_void_p),
-      ("IsPointer64Bit", c_void_p),
-      ("ReadBugCheckData", c_void_p),
-      ("GetNumberSupportedProcessorTypes", c_void_p),
-      ("GetSupportedProcessorTypes", c_void_p),
-      ("GetProcessorTypeNames", c_void_p),
-      ("GetEffectiveProcessorType", c_void_p),
-      ("SetEffectiveProcessorType", c_void_p),
-      ("GetExecutionStatus", idc_getexecutionstatus),
-      ("SetExecutionStatus", idc_setexecutionstatus),
-      ("GetCodeLevel", c_void_p),
-      ("SetCodeLevel", c_void_p),
-      ("GetEngineOptions", c_void_p),
-      ("AddEngineOptions", c_void_p),
-      ("RemoveEngineOptions", c_void_p),
-      ("SetEngineOptions", idc_setengineoptions),
-      ("GetSystemErrorControl", c_void_p),
-      ("SetSystemErrorControl", c_void_p),
-      ("GetTextMacro", c_void_p),
-      ("SetTextMacro", c_void_p),
-      ("GetRadix", c_void_p),
-      ("SetRadix", c_void_p),
-      ("Evaluate", idc_evaluate),
-      ("CoerceValue", c_void_p),
-      ("CoerceValues", c_void_p),
-      ("Execute", idc_execute),
-      ("ExecuteCommandFile", c_void_p),
-      ("GetNumberBreakpoints", c_void_p),
-      ("GetBreakpointByIndex", c_void_p),
-      ("GetBreakpointById", c_void_p),
-      ("GetBreakpointParameters", c_void_p),
-      ("AddBreakpoint", c_void_p),
-      ("RemoveBreakpoint", c_void_p),
-      ("AddExtension", c_void_p),
-      ("RemoveExtension", c_void_p),
-      ("GetExtensionByPath", c_void_p),
-      ("CallExtension", c_void_p),
-      ("GetExtensionFunction", c_void_p),
-      ("GetWindbgExtensionApis32", c_void_p),
-      ("GetWindbgExtensionApis64", c_void_p),
-      ("GetNumberEventFilters", idc_getnumbereventfilters),
-      ("GetEventFilterText", c_void_p),
-      ("GetEventFilterCommand", c_void_p),
-      ("SetEventFilterCommand", c_void_p),
-      ("GetSpecificFilterParameters", c_void_p),
-      ("SetSpecificFilterParameters", c_void_p),
-      ("GetSpecificFilterArgument", c_void_p),
-      ("SetSpecificFilterArgument", c_void_p),
-      ("GetExceptionFilterParameters", c_void_p),
-      ("SetExceptionFilterParameters", c_void_p),
-      ("GetExceptionFilterSecondCommand", c_void_p),
-      ("SetExceptionFilterSecondCommand", idc_setexceptionfiltersecondcommand),
-      ("WaitForEvent", idc_waitforevent),
-      ("GetLastEventInformation", c_void_p),
-      ("GetCurrentTimeDate", c_void_p),
-      ("GetCurrentSystemUpTime", c_void_p),
-      ("GetDumpFormatFlags", c_void_p),
-      ("GetNumberTextReplacements", c_void_p),
-      ("GetTextReplacement", c_void_p),
-      ("SetTextReplacement", c_void_p),
-      ("RemoveTextReplacements", c_void_p),
-      ("OutputTextReplacements", c_void_p),
-      ("GetAssemblyOptions", c_void_p),
-      ("AddAssemblyOptions", c_void_p),
-      ("RemoveAssemblyOptions", c_void_p),
-      ("SetAssemblyOptions", c_void_p),
-      ("GetExpressionSyntax", c_void_p),
-      ("SetExpressionSyntax", idc_setexpressionsyntax),
-      ("SetExpressionSyntaxByName", c_void_p),
-      ("GetNumberExpressionSyntaxes", c_void_p),
-      ("GetExpressionSyntaxNames", c_void_p),
-      ("GetNumberEvents", c_void_p),
-      ("GetEventIndexDescription", c_void_p),
-      ("GetCurrentEventIndex", c_void_p),
-      ("SetNextEventIndex", c_void_p),
-      ("GetLogFileWide", c_void_p),
-      ("OpenLogFileWide", c_void_p),
-      ("InputWide", c_void_p),
-      ("ReturnInputWide", c_void_p),
-      ("OutputWide", c_void_p),
-      ("OutputVaListWide", c_void_p),
-      ("ControlledOutputWide", c_void_p),
-      ("ControlledOutputVaListWide", c_void_p),
-      ("OutputPromptWide", c_void_p),
-      ("OutputPromptVaListWide", c_void_p),
-      ("GetPromptTextWide", c_void_p),
-      ("AssembleWide", c_void_p),
-      ("DisassembleWide", c_void_p),
-      ("GetProcessrTypeNamesWide", c_void_p),
-      ("GetTextMacroWide", c_void_p),
-      ("SetTextMacroWide", c_void_p),
-      ("EvaluateWide", c_void_p),
-      ("ExecuteWide", c_void_p),
-      ("ExecuteCommandFileWide", c_void_p),
-      ("GetBreakpointByIndex2", c_void_p),
-      ("GetBreakpointById2", c_void_p),
-      ("AddBreakpoint2", idc_addbreakpoint2),
-      ("RemoveBreakpoint2", c_void_p),
-      ("AddExtensionWide", c_void_p),
-      ("GetExtensionByPathWide", c_void_p),
-      ("CallExtensionWide", c_void_p),
-      ("GetExtensionFunctionWide", c_void_p),
-      ("GetEventFilterTextWide", c_void_p),
-      ("GetEventfilterCommandWide", c_void_p),
-      ("SetEventFilterCommandWide", c_void_p),
-      ("GetSpecificFilterArgumentWide", c_void_p),
-      ("SetSpecificFilterArgumentWide", c_void_p),
-      ("GetExceptionFilterSecondCommandWide", c_void_p),
-      ("SetExceptionFilterSecondCommandWider", c_void_p),
-      ("GetLastEventInformationWide", c_void_p),
-      ("GetTextReplacementWide", c_void_p),
-      ("SetTextReplacementWide", c_void_p),
-      ("SetExpressionSyntaxByNameWide", c_void_p),
-      ("GetExpressionSyntaxNamesWide", c_void_p),
-      ("GetEventIndexDescriptionWide", c_void_p),
-      ("GetLogFile2", c_void_p),
-      ("OpenLogFile2", c_void_p),
-      ("GetLogFile2Wide", c_void_p),
-      ("OpenLogFile2Wide", c_void_p),
-      ("GetSystemVersionValues", c_void_p),
-      ("GetSystemVersionString", c_void_p),
-      ("GetSystemVersionStringWide", c_void_p),
-      ("GetContextStackTrace", c_void_p),
-      ("OutputContextStackTrace", c_void_p),
-      ("GetStoredEventInformation", c_void_p),
-      ("GetManagedStatus", c_void_p),
-      ("GetManagedStatusWide", c_void_p),
-      ("ResetManagedStatus", c_void_p),
-      ("GetStackTraceEx", idc_getstacktraceex),
-      ("OutputStackTraceEx", c_void_p),
-      ("GetContextStackTraceEx", c_void_p),
-      ("OutputContextStackTraceEx", c_void_p),
-      ("GetBreakpointByGuid", c_void_p),
-      ("GetExecutionStatusEx", c_void_p),
-      ("GetSynchronizationStatus", c_void_p),
-      ("GetDebuggeeType2", c_void_p)
+    wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugControl7))
+    idc_getnumbereventfilters = wrp(c_ulong_p, c_ulong_p, c_ulong_p)
+    idc_setexceptionfiltersecondcommand = wrp(c_ulong, c_char_p)
+    idc_waitforevent = wrp(c_long, c_long)
+    idc_execute = wrp(c_long, c_char_p, c_long)
+    idc_setexpressionsyntax = wrp(c_ulong)
+    idc_addbreakpoint2 = wrp(c_ulong, c_ulong, POINTER(POINTER(DebugBreakpoint2)))
+    idc_setexecutionstatus = wrp(c_ulong)
+    idc_getexecutionstatus = wrp(c_ulong_p)
+    idc_getstacktraceex = wrp(
+        c_ulonglong, c_ulonglong, c_ulonglong, PDEBUG_STACK_FRAME_EX, c_ulong, c_ulong_p
+    )
+    idc_evaluate = wrp(c_char_p, c_ulong, PDEBUG_VALUE, c_ulong_p)
+    idc_setengineoptions = wrp(c_ulong)
+    _fields_ = [
+        ("QueryInterface", c_void_p),
+        ("AddRef", c_void_p),
+        ("Release", c_void_p),
+        ("GetInterrupt", c_void_p),
+        ("SetInterrupt", c_void_p),
+        ("GetInterruptTimeout", c_void_p),
+        ("SetInterruptTimeout", c_void_p),
+        ("GetLogFile", c_void_p),
+        ("OpenLogFile", c_void_p),
+        ("CloseLogFile", c_void_p),
+        ("GetLogMask", c_void_p),
+        ("SetLogMask", c_void_p),
+        ("Input", c_void_p),
+        ("ReturnInput", c_void_p),
+        ("Output", c_void_p),
+        ("OutputVaList", c_void_p),
+        ("ControlledOutput", c_void_p),
+        ("ControlledOutputVaList", c_void_p),
+        ("OutputPrompt", c_void_p),
+        ("OutputPromptVaList", c_void_p),
+        ("GetPromptText", c_void_p),
+        ("OutputCurrentState", c_void_p),
+        ("OutputVersionInformation", c_void_p),
+        ("GetNotifyEventHandle", c_void_p),
+        ("SetNotifyEventHandle", c_void_p),
+        ("Assemble", c_void_p),
+        ("Disassemble", c_void_p),
+        ("GetDisassembleEffectiveOffset", c_void_p),
+        ("OutputDisassembly", c_void_p),
+        ("OutputDisassemblyLines", c_void_p),
+        ("GetNearInstruction", c_void_p),
+        ("GetStackTrace", c_void_p),
+        ("GetReturnOffset", c_void_p),
+        ("OutputStackTrace", c_void_p),
+        ("GetDebuggeeType", c_void_p),
+        ("GetActualProcessorType", c_void_p),
+        ("GetExecutingProcessorType", c_void_p),
+        ("GetNumberPossibleExecutingProcessorTypes", c_void_p),
+        ("GetPossibleExecutingProcessorTypes", c_void_p),
+        ("GetNumberProcessors", c_void_p),
+        ("GetSystemVersion", c_void_p),
+        ("GetPageSize", c_void_p),
+        ("IsPointer64Bit", c_void_p),
+        ("ReadBugCheckData", c_void_p),
+        ("GetNumberSupportedProcessorTypes", c_void_p),
+        ("GetSupportedProcessorTypes", c_void_p),
+        ("GetProcessorTypeNames", c_void_p),
+        ("GetEffectiveProcessorType", c_void_p),
+        ("SetEffectiveProcessorType", c_void_p),
+        ("GetExecutionStatus", idc_getexecutionstatus),
+        ("SetExecutionStatus", idc_setexecutionstatus),
+        ("GetCodeLevel", c_void_p),
+        ("SetCodeLevel", c_void_p),
+        ("GetEngineOptions", c_void_p),
+        ("AddEngineOptions", c_void_p),
+        ("RemoveEngineOptions", c_void_p),
+        ("SetEngineOptions", idc_setengineoptions),
+        ("GetSystemErrorControl", c_void_p),
+        ("SetSystemErrorControl", c_void_p),
+        ("GetTextMacro", c_void_p),
+        ("SetTextMacro", c_void_p),
+        ("GetRadix", c_void_p),
+        ("SetRadix", c_void_p),
+        ("Evaluate", idc_evaluate),
+        ("CoerceValue", c_void_p),
+        ("CoerceValues", c_void_p),
+        ("Execute", idc_execute),
+        ("ExecuteCommandFile", c_void_p),
+        ("GetNumberBreakpoints", c_void_p),
+        ("GetBreakpointByIndex", c_void_p),
+        ("GetBreakpointById", c_void_p),
+        ("GetBreakpointParameters", c_void_p),
+        ("AddBreakpoint", c_void_p),
+        ("RemoveBreakpoint", c_void_p),
+        ("AddExtension", c_void_p),
+        ("RemoveExtension", c_void_p),
+        ("GetExtensionByPath", c_void_p),
+        ("CallExtension", c_void_p),
+        ("GetExtensionFunction", c_void_p),
+        ("GetWindbgExtensionApis32", c_void_p),
+        ("GetWindbgExtensionApis64", c_void_p),
+        ("GetNumberEventFilters", idc_getnumbereventfilters),
+        ("GetEventFilterText", c_void_p),
+        ("GetEventFilterCommand", c_void_p),
+        ("SetEventFilterCommand", c_void_p),
+        ("GetSpecificFilterParameters", c_void_p),
+        ("SetSpecificFilterParameters", c_void_p),
+        ("GetSpecificFilterArgument", c_void_p),
+        ("SetSpecificFilterArgument", c_void_p),
+        ("GetExceptionFilterParameters", c_void_p),
+        ("SetExceptionFilterParameters", c_void_p),
+        ("GetExceptionFilterSecondCommand", c_void_p),
+        ("SetExceptionFilterSecondCommand", idc_setexceptionfiltersecondcommand),
+        ("WaitForEvent", idc_waitforevent),
+        ("GetLastEventInformation", c_void_p),
+        ("GetCurrentTimeDate", c_void_p),
+        ("GetCurrentSystemUpTime", c_void_p),
+        ("GetDumpFormatFlags", c_void_p),
+        ("GetNumberTextReplacements", c_void_p),
+        ("GetTextReplacement", c_void_p),
+        ("SetTextReplacement", c_void_p),
+        ("RemoveTextReplacements", c_void_p),
+        ("OutputTextReplacements", c_void_p),
+        ("GetAssemblyOptions", c_void_p),
+        ("AddAssemblyOptions", c_void_p),
+        ("RemoveAssemblyOptions", c_void_p),
+        ("SetAssemblyOptions", c_void_p),
+        ("GetExpressionSyntax", c_void_p),
+        ("SetExpressionSyntax", idc_setexpressionsyntax),
+        ("SetExpressionSyntaxByName", c_void_p),
+        ("GetNumberExpressionSyntaxes", c_void_p),
+        ("GetExpressionSyntaxNames", c_void_p),
+        ("GetNumberEvents", c_void_p),
+        ("GetEventIndexDescription", c_void_p),
+        ("GetCurrentEventIndex", c_void_p),
+        ("SetNextEventIndex", c_void_p),
+        ("GetLogFileWide", c_void_p),
+        ("OpenLogFileWide", c_void_p),
+        ("InputWide", c_void_p),
+        ("ReturnInputWide", c_void_p),
+        ("OutputWide", c_void_p),
+        ("OutputVaListWide", c_void_p),
+        ("ControlledOutputWide", c_void_p),
+        ("ControlledOutputVaListWide", c_void_p),
+        ("OutputPromptWide", c_void_p),
+        ("OutputPromptVaListWide", c_void_p),
+        ("GetPromptTextWide", c_void_p),
+        ("AssembleWide", c_void_p),
+        ("DisassembleWide", c_void_p),
+        ("GetProcessrTypeNamesWide", c_void_p),
+        ("GetTextMacroWide", c_void_p),
+        ("SetTextMacroWide", c_void_p),
+        ("EvaluateWide", c_void_p),
+        ("ExecuteWide", c_void_p),
+        ("ExecuteCommandFileWide", c_void_p),
+        ("GetBreakpointByIndex2", c_void_p),
+        ("GetBreakpointById2", c_void_p),
+        ("AddBreakpoint2", idc_addbreakpoint2),
+        ("RemoveBreakpoint2", c_void_p),
+        ("AddExtensionWide", c_void_p),
+        ("GetExtensionByPathWide", c_void_p),
+        ("CallExtensionWide", c_void_p),
+        ("GetExtensionFunctionWide", c_void_p),
+        ("GetEventFilterTextWide", c_void_p),
+        ("GetEventfilterCommandWide", c_void_p),
+        ("SetEventFilterCommandWide", c_void_p),
+        ("GetSpecificFilterArgumentWide", c_void_p),
+        ("SetSpecificFilterArgumentWide", c_void_p),
+        ("GetExceptionFilterSecondCommandWide", c_void_p),
+        ("SetExceptionFilterSecondCommandWider", c_void_p),
+        ("GetLastEventInformationWide", c_void_p),
+        ("GetTextReplacementWide", c_void_p),
+        ("SetTextReplacementWide", c_void_p),
+        ("SetExpressionSyntaxByNameWide", c_void_p),
+        ("GetExpressionSyntaxNamesWide", c_void_p),
+        ("GetEventIndexDescriptionWide", c_void_p),
+        ("GetLogFile2", c_void_p),
+        ("OpenLogFile2", c_void_p),
+        ("GetLogFile2Wide", c_void_p),
+        ("OpenLogFile2Wide", c_void_p),
+        ("GetSystemVersionValues", c_void_p),
+        ("GetSystemVersionString", c_void_p),
+        ("GetSystemVersionStringWide", c_void_p),
+        ("GetContextStackTrace", c_void_p),
+        ("OutputContextStackTrace", c_void_p),
+        ("GetStoredEventInformation", c_void_p),
+        ("GetManagedStatus", c_void_p),
+        ("GetManagedStatusWide", c_void_p),
+        ("ResetManagedStatus", c_void_p),
+        ("GetStackTraceEx", idc_getstacktraceex),
+        ("OutputStackTraceEx", c_void_p),
+        ("GetContextStackTraceEx", c_void_p),
+        ("OutputContextStackTraceEx", c_void_p),
+        ("GetBreakpointByGuid", c_void_p),
+        ("GetExecutionStatusEx", c_void_p),
+        ("GetSynchronizationStatus", c_void_p),
+        ("GetDebuggeeType2", c_void_p),
     ]
 
+
 IDebugControl7._fields_ = [("lpVtbl", POINTER(IDebugControl7Vtbl))]
 
+
 class DebugStatus(IntEnum):
-  DEBUG_STATUS_NO_CHANGE =            0
-  DEBUG_STATUS_GO =                   1
-  DEBUG_STATUS_GO_HANDLED =           2
-  DEBUG_STATUS_GO_NOT_HANDLED =       3
-  DEBUG_STATUS_STEP_OVER =            4
-  DEBUG_STATUS_STEP_INTO =            5
-  DEBUG_STATUS_BREAK =                6
-  DEBUG_STATUS_NO_DEBUGGEE =          7
-  DEBUG_STATUS_STEP_BRANCH =          8
-  DEBUG_STATUS_IGNORE_EVENT =         9
-  DEBUG_STATUS_RESTART_REQUESTED =   10
-  DEBUG_STATUS_REVERSE_GO =          11
-  DEBUG_STATUS_REVERSE_STEP_BRANCH = 12
-  DEBUG_STATUS_REVERSE_STEP_OVER =   13
-  DEBUG_STATUS_REVERSE_STEP_INTO =   14
-  DEBUG_STATUS_OUT_OF_SYNC =         15
-  DEBUG_STATUS_WAIT_INPUT =          16
-  DEBUG_STATUS_TIMEOUT =             17
+    DEBUG_STATUS_NO_CHANGE = 0
+    DEBUG_STATUS_GO = 1
+    DEBUG_STATUS_GO_HANDLED = 2
+    DEBUG_STATUS_GO_NOT_HANDLED = 3
+    DEBUG_STATUS_STEP_OVER = 4
+    DEBUG_STATUS_STEP_INTO = 5
+    DEBUG_STATUS_BREAK = 6
+    DEBUG_STATUS_NO_DEBUGGEE = 7
+    DEBUG_STATUS_STEP_BRANCH = 8
+    DEBUG_STATUS_IGNORE_EVENT = 9
+    DEBUG_STATUS_RESTART_REQUESTED = 10
+    DEBUG_STATUS_REVERSE_GO = 11
+    DEBUG_STATUS_REVERSE_STEP_BRANCH = 12
+    DEBUG_STATUS_REVERSE_STEP_OVER = 13
+    DEBUG_STATUS_REVERSE_STEP_INTO = 14
+    DEBUG_STATUS_OUT_OF_SYNC = 15
+    DEBUG_STATUS_WAIT_INPUT = 16
+    DEBUG_STATUS_TIMEOUT = 17
+
 
 class DebugSyntax(IntEnum):
-  DEBUG_EXPR_MASM = 0
-  DEBUG_EXPR_CPLUSPLUS = 1
+    DEBUG_EXPR_MASM = 0
+    DEBUG_EXPR_CPLUSPLUS = 1
+
 
 class Control(object):
-  def __init__(self, control):
-    self.ptr = control
-    self.control = control.contents
-    self.vt = self.control.lpVtbl.contents
-    # Keep a handy ulong for passing into C methods.
-    self.ulong = c_ulong()
-
-  def GetExecutionStatus(self, doprint=False):
-    ret = self.vt.GetExecutionStatus(self.control, byref(self.ulong))
-    aborter(ret, "GetExecutionStatus")
-    status = DebugStatus(self.ulong.value)
-    if doprint:
-      print("Execution status: {}".format(status))
-    return status
-
-  def SetExecutionStatus(self, status):
-    assert isinstance(status, DebugStatus)
-    res = self.vt.SetExecutionStatus(self.control, status.value)
-    aborter(res, "SetExecutionStatus")
-
-  def WaitForEvent(self, timeout=100):
-    # No flags are taken by WaitForEvent, hence 0
-    ret = self.vt.WaitForEvent(self.control, 0, timeout)
-    aborter(ret, "WaitforEvent", ignore=[S_FALSE])
-    return ret
-
-  def GetNumberEventFilters(self):
-    specific_events = c_ulong()
-    specific_exceptions = c_ulong()
-    arbitrary_exceptions = c_ulong()
-    res = self.vt.GetNumberEventFilters(self.control, byref(specific_events),
-                                    byref(specific_exceptions),
-                                    byref(arbitrary_exceptions))
-    aborter(res, "GetNumberEventFilters")
-    return (specific_events.value, specific_exceptions.value,
-            arbitrary_exceptions.value)
-
-  def SetExceptionFilterSecondCommand(self, index, command):
-    buf = create_string_buffer(command.encode('ascii'))
-    res = self.vt.SetExceptionFilterSecondCommand(self.control, index, buf)
-    aborter(res, "SetExceptionFilterSecondCommand")
-    return
-
-  def AddBreakpoint2(self, offset=None, enabled=None):
-    breakpoint = POINTER(DebugBreakpoint2)()
-    res = self.vt.AddBreakpoint2(self.control, BreakpointTypes.DEBUG_BREAKPOINT_CODE, DEBUG_ANY_ID, byref(breakpoint))
-    aborter(res, "Add breakpoint 2")
-    bp = Breakpoint(breakpoint)
-
-    if offset is not None:
-      bp.SetOffset(offset)
-    if enabled is not None and enabled:
-      bp.SetFlags(BreakpointFlags.DEBUG_BREAKPOINT_ENABLED)
-
-    return bp
-
-  def RemoveBreakpoint(self, bp):
-    res = self.vt.RemoveBreakpoint2(self.control, bp.breakpoint)
-    aborter(res, "RemoveBreakpoint2")
-    bp.die()
-
-  def GetStackTraceEx(self):
-    # XXX -- I can't find a way to query for how many stack frames there _are_
-    # in  advance. Guess 128 for now.
-    num_frames_buffer = 128
-
-    frames = (DEBUG_STACK_FRAME_EX * num_frames_buffer)()
-    numframes = c_ulong()
-
-    # First three args are frame/stack/IP offsets -- leave them as zero to
-    # default to the current instruction.
-    res = self.vt.GetStackTraceEx(self.control, 0, 0, 0, frames, num_frames_buffer, byref(numframes))
-    aborter(res, "GetStackTraceEx")
-    return frames, numframes.value
-
-  def Execute(self, command):
-    # First zero is DEBUG_OUTCTL_*, which we leave as a default, second
-    # zero is DEBUG_EXECUTE_* flags, of which we set none.
-    res = self.vt.Execute(self.control, 0, command.encode('ascii'), 0)
-    aborter(res, "Client execute")
-
-  def SetExpressionSyntax(self, cpp=True):
-    if cpp:
-      syntax = DebugSyntax.DEBUG_EXPR_CPLUSPLUS
-    else:
-      syntax = DebugSyntax.DEBUG_EXPR_MASM
-
-    res = self.vt.SetExpressionSyntax(self.control, syntax)
-    aborter(res, "SetExpressionSyntax")
-
-  def Evaluate(self, expr):
-    ptr = DEBUG_VALUE()
-    res = self.vt.Evaluate(self.control, expr.encode("ascii"), DebugValueType.DEBUG_VALUE_INVALID, byref(ptr), None)
-    aborter(res, "Evaluate", ignore=[E_INTERNALEXCEPTION, E_FAIL])
-    if res != 0:
-      return None
-
-    val_type = DebugValueType(ptr.Type)
-
-    # Here's a map from debug value types to fields. Unclear what happens
-    # with unsigned values, as DbgEng doesn't present any unsigned fields.
-
-    extract_map = {
-      DebugValueType.DEBUG_VALUE_INT8    : ("I8", "char"),
-      DebugValueType.DEBUG_VALUE_INT16   : ("I16", "short"),
-      DebugValueType.DEBUG_VALUE_INT32   : ("I32", "int"),
-      DebugValueType.DEBUG_VALUE_INT64   : ("I64", "long"),
-      DebugValueType.DEBUG_VALUE_FLOAT32 : ("F32", "float"),
-      DebugValueType.DEBUG_VALUE_FLOAT64 : ("F64", "double")
-    } # And everything else is invalid.
-
-    if val_type not in extract_map:
-      raise Exception("Unexpected debug value type {} when evalutaing".format(val_type))
-
-    # Also produce a type name...
-
-    return getattr(ptr.U, extract_map[val_type][0]), extract_map[val_type][1]
-
-  def SetEngineOptions(self, opt):
-    res = self.vt.SetEngineOptions(self.control, opt)
-    aborter(res, "SetEngineOptions")
-    return
+    def __init__(self, control):
+        self.ptr = control
+        self.control = control.contents
+        self.vt = self.control.lpVtbl.contents
+        # Keep a handy ulong for passing into C methods.
+        self.ulong = c_ulong()
+
+    def GetExecutionStatus(self, doprint=False):
+        ret = self.vt.GetExecutionStatus(self.control, byref(self.ulong))
+        aborter(ret, "GetExecutionStatus")
+        status = DebugStatus(self.ulong.value)
+        if doprint:
+            print("Execution status: {}".format(status))
+        return status
+
+    def SetExecutionStatus(self, status):
+        assert isinstance(status, DebugStatus)
+        res = self.vt.SetExecutionStatus(self.control, status.value)
+        aborter(res, "SetExecutionStatus")
+
+    def WaitForEvent(self, timeout=100):
+        # No flags are taken by WaitForEvent, hence 0
+        ret = self.vt.WaitForEvent(self.control, 0, timeout)
+        aborter(ret, "WaitforEvent", ignore=[S_FALSE])
+        return ret
+
+    def GetNumberEventFilters(self):
+        specific_events = c_ulong()
+        specific_exceptions = c_ulong()
+        arbitrary_exceptions = c_ulong()
+        res = self.vt.GetNumberEventFilters(
+            self.control,
+            byref(specific_events),
+            byref(specific_exceptions),
+            byref(arbitrary_exceptions),
+        )
+        aborter(res, "GetNumberEventFilters")
+        return (
+            specific_events.value,
+            specific_exceptions.value,
+            arbitrary_exceptions.value,
+        )
+
+    def SetExceptionFilterSecondCommand(self, index, command):
+        buf = create_string_buffer(command.encode("ascii"))
+        res = self.vt.SetExceptionFilterSecondCommand(self.control, index, buf)
+        aborter(res, "SetExceptionFilterSecondCommand")
+        return
+
+    def AddBreakpoint2(self, offset=None, enabled=None):
+        breakpoint = POINTER(DebugBreakpoint2)()
+        res = self.vt.AddBreakpoint2(
+            self.control,
+            BreakpointTypes.DEBUG_BREAKPOINT_CODE,
+            DEBUG_ANY_ID,
+            byref(breakpoint),
+        )
+        aborter(res, "Add breakpoint 2")
+        bp = Breakpoint(breakpoint)
+
+        if offset is not None:
+            bp.SetOffset(offset)
+        if enabled is not None and enabled:
+            bp.SetFlags(BreakpointFlags.DEBUG_BREAKPOINT_ENABLED)
+
+        return bp
+
+    def RemoveBreakpoint(self, bp):
+        res = self.vt.RemoveBreakpoint2(self.control, bp.breakpoint)
+        aborter(res, "RemoveBreakpoint2")
+        bp.die()
+
+    def GetStackTraceEx(self):
+        # XXX -- I can't find a way to query for how many stack frames there _are_
+        # in  advance. Guess 128 for now.
+        num_frames_buffer = 128
+
+        frames = (DEBUG_STACK_FRAME_EX * num_frames_buffer)()
+        numframes = c_ulong()
+
+        # First three args are frame/stack/IP offsets -- leave them as zero to
+        # default to the current instruction.
+        res = self.vt.GetStackTraceEx(
+            self.control, 0, 0, 0, frames, num_frames_buffer, byref(numframes)
+        )
+        aborter(res, "GetStackTraceEx")
+        return frames, numframes.value
+
+    def Execute(self, command):
+        # First zero is DEBUG_OUTCTL_*, which we leave as a default, second
+        # zero is DEBUG_EXECUTE_* flags, of which we set none.
+        res = self.vt.Execute(self.control, 0, command.encode("ascii"), 0)
+        aborter(res, "Client execute")
+
+    def SetExpressionSyntax(self, cpp=True):
+        if cpp:
+            syntax = DebugSyntax.DEBUG_EXPR_CPLUSPLUS
+        else:
+            syntax = DebugSyntax.DEBUG_EXPR_MASM
+
+        res = self.vt.SetExpressionSyntax(self.control, syntax)
+        aborter(res, "SetExpressionSyntax")
+
+    def Evaluate(self, expr):
+        ptr = DEBUG_VALUE()
+        res = self.vt.Evaluate(
+            self.control,
+            expr.encode("ascii"),
+            DebugValueType.DEBUG_VALUE_INVALID,
+            byref(ptr),
+            None,
+        )
+        aborter(res, "Evaluate", ignore=[E_INTERNALEXCEPTION, E_FAIL])
+        if res != 0:
+            return None
+
+        val_type = DebugValueType(ptr.Type)
+
+        # Here's a map from debug value types to fields. Unclear what happens
+        # with unsigned values, as DbgEng doesn't present any unsigned fields.
+
+        extract_map = {
+            DebugValueType.DEBUG_VALUE_INT8: ("I8", "char"),
+            DebugValueType.DEBUG_VALUE_INT16: ("I16", "short"),
+            DebugValueType.DEBUG_VALUE_INT32: ("I32", "int"),
+            DebugValueType.DEBUG_VALUE_INT64: ("I64", "long"),
+            DebugValueType.DEBUG_VALUE_FLOAT32: ("F32", "float"),
+            DebugValueType.DEBUG_VALUE_FLOAT64: ("F64", "double"),
+        }  # And everything else is invalid.
+
+        if val_type not in extract_map:
+            raise Exception(
+                "Unexpected debug value type {} when evalutaing".format(val_type)
+            )
+
+        # Also produce a type name...
+
+        return getattr(ptr.U, extract_map[val_type][0]), extract_map[val_type][1]
+
+    def SetEngineOptions(self, opt):
+        res = self.vt.SetEngineOptions(self.control, opt)
+        aborter(res, "SetEngineOptions")
+        return

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/dbgeng.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/dbgeng.py
index 8e015669b732e..03aa548495ce1 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/dbgeng.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/dbgeng.py
@@ -16,10 +16,11 @@
 from dex.utils.ReturnCode import ReturnCode
 
 if platform.system() == "Windows":
-  # Don't load on linux; _load_interface will croak before any names are used.
-  from . import setup
-  from . import probe_process
-  from . import breakpoint
+    # Don't load on linux; _load_interface will croak before any names are used.
+    from . import setup
+    from . import probe_process
+    from . import breakpoint
+
 
 class DbgEng(DebuggerBase):
     def __init__(self, context, *args):
@@ -31,11 +32,11 @@ def __init__(self, context, *args):
 
     def _custom_init(self):
         try:
-          res = setup.setup_everything(self.context.options.executable)
-          self.client = res
-          self.running = True
+            res = setup.setup_everything(self.context.options.executable)
+            self.client = res
+            self.running = True
         except Exception as e:
-          raise Exception('Failed to start debuggee: {}'.format(e))
+            raise Exception("Failed to start debuggee: {}".format(e))
 
     def _custom_exit(self):
         setup.cleanup(self.client)
@@ -43,23 +44,25 @@ def _custom_exit(self):
     def _load_interface(self):
         arch = platform.architecture()[0]
         machine = platform.machine()
-        if arch == '32bit' and machine == 'AMD64':
-          # This python process is 32 bits, but is sitting on a 64 bit machine.
-          # Bad things may happen, don't support it.
-          raise LoadDebuggerException('Can\'t run Dexter dbgeng on 32 bit python in a 64 bit environment')
+        if arch == "32bit" and machine == "AMD64":
+            # This python process is 32 bits, but is sitting on a 64 bit machine.
+            # Bad things may happen, don't support it.
+            raise LoadDebuggerException(
+                "Can't run Dexter dbgeng on 32 bit python in a 64 bit environment"
+            )
 
-        if platform.system() != 'Windows':
-          raise LoadDebuggerException('DbgEng supports Windows only')
+        if platform.system() != "Windows":
+            raise LoadDebuggerException("DbgEng supports Windows only")
 
         # Otherwise, everything was imported earlier
 
     @classmethod
     def get_name(cls):
-        return 'dbgeng'
+        return "dbgeng"
 
     @classmethod
     def get_option_name(cls):
-        return 'dbgeng'
+        return "dbgeng"
 
     @property
     def frames_below_main(self):
@@ -85,25 +88,33 @@ def _add_breakpoint(self, file_, line):
     def _add_conditional_breakpoint(self, file_, line, condition):
         # breakpoint setting/deleting is not supported by dbgeng at this moment
         # but is something that should be considered in the future.
-        raise NotImplementedError('add_conditional_breakpoint is not yet implemented by dbgeng')
+        raise NotImplementedError(
+            "add_conditional_breakpoint is not yet implemented by dbgeng"
+        )
 
     def get_triggered_breakpoint_ids(self):
-      raise NotImplementedError('get_triggered_breakpoint_ids is not yet implemented by dbgeng')
+        raise NotImplementedError(
+            "get_triggered_breakpoint_ids is not yet implemented by dbgeng"
+        )
 
     def delete_breakpoints(self, ids):
         # breakpoint setting/deleting is not supported by dbgeng at this moment
         # but is something that should be considered in the future.
-        raise NotImplementedError('delete_conditional_breakpoint is not yet implemented by dbgeng')
+        raise NotImplementedError(
+            "delete_conditional_breakpoint is not yet implemented by dbgeng"
+        )
 
     def launch(self, cmdline):
-        assert len(cmdline) == 0 and not self.context.options.target_run_args, "Command lines unimplemented for dbgeng right now"
+        assert (
+            len(cmdline) == 0 and not self.context.options.target_run_args
+        ), "Command lines unimplemented for dbgeng right now"
         # We are, by this point, already launched.
         self.step_info = probe_process.probe_state(self.client)
 
     def step(self):
         res = setup.step_once(self.client)
         if not res:
-          self.finished = True
+            self.finished = True
         self.step_info = res
 
     def go(self):
@@ -121,38 +132,44 @@ def _get_step_info(self, watches, step_index):
         # inlining.
         dex_frames = []
         for i, x in enumerate(frames):
-          # XXX Might be able to get columns out through
-          # GetSourceEntriesByOffset, not a priority now
-          loc = LocIR(path=x.source_file, lineno=x.line_no, column=0)
-          new_frame = FrameIR(function=x.function_name, is_inlined=False, loc=loc)
-          dex_frames.append(new_frame)
-
-          state_frame = StackFrame(function=new_frame.function,
-                                   is_inlined=new_frame.is_inlined,
-                                   location=SourceLocation(path=x.source_file,
-                                                           lineno=x.line_no,
-                                                           column=0),
-                                   watches={})
-          for expr in map(
-              # Filter out watches that are not active in the current frame,
-              # and then evaluate all the active watches.
-              lambda watch_info, idx=i:
-                self.evaluate_expression(watch_info.expression, idx),
-              filter(
-                  lambda watch_info, idx=i, line_no=loc.lineno, path=loc.path:
-                    watch_is_active(watch_info, path, idx, line_no),
-                  watches)):
-              state_frame.watches[expr.expression] = expr
-          state_frames.append(state_frame)
+            # XXX Might be able to get columns out through
+            # GetSourceEntriesByOffset, not a priority now
+            loc = LocIR(path=x.source_file, lineno=x.line_no, column=0)
+            new_frame = FrameIR(function=x.function_name, is_inlined=False, loc=loc)
+            dex_frames.append(new_frame)
+
+            state_frame = StackFrame(
+                function=new_frame.function,
+                is_inlined=new_frame.is_inlined,
+                location=SourceLocation(path=x.source_file, lineno=x.line_no, column=0),
+                watches={},
+            )
+            for expr in map(
+                # Filter out watches that are not active in the current frame,
+                # and then evaluate all the active watches.
+                lambda watch_info, idx=i: self.evaluate_expression(
+                    watch_info.expression, idx
+                ),
+                filter(
+                    lambda watch_info, idx=i, line_no=loc.lineno, path=loc.path: watch_is_active(
+                        watch_info, path, idx, line_no
+                    ),
+                    watches,
+                ),
+            ):
+                state_frame.watches[expr.expression] = expr
+            state_frames.append(state_frame)
 
         return StepIR(
-            step_index=step_index, frames=dex_frames,
+            step_index=step_index,
+            frames=dex_frames,
             stop_reason=StopReason.STEP,
-            program_state=ProgramState(state_frames))
+            program_state=ProgramState(state_frames),
+        )
 
     @property
     def is_running(self):
-        return False # We're never free-running
+        return False  # We're never free-running
 
     @property
     def is_finished(self):
@@ -161,18 +178,18 @@ def is_finished(self):
     def evaluate_expression(self, expression, frame_idx=0):
         # XXX: cdb insists on using '->' to examine fields of structures,
         # as it appears to reserve '.' for other purposes.
-        fixed_expr = expression.replace('.', '->')
+        fixed_expr = expression.replace(".", "->")
 
         orig_scope_idx = self.client.Symbols.GetCurrentScopeFrameIndex()
         self.client.Symbols.SetScopeFrameByIndex(frame_idx)
 
         res = self.client.Control.Evaluate(fixed_expr)
         if res is not None:
-          result, typename = self.client.Control.Evaluate(fixed_expr)
-          could_eval = True
+            result, typename = self.client.Control.Evaluate(fixed_expr)
+            could_eval = True
         else:
-          result, typename = (None, None)
-          could_eval = False
+            result, typename = (None, None)
+            could_eval = False
 
         self.client.Symbols.SetScopeFrameByIndex(orig_scope_idx)
 
@@ -183,4 +200,5 @@ def evaluate_expression(self, expression, frame_idx=0):
             error_string="",
             could_evaluate=could_eval,
             is_optimized_away=False,
-            is_irretrievable=not could_eval)
+            is_irretrievable=not could_eval,
+        )

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/probe_process.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/probe_process.py
index 8bd7f60708146..b47bb3881997f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/probe_process.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/probe_process.py
@@ -9,72 +9,75 @@
 
 from .utils import *
 
+
 class Frame(object):
-  def __init__(self, frame, idx, Symbols):
-    # Store some base information about the frame
-    self.ip = frame.InstructionOffset
-    self.scope_idx = idx
-    self.virtual = frame.Virtual
-    self.inline_frame_context = frame.InlineFrameContext
-    self.func_tbl_entry = frame.FuncTableEntry
-
-    # Fetch the module/symbol we're in, with displacement. Useful for debugging.
-    self.descr = Symbols.GetNearNameByOffset(self.ip)
-    split = self.descr.split('!')[0]
-    self.module = split[0]
-    self.symbol = split[1]
-
-    # Fetch symbol group for this scope.
-    prevscope = Symbols.GetCurrentScopeFrameIndex()
-    if Symbols.SetScopeFrameByIndex(idx):
-      symgroup = Symbols.GetScopeSymbolGroup2()
-      Symbols.SetScopeFrameByIndex(prevscope)
-      self.symgroup = symgroup
-    else:
-      self.symgroup = None
-
-    # Fetch the name according to the line-table, using inlining context.
-    name = Symbols.GetNameByInlineContext(self.ip, self.inline_frame_context)
-    self.function_name = name.split('!')[-1]
-
-    try:
-      tup = Symbols.GetLineByInlineContext(self.ip, self.inline_frame_context)
-      self.source_file, self.line_no = tup
-    except WinError as e:
-      # Fall back to trying to use a non-inlining-aware line number
-      # XXX - this is not inlining aware
-      sym = Symbols.GetLineByOffset(self.ip)
-      if sym is not None:
-        self.source_file, self.line_no = sym
-      else:
-        self.source_file = None
-        self.line_no = None
-        self.basename = None
-
-    if self.source_file is not None:
-      self.basename = os.path.basename(self.source_file)
-    else:
-      self.basename = None
-
-
-
-  def __str__(self):
-    return '{}:{}({}) {}'.format(self.basename, self.line, self.descr, self.function_name)
+    def __init__(self, frame, idx, Symbols):
+        # Store some base information about the frame
+        self.ip = frame.InstructionOffset
+        self.scope_idx = idx
+        self.virtual = frame.Virtual
+        self.inline_frame_context = frame.InlineFrameContext
+        self.func_tbl_entry = frame.FuncTableEntry
+
+        # Fetch the module/symbol we're in, with displacement. Useful for debugging.
+        self.descr = Symbols.GetNearNameByOffset(self.ip)
+        split = self.descr.split("!")[0]
+        self.module = split[0]
+        self.symbol = split[1]
+
+        # Fetch symbol group for this scope.
+        prevscope = Symbols.GetCurrentScopeFrameIndex()
+        if Symbols.SetScopeFrameByIndex(idx):
+            symgroup = Symbols.GetScopeSymbolGroup2()
+            Symbols.SetScopeFrameByIndex(prevscope)
+            self.symgroup = symgroup
+        else:
+            self.symgroup = None
+
+        # Fetch the name according to the line-table, using inlining context.
+        name = Symbols.GetNameByInlineContext(self.ip, self.inline_frame_context)
+        self.function_name = name.split("!")[-1]
+
+        try:
+            tup = Symbols.GetLineByInlineContext(self.ip, self.inline_frame_context)
+            self.source_file, self.line_no = tup
+        except WinError as e:
+            # Fall back to trying to use a non-inlining-aware line number
+            # XXX - this is not inlining aware
+            sym = Symbols.GetLineByOffset(self.ip)
+            if sym is not None:
+                self.source_file, self.line_no = sym
+            else:
+                self.source_file = None
+                self.line_no = None
+                self.basename = None
+
+        if self.source_file is not None:
+            self.basename = os.path.basename(self.source_file)
+        else:
+            self.basename = None
+
+    def __str__(self):
+        return "{}:{}({}) {}".format(
+            self.basename, self.line, self.descr, self.function_name
+        )
+
 
 def main_on_stack(Symbols, frames):
-  module_name = Symbols.get_exefile_module_name()
-  main_name = "{}!main".format(module_name)
-  for x in frames:
-    if main_name in x.descr: # Could be less hard coded...
-      return True
-  return False
+    module_name = Symbols.get_exefile_module_name()
+    main_name = "{}!main".format(module_name)
+    for x in frames:
+        if main_name in x.descr:  # Could be less hard coded...
+            return True
+    return False
+
 
 def probe_state(Client):
-  # Fetch the state of the program -- represented by the stack frames.
-  frames, numframes = Client.Control.GetStackTraceEx()
+    # Fetch the state of the program -- represented by the stack frames.
+    frames, numframes = Client.Control.GetStackTraceEx()
 
-  the_frames = [Frame(frames[x], x, Client.Symbols) for x in range(numframes)]
-  if not main_on_stack(Client.Symbols, the_frames):
-    return None
+    the_frames = [Frame(frames[x], x, Client.Symbols) for x in range(numframes)]
+    if not main_on_stack(Client.Symbols, the_frames):
+        return None
 
-  return the_frames
+    return the_frames

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/setup.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/setup.py
index 26360f680d23a..fab423c488eca 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/setup.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/setup.py
@@ -13,133 +13,145 @@
 from .probe_process import probe_state
 from .utils import *
 
+
 class STARTUPINFOA(Structure):
-  _fields_ = [
-      ('cb', c_ulong),
-      ('lpReserved', c_char_p),
-      ('lpDesktop', c_char_p),
-      ('lpTitle', c_char_p),
-      ('dwX', c_ulong),
-      ('dwY', c_ulong),
-      ('dwXSize', c_ulong),
-      ('dwYSize', c_ulong),
-      ('dwXCountChars', c_ulong),
-      ('dwYCountChars', c_ulong),
-      ('dwFillAttribute', c_ulong),
-      ('wShowWindow', c_ushort),
-      ('cbReserved2', c_ushort),
-      ('lpReserved2', c_char_p),
-      ('hStdInput', c_void_p),
-      ('hStdOutput', c_void_p),
-      ('hStdError', c_void_p)
+    _fields_ = [
+        ("cb", c_ulong),
+        ("lpReserved", c_char_p),
+        ("lpDesktop", c_char_p),
+        ("lpTitle", c_char_p),
+        ("dwX", c_ulong),
+        ("dwY", c_ulong),
+        ("dwXSize", c_ulong),
+        ("dwYSize", c_ulong),
+        ("dwXCountChars", c_ulong),
+        ("dwYCountChars", c_ulong),
+        ("dwFillAttribute", c_ulong),
+        ("wShowWindow", c_ushort),
+        ("cbReserved2", c_ushort),
+        ("lpReserved2", c_char_p),
+        ("hStdInput", c_void_p),
+        ("hStdOutput", c_void_p),
+        ("hStdError", c_void_p),
     ]
 
+
 class PROCESS_INFORMATION(Structure):
-  _fields_ = [
-      ('hProcess', c_void_p),
-      ('hThread', c_void_p),
-      ('dwProcessId', c_ulong),
-      ('dwThreadId', c_ulong)
+    _fields_ = [
+        ("hProcess", c_void_p),
+        ("hThread", c_void_p),
+        ("dwProcessId", c_ulong),
+        ("dwThreadId", c_ulong),
     ]
 
-def fetch_local_function_syms(Symbols, prefix):
-  syms = Symbols.get_all_functions()
-
-  def is_sym_in_src_dir(sym):
-    name, data = sym
-    symdata = Symbols.GetLineByOffset(data.Offset)
-    if symdata is not None:
-      srcfile, line = symdata
-      if prefix in srcfile:
-        return True
-    return False
-   
-  syms = [x for x in syms if is_sym_in_src_dir(x)]
-  return syms
 
-def break_on_all_but_main(Control, Symbols, main_offset):
-  mainfile, _ = Symbols.GetLineByOffset(main_offset)
-  prefix = '\\'.join(mainfile.split('\\')[:-1])
-
-  for name, rec in fetch_local_function_syms(Symbols, prefix):
-    if name == "main":
-      continue
-    bp = Control.AddBreakpoint2(offset=rec.Offset, enabled=True)
+def fetch_local_function_syms(Symbols, prefix):
+    syms = Symbols.get_all_functions()
 
-  # All breakpoints are currently discarded: we just sys.exit for cleanup
-  return
+    def is_sym_in_src_dir(sym):
+        name, data = sym
+        symdata = Symbols.GetLineByOffset(data.Offset)
+        if symdata is not None:
+            srcfile, line = symdata
+            if prefix in srcfile:
+                return True
+        return False
 
-def setup_everything(binfile):
-  from . import client
-  from . import symbols
-  Client = client.Client()
+    syms = [x for x in syms if is_sym_in_src_dir(x)]
+    return syms
 
-  Client.Control.SetEngineOptions(0x20) # DEBUG_ENGOPT_INITIAL_BREAK
 
-  Client.CreateProcessAndAttach2(binfile)
+def break_on_all_but_main(Control, Symbols, main_offset):
+    mainfile, _ = Symbols.GetLineByOffset(main_offset)
+    prefix = "\\".join(mainfile.split("\\")[:-1])
 
-  # Load lines as well as general symbols
-  sym_opts = Client.Symbols.GetSymbolOptions()
-  sym_opts |= symbols.SymbolOptionFlags.SYMOPT_LOAD_LINES
-  Client.Symbols.SetSymbolOptions(sym_opts)
+    for name, rec in fetch_local_function_syms(Symbols, prefix):
+        if name == "main":
+            continue
+        bp = Control.AddBreakpoint2(offset=rec.Offset, enabled=True)
 
-  # Need to enter the debugger engine to let it attach properly.
-  res = Client.Control.WaitForEvent(timeout=1000)
-  if res == S_FALSE:
-    # The debugee apparently didn't do anything at all. Rather than risk
-    # hanging, bail out at this point.
-    client.TerminateProcesses()
-    raise Exception("Debuggee did not start in a timely manner")
-
-  # Enable line stepping.
-  Client.Control.Execute("l+t")
-  # Enable C++ expression interpretation.
-  Client.Control.SetExpressionSyntax(cpp=True)
-
-  # We've requested to break into the process at the earliest opportunity,
-  # and WaitForEvent'ing means we should have reached that break state.
-  # Now set a breakpoint on the main symbol, and "go" until we reach it.
-  module_name = Client.Symbols.get_exefile_module_name()
-  offset = Client.Symbols.GetOffsetByName("{}!main".format(module_name))
-  breakpoint = Client.Control.AddBreakpoint2(offset=offset, enabled=True)
-  Client.Control.SetExecutionStatus(control.DebugStatus.DEBUG_STATUS_GO)
-
-  # Problem: there is no guarantee that the client will ever reach main,
-  # something else exciting could happen in that time, the host system may
-  # be very loaded, and similar. Wait for some period, say, five seconds, and
-  # abort afterwards: this is a trade-off between spurious timeouts and
-  # completely hanging in the case of a environmental/programming error.
-  res = Client.Control.WaitForEvent(timeout=5000)
-  if res == S_FALSE:
-    client.TerminateProcesses()
-    raise Exception("Debuggee did not reach main function in a timely manner")
+    # All breakpoints are currently discarded: we just sys.exit for cleanup
+    return
 
-  break_on_all_but_main(Client.Control, Client.Symbols, offset)
 
-  # Set the default action on all exceptions to be "quit and detach". If we
-  # don't, dbgeng will merrily spin at the exception site forever.
-  filts = Client.Control.GetNumberEventFilters()
-  for x in range(filts[0], filts[0] + filts[1]):
-    Client.Control.SetExceptionFilterSecondCommand(x, "qd")
+def setup_everything(binfile):
+    from . import client
+    from . import symbols
+
+    Client = client.Client()
+
+    Client.Control.SetEngineOptions(0x20)  # DEBUG_ENGOPT_INITIAL_BREAK
+
+    Client.CreateProcessAndAttach2(binfile)
+
+    # Load lines as well as general symbols
+    sym_opts = Client.Symbols.GetSymbolOptions()
+    sym_opts |= symbols.SymbolOptionFlags.SYMOPT_LOAD_LINES
+    Client.Symbols.SetSymbolOptions(sym_opts)
+
+    # Need to enter the debugger engine to let it attach properly.
+    res = Client.Control.WaitForEvent(timeout=1000)
+    if res == S_FALSE:
+        # The debugee apparently didn't do anything at all. Rather than risk
+        # hanging, bail out at this point.
+        client.TerminateProcesses()
+        raise Exception("Debuggee did not start in a timely manner")
+
+    # Enable line stepping.
+    Client.Control.Execute("l+t")
+    # Enable C++ expression interpretation.
+    Client.Control.SetExpressionSyntax(cpp=True)
+
+    # We've requested to break into the process at the earliest opportunity,
+    # and WaitForEvent'ing means we should have reached that break state.
+    # Now set a breakpoint on the main symbol, and "go" until we reach it.
+    module_name = Client.Symbols.get_exefile_module_name()
+    offset = Client.Symbols.GetOffsetByName("{}!main".format(module_name))
+    breakpoint = Client.Control.AddBreakpoint2(offset=offset, enabled=True)
+    Client.Control.SetExecutionStatus(control.DebugStatus.DEBUG_STATUS_GO)
+
+    # Problem: there is no guarantee that the client will ever reach main,
+    # something else exciting could happen in that time, the host system may
+    # be very loaded, and similar. Wait for some period, say, five seconds, and
+    # abort afterwards: this is a trade-off between spurious timeouts and
+    # completely hanging in the case of a environmental/programming error.
+    res = Client.Control.WaitForEvent(timeout=5000)
+    if res == S_FALSE:
+        client.TerminateProcesses()
+        raise Exception("Debuggee did not reach main function in a timely manner")
+
+    break_on_all_but_main(Client.Control, Client.Symbols, offset)
+
+    # Set the default action on all exceptions to be "quit and detach". If we
+    # don't, dbgeng will merrily spin at the exception site forever.
+    filts = Client.Control.GetNumberEventFilters()
+    for x in range(filts[0], filts[0] + filts[1]):
+        Client.Control.SetExceptionFilterSecondCommand(x, "qd")
+
+    return Client
 
-  return Client
 
 def step_once(client):
-  client.Control.Execute("p")
-  try:
-    client.Control.WaitForEvent()
-  except Exception as e:
-    if client.Control.GetExecutionStatus() == control.DebugStatus.DEBUG_STATUS_NO_DEBUGGEE:
-      return None # Debuggee has gone away, likely due to an exception.
-    raise e
-  # Could assert here that we're in the "break" state
-  client.Control.GetExecutionStatus()
-  return probe_state(client)
+    client.Control.Execute("p")
+    try:
+        client.Control.WaitForEvent()
+    except Exception as e:
+        if (
+            client.Control.GetExecutionStatus()
+            == control.DebugStatus.DEBUG_STATUS_NO_DEBUGGEE
+        ):
+            return None  # Debuggee has gone away, likely due to an exception.
+        raise e
+    # Could assert here that we're in the "break" state
+    client.Control.GetExecutionStatus()
+    return probe_state(client)
+
 
 def main_loop(client):
-  res = True
-  while res is not None:
-    res = step_once(client)
+    res = True
+    while res is not None:
+        res = step_once(client)
+
 
 def cleanup(client):
-  client.TerminateProcesses()
+    client.TerminateProcesses()

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symbols.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symbols.py
index bc998facb4e81..8ba8fad49c0ae 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symbols.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symbols.py
@@ -13,487 +13,594 @@
 from .symgroup import SymbolGroup, IDebugSymbolGroup2
 from .utils import *
 
+
 class SymbolOptionFlags(IntFlag):
-  SYMOPT_CASE_INSENSITIVE          = 0x00000001
-  SYMOPT_UNDNAME                   = 0x00000002
-  SYMOPT_DEFERRED_LOADS            = 0x00000004
-  SYMOPT_NO_CPP                    = 0x00000008
-  SYMOPT_LOAD_LINES                = 0x00000010
-  SYMOPT_OMAP_FIND_NEAREST         = 0x00000020
-  SYMOPT_LOAD_ANYTHING             = 0x00000040
-  SYMOPT_IGNORE_CVREC              = 0x00000080
-  SYMOPT_NO_UNQUALIFIED_LOADS      = 0x00000100
-  SYMOPT_FAIL_CRITICAL_ERRORS      = 0x00000200
-  SYMOPT_EXACT_SYMBOLS             = 0x00000400
-  SYMOPT_ALLOW_ABSOLUTE_SYMBOLS    = 0x00000800
-  SYMOPT_IGNORE_NT_SYMPATH         = 0x00001000
-  SYMOPT_INCLUDE_32BIT_MODULES     = 0x00002000
-  SYMOPT_PUBLICS_ONLY              = 0x00004000
-  SYMOPT_NO_PUBLICS                = 0x00008000
-  SYMOPT_AUTO_PUBLICS              = 0x00010000
-  SYMOPT_NO_IMAGE_SEARCH           = 0x00020000
-  SYMOPT_SECURE                    = 0x00040000
-  SYMOPT_NO_PROMPTS                = 0x00080000
-  SYMOPT_DEBUG                     = 0x80000000
+    SYMOPT_CASE_INSENSITIVE = 0x00000001
+    SYMOPT_UNDNAME = 0x00000002
+    SYMOPT_DEFERRED_LOADS = 0x00000004
+    SYMOPT_NO_CPP = 0x00000008
+    SYMOPT_LOAD_LINES = 0x00000010
+    SYMOPT_OMAP_FIND_NEAREST = 0x00000020
+    SYMOPT_LOAD_ANYTHING = 0x00000040
+    SYMOPT_IGNORE_CVREC = 0x00000080
+    SYMOPT_NO_UNQUALIFIED_LOADS = 0x00000100
+    SYMOPT_FAIL_CRITICAL_ERRORS = 0x00000200
+    SYMOPT_EXACT_SYMBOLS = 0x00000400
+    SYMOPT_ALLOW_ABSOLUTE_SYMBOLS = 0x00000800
+    SYMOPT_IGNORE_NT_SYMPATH = 0x00001000
+    SYMOPT_INCLUDE_32BIT_MODULES = 0x00002000
+    SYMOPT_PUBLICS_ONLY = 0x00004000
+    SYMOPT_NO_PUBLICS = 0x00008000
+    SYMOPT_AUTO_PUBLICS = 0x00010000
+    SYMOPT_NO_IMAGE_SEARCH = 0x00020000
+    SYMOPT_SECURE = 0x00040000
+    SYMOPT_NO_PROMPTS = 0x00080000
+    SYMOPT_DEBUG = 0x80000000
+
 
 class ScopeGroupFlags(IntFlag):
-  DEBUG_SCOPE_GROUP_ARGUMENTS    = 0x00000001
-  DEBUG_SCOPE_GROUP_LOCALS       = 0x00000002
-  DEBUG_SCOPE_GROUP_ALL          = 0x00000003
-  DEBUG_SCOPE_GROUP_BY_DATAMODEL = 0x00000004
+    DEBUG_SCOPE_GROUP_ARGUMENTS = 0x00000001
+    DEBUG_SCOPE_GROUP_LOCALS = 0x00000002
+    DEBUG_SCOPE_GROUP_ALL = 0x00000003
+    DEBUG_SCOPE_GROUP_BY_DATAMODEL = 0x00000004
+
 
 class DebugModuleNames(IntEnum):
-  DEBUG_MODNAME_IMAGE        = 0x00000000
-  DEBUG_MODNAME_MODULE       = 0x00000001
-  DEBUG_MODNAME_LOADED_IMAGE = 0x00000002
-  DEBUG_MODNAME_SYMBOL_FILE  = 0x00000003
-  DEBUG_MODNAME_MAPPED_IMAGE = 0x00000004
+    DEBUG_MODNAME_IMAGE = 0x00000000
+    DEBUG_MODNAME_MODULE = 0x00000001
+    DEBUG_MODNAME_LOADED_IMAGE = 0x00000002
+    DEBUG_MODNAME_SYMBOL_FILE = 0x00000003
+    DEBUG_MODNAME_MAPPED_IMAGE = 0x00000004
+
 
 class DebugModuleFlags(IntFlag):
-  DEBUG_MODULE_LOADED            = 0x00000000
-  DEBUG_MODULE_UNLOADED          = 0x00000001
-  DEBUG_MODULE_USER_MODE         = 0x00000002
-  DEBUG_MODULE_EXE_MODULE        = 0x00000004
-  DEBUG_MODULE_EXPLICIT          = 0x00000008
-  DEBUG_MODULE_SECONDARY         = 0x00000010
-  DEBUG_MODULE_SYNTHETIC         = 0x00000020
-  DEBUG_MODULE_SYM_BAD_CHECKSUM  = 0x00010000
+    DEBUG_MODULE_LOADED = 0x00000000
+    DEBUG_MODULE_UNLOADED = 0x00000001
+    DEBUG_MODULE_USER_MODE = 0x00000002
+    DEBUG_MODULE_EXE_MODULE = 0x00000004
+    DEBUG_MODULE_EXPLICIT = 0x00000008
+    DEBUG_MODULE_SECONDARY = 0x00000010
+    DEBUG_MODULE_SYNTHETIC = 0x00000020
+    DEBUG_MODULE_SYM_BAD_CHECKSUM = 0x00010000
+
 
 class DEBUG_MODULE_PARAMETERS(Structure):
-  _fields_ = [
-      ("Base", c_ulonglong),
-      ("Size", c_ulong),
-      ("TimeDateStamp", c_ulong),
-      ("Checksum", c_ulong),
-      ("Flags", c_ulong),
-      ("SymbolType", c_ulong),
-      ("ImageNameSize", c_ulong),
-      ("ModuleNameSize", c_ulong),
-      ("LoadedImageNameSize", c_ulong),
-      ("SymbolFileNameSize", c_ulong),
-      ("MappedImageNameSize", c_ulong),
-      ("Reserved", c_ulonglong * 2)
+    _fields_ = [
+        ("Base", c_ulonglong),
+        ("Size", c_ulong),
+        ("TimeDateStamp", c_ulong),
+        ("Checksum", c_ulong),
+        ("Flags", c_ulong),
+        ("SymbolType", c_ulong),
+        ("ImageNameSize", c_ulong),
+        ("ModuleNameSize", c_ulong),
+        ("LoadedImageNameSize", c_ulong),
+        ("SymbolFileNameSize", c_ulong),
+        ("MappedImageNameSize", c_ulong),
+        ("Reserved", c_ulonglong * 2),
     ]
+
+
 PDEBUG_MODULE_PARAMETERS = POINTER(DEBUG_MODULE_PARAMETERS)
 
+
 class DEBUG_MODULE_AND_ID(Structure):
-  _fields_ = [
-      ("ModuleBase", c_ulonglong),
-      ("Id", c_ulonglong)
-    ]
+    _fields_ = [("ModuleBase", c_ulonglong), ("Id", c_ulonglong)]
+
+
 PDEBUG_MODULE_AND_ID = POINTER(DEBUG_MODULE_AND_ID)
 
+
 class DEBUG_SYMBOL_ENTRY(Structure):
-  _fields_ = [
-      ("ModuleBase", c_ulonglong),
-      ("Offset", c_ulonglong),
-      ("Id", c_ulonglong),
-      ("Arg64", c_ulonglong),
-      ("Size", c_ulong),
-      ("Flags", c_ulong),
-      ("TypeId", c_ulong),
-      ("NameSize", c_ulong),
-      ("Token", c_ulong),
-      ("Tag", c_ulong),
-      ("Arg32", c_ulong),
-      ("Reserved", c_ulong)
+    _fields_ = [
+        ("ModuleBase", c_ulonglong),
+        ("Offset", c_ulonglong),
+        ("Id", c_ulonglong),
+        ("Arg64", c_ulonglong),
+        ("Size", c_ulong),
+        ("Flags", c_ulong),
+        ("TypeId", c_ulong),
+        ("NameSize", c_ulong),
+        ("Token", c_ulong),
+        ("Tag", c_ulong),
+        ("Arg32", c_ulong),
+        ("Reserved", c_ulong),
     ]
+
+
 PDEBUG_SYMBOL_ENTRY = POINTER(DEBUG_SYMBOL_ENTRY)
 
 # UUID for DebugSymbols5 interface.
-DebugSymbols5IID = IID(0xc65fa83e, 0x1e69, 0x475e, IID_Data4_Type(0x8e, 0x0e, 0xb5, 0xd7, 0x9e, 0x9c, 0xc1, 0x7e))
+DebugSymbols5IID = IID(
+    0xC65FA83E,
+    0x1E69,
+    0x475E,
+    IID_Data4_Type(0x8E, 0x0E, 0xB5, 0xD7, 0x9E, 0x9C, 0xC1, 0x7E),
+)
+
 
 class IDebugSymbols5(Structure):
-  pass
+    pass
+
 
 class IDebugSymbols5Vtbl(Structure):
-  wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugSymbols5))
-  ids_getsymboloptions = wrp(c_ulong_p)
-  ids_setsymboloptions = wrp(c_ulong)
-  ids_getmoduleparameters = wrp(c_ulong, c_ulong64_p, c_ulong, PDEBUG_MODULE_PARAMETERS)
-  ids_getmodulenamestring = wrp(c_ulong, c_ulong, c_ulonglong, c_char_p, c_ulong, c_ulong_p)
-  ids_getoffsetbyname = wrp(c_char_p, c_ulong64_p)
-  ids_getlinebyoffset = wrp(c_ulonglong, c_ulong_p, c_char_p, c_ulong, c_ulong_p, c_ulong64_p)
-  ids_getsymbolentriesbyname = wrp(c_char_p, c_ulong, PDEBUG_MODULE_AND_ID, c_ulong, c_ulong_p)
-  ids_getsymbolentrystring = wrp(PDEBUG_MODULE_AND_ID, c_ulong, c_char_p, c_ulong, c_ulong_p)
-  ids_getsymbolentryinformation = wrp(PDEBUG_MODULE_AND_ID, PDEBUG_SYMBOL_ENTRY)
-  ids_getcurrentscopeframeindex = wrp(c_ulong_p)
-  ids_getnearnamebyoffset = wrp(c_ulonglong, c_long, c_char_p, c_ulong, c_ulong_p, c_ulong64_p)
-  ids_setscopeframebyindex = wrp(c_ulong)
-  ids_getscopesymbolgroup2 = wrp(c_ulong, POINTER(IDebugSymbolGroup2), POINTER(POINTER(IDebugSymbolGroup2)))
-  ids_getnamebyinlinecontext = wrp(c_ulonglong, c_ulong, c_char_p, c_ulong, c_ulong_p, c_ulong64_p)
-  ids_getlinebyinlinecontext = wrp(c_ulonglong, c_ulong, c_ulong_p, c_char_p, c_ulong, c_ulong_p, c_ulong64_p)
-  _fields_ = [
-      ("QueryInterface", c_void_p),
-      ("AddRef", c_void_p),
-      ("Release", c_void_p),
-      ("GetSymbolOptions", ids_getsymboloptions),
-      ("AddSymbolOptions", c_void_p),
-      ("RemoveSymbolOptions", c_void_p),
-      ("SetSymbolOptions", ids_setsymboloptions),
-      ("GetNameByOffset", c_void_p),
-      ("GetOffsetByName", ids_getoffsetbyname),
-      ("GetNearNameByOffset", ids_getnearnamebyoffset),
-      ("GetLineByOffset", ids_getlinebyoffset),
-      ("GetOffsetByLine", c_void_p),
-      ("GetNumberModules", c_void_p),
-      ("GetModuleByIndex", c_void_p),
-      ("GetModuleByModuleName", c_void_p),
-      ("GetModuleByOffset", c_void_p),
-      ("GetModuleNames", c_void_p),
-      ("GetModuleParameters", ids_getmoduleparameters),
-      ("GetSymbolModule", c_void_p),
-      ("GetTypeName", c_void_p),
-      ("GetTypeId", c_void_p),
-      ("GetTypeSize", c_void_p),
-      ("GetFieldOffset", c_void_p),
-      ("GetSymbolTypeId", c_void_p),
-      ("GetOffsetTypeId", c_void_p),
-      ("ReadTypedDataVirtual", c_void_p),
-      ("WriteTypedDataVirtual", c_void_p),
-      ("OutputTypedDataVirtual", c_void_p),
-      ("ReadTypedDataPhysical", c_void_p),
-      ("WriteTypedDataPhysical", c_void_p),
-      ("OutputTypedDataPhysical", c_void_p),
-      ("GetScope", c_void_p),
-      ("SetScope", c_void_p),
-      ("ResetScope", c_void_p),
-      ("GetScopeSymbolGroup", c_void_p),
-      ("CreateSymbolGroup", c_void_p),
-      ("StartSymbolMatch", c_void_p),
-      ("GetNextSymbolMatch", c_void_p),
-      ("EndSymbolMatch", c_void_p),
-      ("Reload", c_void_p),
-      ("GetSymbolPath", c_void_p),
-      ("SetSymbolPath", c_void_p),
-      ("AppendSymbolPath", c_void_p),
-      ("GetImagePath", c_void_p),
-      ("SetImagePath", c_void_p),
-      ("AppendImagePath", c_void_p),
-      ("GetSourcePath", c_void_p),
-      ("GetSourcePathElement", c_void_p),
-      ("SetSourcePath", c_void_p),
-      ("AppendSourcePath", c_void_p),
-      ("FindSourceFile", c_void_p),
-      ("GetSourceFileLineOffsets", c_void_p),
-      ("GetModuleVersionInformation", c_void_p),
-      ("GetModuleNameString", ids_getmodulenamestring),
-      ("GetConstantName", c_void_p),
-      ("GetFieldName", c_void_p),
-      ("GetTypeOptions", c_void_p),
-      ("AddTypeOptions", c_void_p),
-      ("RemoveTypeOptions", c_void_p),
-      ("SetTypeOptions", c_void_p),
-      ("GetNameByOffsetWide", c_void_p),
-      ("GetOffsetByNameWide", c_void_p),
-      ("GetNearNameByOffsetWide", c_void_p),
-      ("GetLineByOffsetWide", c_void_p),
-      ("GetOffsetByLineWide", c_void_p),
-      ("GetModuleByModuleNameWide", c_void_p),
-      ("GetSymbolModuleWide", c_void_p),
-      ("GetTypeNameWide", c_void_p),
-      ("GetTypeIdWide", c_void_p),
-      ("GetFieldOffsetWide", c_void_p),
-      ("GetSymbolTypeIdWide", c_void_p),
-      ("GetScopeSymbolGroup2", ids_getscopesymbolgroup2),
-      ("CreateSymbolGroup2", c_void_p),
-      ("StartSymbolMatchWide", c_void_p),
-      ("GetNextSymbolMatchWide", c_void_p),
-      ("ReloadWide", c_void_p),
-      ("GetSymbolPathWide", c_void_p),
-      ("SetSymbolPathWide", c_void_p),
-      ("AppendSymbolPathWide", c_void_p),
-      ("GetImagePathWide", c_void_p),
-      ("SetImagePathWide", c_void_p),
-      ("AppendImagePathWide", c_void_p),
-      ("GetSourcePathWide", c_void_p),
-      ("GetSourcePathElementWide", c_void_p),
-      ("SetSourcePathWide", c_void_p),
-      ("AppendSourcePathWide", c_void_p),
-      ("FindSourceFileWide", c_void_p),
-      ("GetSourceFileLineOffsetsWide", c_void_p),
-      ("GetModuleVersionInformationWide", c_void_p),
-      ("GetModuleNameStringWide", c_void_p),
-      ("GetConstantNameWide", c_void_p),
-      ("GetFieldNameWide", c_void_p),
-      ("IsManagedModule", c_void_p),
-      ("GetModuleByModuleName2", c_void_p),
-      ("GetModuleByModuleName2Wide", c_void_p),
-      ("GetModuleByOffset2", c_void_p),
-      ("AddSyntheticModule", c_void_p),
-      ("AddSyntheticModuleWide", c_void_p),
-      ("RemoveSyntheticModule", c_void_p),
-      ("GetCurrentScopeFrameIndex", ids_getcurrentscopeframeindex),
-      ("SetScopeFrameByIndex", ids_setscopeframebyindex),
-      ("SetScopeFromJitDebugInfo", c_void_p),
-      ("SetScopeFromStoredEvent", c_void_p),
-      ("OutputSymbolByOffset", c_void_p),
-      ("GetFunctionEntryByOffset", c_void_p),
-      ("GetFieldTypeAndOffset", c_void_p),
-      ("GetFieldTypeAndOffsetWide", c_void_p),
-      ("AddSyntheticSymbol", c_void_p),
-      ("AddSyntheticSymbolWide", c_void_p),
-      ("RemoveSyntheticSymbol", c_void_p),
-      ("GetSymbolEntriesByOffset", c_void_p),
-      ("GetSymbolEntriesByName", ids_getsymbolentriesbyname),
-      ("GetSymbolEntriesByNameWide", c_void_p),
-      ("GetSymbolEntryByToken", c_void_p),
-      ("GetSymbolEntryInformation", ids_getsymbolentryinformation),
-      ("GetSymbolEntryString", ids_getsymbolentrystring),
-      ("GetSymbolEntryStringWide", c_void_p),
-      ("GetSymbolEntryOffsetRegions", c_void_p),
-      ("GetSymbolEntryBySymbolEntry", c_void_p),
-      ("GetSourceEntriesByOffset", c_void_p),
-      ("GetSourceEntriesByLine", c_void_p),
-      ("GetSourceEntriesByLineWide", c_void_p),
-      ("GetSourceEntryString", c_void_p),
-      ("GetSourceEntryStringWide", c_void_p),
-      ("GetSourceEntryOffsetRegions", c_void_p),
-      ("GetsourceEntryBySourceEntry", c_void_p),
-      ("GetScopeEx", c_void_p),
-      ("SetScopeEx", c_void_p),
-      ("GetNameByInlineContext", ids_getnamebyinlinecontext),
-      ("GetNameByInlineContextWide", c_void_p),
-      ("GetLineByInlineContext", ids_getlinebyinlinecontext),
-      ("GetLineByInlineContextWide", c_void_p),
-      ("OutputSymbolByInlineContext", c_void_p),
-      ("GetCurrentScopeFrameIndexEx", c_void_p),
-      ("SetScopeFrameByIndexEx", c_void_p)
+    wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugSymbols5))
+    ids_getsymboloptions = wrp(c_ulong_p)
+    ids_setsymboloptions = wrp(c_ulong)
+    ids_getmoduleparameters = wrp(
+        c_ulong, c_ulong64_p, c_ulong, PDEBUG_MODULE_PARAMETERS
+    )
+    ids_getmodulenamestring = wrp(
+        c_ulong, c_ulong, c_ulonglong, c_char_p, c_ulong, c_ulong_p
+    )
+    ids_getoffsetbyname = wrp(c_char_p, c_ulong64_p)
+    ids_getlinebyoffset = wrp(
+        c_ulonglong, c_ulong_p, c_char_p, c_ulong, c_ulong_p, c_ulong64_p
+    )
+    ids_getsymbolentriesbyname = wrp(
+        c_char_p, c_ulong, PDEBUG_MODULE_AND_ID, c_ulong, c_ulong_p
+    )
+    ids_getsymbolentrystring = wrp(
+        PDEBUG_MODULE_AND_ID, c_ulong, c_char_p, c_ulong, c_ulong_p
+    )
+    ids_getsymbolentryinformation = wrp(PDEBUG_MODULE_AND_ID, PDEBUG_SYMBOL_ENTRY)
+    ids_getcurrentscopeframeindex = wrp(c_ulong_p)
+    ids_getnearnamebyoffset = wrp(
+        c_ulonglong, c_long, c_char_p, c_ulong, c_ulong_p, c_ulong64_p
+    )
+    ids_setscopeframebyindex = wrp(c_ulong)
+    ids_getscopesymbolgroup2 = wrp(
+        c_ulong, POINTER(IDebugSymbolGroup2), POINTER(POINTER(IDebugSymbolGroup2))
+    )
+    ids_getnamebyinlinecontext = wrp(
+        c_ulonglong, c_ulong, c_char_p, c_ulong, c_ulong_p, c_ulong64_p
+    )
+    ids_getlinebyinlinecontext = wrp(
+        c_ulonglong, c_ulong, c_ulong_p, c_char_p, c_ulong, c_ulong_p, c_ulong64_p
+    )
+    _fields_ = [
+        ("QueryInterface", c_void_p),
+        ("AddRef", c_void_p),
+        ("Release", c_void_p),
+        ("GetSymbolOptions", ids_getsymboloptions),
+        ("AddSymbolOptions", c_void_p),
+        ("RemoveSymbolOptions", c_void_p),
+        ("SetSymbolOptions", ids_setsymboloptions),
+        ("GetNameByOffset", c_void_p),
+        ("GetOffsetByName", ids_getoffsetbyname),
+        ("GetNearNameByOffset", ids_getnearnamebyoffset),
+        ("GetLineByOffset", ids_getlinebyoffset),
+        ("GetOffsetByLine", c_void_p),
+        ("GetNumberModules", c_void_p),
+        ("GetModuleByIndex", c_void_p),
+        ("GetModuleByModuleName", c_void_p),
+        ("GetModuleByOffset", c_void_p),
+        ("GetModuleNames", c_void_p),
+        ("GetModuleParameters", ids_getmoduleparameters),
+        ("GetSymbolModule", c_void_p),
+        ("GetTypeName", c_void_p),
+        ("GetTypeId", c_void_p),
+        ("GetTypeSize", c_void_p),
+        ("GetFieldOffset", c_void_p),
+        ("GetSymbolTypeId", c_void_p),
+        ("GetOffsetTypeId", c_void_p),
+        ("ReadTypedDataVirtual", c_void_p),
+        ("WriteTypedDataVirtual", c_void_p),
+        ("OutputTypedDataVirtual", c_void_p),
+        ("ReadTypedDataPhysical", c_void_p),
+        ("WriteTypedDataPhysical", c_void_p),
+        ("OutputTypedDataPhysical", c_void_p),
+        ("GetScope", c_void_p),
+        ("SetScope", c_void_p),
+        ("ResetScope", c_void_p),
+        ("GetScopeSymbolGroup", c_void_p),
+        ("CreateSymbolGroup", c_void_p),
+        ("StartSymbolMatch", c_void_p),
+        ("GetNextSymbolMatch", c_void_p),
+        ("EndSymbolMatch", c_void_p),
+        ("Reload", c_void_p),
+        ("GetSymbolPath", c_void_p),
+        ("SetSymbolPath", c_void_p),
+        ("AppendSymbolPath", c_void_p),
+        ("GetImagePath", c_void_p),
+        ("SetImagePath", c_void_p),
+        ("AppendImagePath", c_void_p),
+        ("GetSourcePath", c_void_p),
+        ("GetSourcePathElement", c_void_p),
+        ("SetSourcePath", c_void_p),
+        ("AppendSourcePath", c_void_p),
+        ("FindSourceFile", c_void_p),
+        ("GetSourceFileLineOffsets", c_void_p),
+        ("GetModuleVersionInformation", c_void_p),
+        ("GetModuleNameString", ids_getmodulenamestring),
+        ("GetConstantName", c_void_p),
+        ("GetFieldName", c_void_p),
+        ("GetTypeOptions", c_void_p),
+        ("AddTypeOptions", c_void_p),
+        ("RemoveTypeOptions", c_void_p),
+        ("SetTypeOptions", c_void_p),
+        ("GetNameByOffsetWide", c_void_p),
+        ("GetOffsetByNameWide", c_void_p),
+        ("GetNearNameByOffsetWide", c_void_p),
+        ("GetLineByOffsetWide", c_void_p),
+        ("GetOffsetByLineWide", c_void_p),
+        ("GetModuleByModuleNameWide", c_void_p),
+        ("GetSymbolModuleWide", c_void_p),
+        ("GetTypeNameWide", c_void_p),
+        ("GetTypeIdWide", c_void_p),
+        ("GetFieldOffsetWide", c_void_p),
+        ("GetSymbolTypeIdWide", c_void_p),
+        ("GetScopeSymbolGroup2", ids_getscopesymbolgroup2),
+        ("CreateSymbolGroup2", c_void_p),
+        ("StartSymbolMatchWide", c_void_p),
+        ("GetNextSymbolMatchWide", c_void_p),
+        ("ReloadWide", c_void_p),
+        ("GetSymbolPathWide", c_void_p),
+        ("SetSymbolPathWide", c_void_p),
+        ("AppendSymbolPathWide", c_void_p),
+        ("GetImagePathWide", c_void_p),
+        ("SetImagePathWide", c_void_p),
+        ("AppendImagePathWide", c_void_p),
+        ("GetSourcePathWide", c_void_p),
+        ("GetSourcePathElementWide", c_void_p),
+        ("SetSourcePathWide", c_void_p),
+        ("AppendSourcePathWide", c_void_p),
+        ("FindSourceFileWide", c_void_p),
+        ("GetSourceFileLineOffsetsWide", c_void_p),
+        ("GetModuleVersionInformationWide", c_void_p),
+        ("GetModuleNameStringWide", c_void_p),
+        ("GetConstantNameWide", c_void_p),
+        ("GetFieldNameWide", c_void_p),
+        ("IsManagedModule", c_void_p),
+        ("GetModuleByModuleName2", c_void_p),
+        ("GetModuleByModuleName2Wide", c_void_p),
+        ("GetModuleByOffset2", c_void_p),
+        ("AddSyntheticModule", c_void_p),
+        ("AddSyntheticModuleWide", c_void_p),
+        ("RemoveSyntheticModule", c_void_p),
+        ("GetCurrentScopeFrameIndex", ids_getcurrentscopeframeindex),
+        ("SetScopeFrameByIndex", ids_setscopeframebyindex),
+        ("SetScopeFromJitDebugInfo", c_void_p),
+        ("SetScopeFromStoredEvent", c_void_p),
+        ("OutputSymbolByOffset", c_void_p),
+        ("GetFunctionEntryByOffset", c_void_p),
+        ("GetFieldTypeAndOffset", c_void_p),
+        ("GetFieldTypeAndOffsetWide", c_void_p),
+        ("AddSyntheticSymbol", c_void_p),
+        ("AddSyntheticSymbolWide", c_void_p),
+        ("RemoveSyntheticSymbol", c_void_p),
+        ("GetSymbolEntriesByOffset", c_void_p),
+        ("GetSymbolEntriesByName", ids_getsymbolentriesbyname),
+        ("GetSymbolEntriesByNameWide", c_void_p),
+        ("GetSymbolEntryByToken", c_void_p),
+        ("GetSymbolEntryInformation", ids_getsymbolentryinformation),
+        ("GetSymbolEntryString", ids_getsymbolentrystring),
+        ("GetSymbolEntryStringWide", c_void_p),
+        ("GetSymbolEntryOffsetRegions", c_void_p),
+        ("GetSymbolEntryBySymbolEntry", c_void_p),
+        ("GetSourceEntriesByOffset", c_void_p),
+        ("GetSourceEntriesByLine", c_void_p),
+        ("GetSourceEntriesByLineWide", c_void_p),
+        ("GetSourceEntryString", c_void_p),
+        ("GetSourceEntryStringWide", c_void_p),
+        ("GetSourceEntryOffsetRegions", c_void_p),
+        ("GetsourceEntryBySourceEntry", c_void_p),
+        ("GetScopeEx", c_void_p),
+        ("SetScopeEx", c_void_p),
+        ("GetNameByInlineContext", ids_getnamebyinlinecontext),
+        ("GetNameByInlineContextWide", c_void_p),
+        ("GetLineByInlineContext", ids_getlinebyinlinecontext),
+        ("GetLineByInlineContextWide", c_void_p),
+        ("OutputSymbolByInlineContext", c_void_p),
+        ("GetCurrentScopeFrameIndexEx", c_void_p),
+        ("SetScopeFrameByIndexEx", c_void_p),
     ]
 
+
 IDebugSymbols5._fields_ = [("lpVtbl", POINTER(IDebugSymbols5Vtbl))]
 
 SymbolId = namedtuple("SymbolId", ["ModuleBase", "Id"])
-SymbolEntry = namedtuple("SymbolEntry", ["ModuleBase", "Offset", "Id", "Arg64", "Size", "Flags", "TypeId", "NameSize", "Token", "Tag", "Arg32"])
-DebugModuleParams = namedtuple("DebugModuleParams", ["Base", "Size", "TimeDateStamp", "Checksum", "Flags", "SymbolType", "ImageNameSize", "ModuleNameSize", "LoadedImageNameSize", "SymbolFileNameSize", "MappedImageNameSize"])
+SymbolEntry = namedtuple(
+    "SymbolEntry",
+    [
+        "ModuleBase",
+        "Offset",
+        "Id",
+        "Arg64",
+        "Size",
+        "Flags",
+        "TypeId",
+        "NameSize",
+        "Token",
+        "Tag",
+        "Arg32",
+    ],
+)
+DebugModuleParams = namedtuple(
+    "DebugModuleParams",
+    [
+        "Base",
+        "Size",
+        "TimeDateStamp",
+        "Checksum",
+        "Flags",
+        "SymbolType",
+        "ImageNameSize",
+        "ModuleNameSize",
+        "LoadedImageNameSize",
+        "SymbolFileNameSize",
+        "MappedImageNameSize",
+    ],
+)
+
 
 class SymTags(IntEnum):
-  Null = 0
-  Exe = 1
-  SymTagFunction = 5
+    Null = 0
+    Exe = 1
+    SymTagFunction = 5
+
 
 def make_debug_module_params(cdata):
-  fieldvalues = map(lambda y: getattr(cdata, y), DebugModuleParams._fields)
-  return DebugModuleParams(*fieldvalues)
+    fieldvalues = map(lambda y: getattr(cdata, y), DebugModuleParams._fields)
+    return DebugModuleParams(*fieldvalues)
+
 
class Symbols(object):
    """Wrapper over the dbgeng IDebugSymbols5 COM interface.

    Each method forwards to the raw vtable function pointer held in
    ``self.vt``, checks the returned HRESULT with aborter(), and unpacks
    ctypes out-parameters into Python values. Strings crossing the C
    boundary are ASCII-encoded/decoded.
    """

    def __init__(self, symbols):
        # symbols: ctypes POINTER(IDebugSymbols5) obtained via QueryInterface.
        self.ptr = symbols
        self.symbols = symbols.contents
        self.vt = self.symbols.lpVtbl.contents
        # Keep some handy ulongs for passing into C methods.
        self.ulong = c_ulong()
        self.ulong64 = c_ulonglong()

    def GetCurrentScopeFrameIndex(self):
        """Return the index of the current stack-frame scope."""
        res = self.vt.GetCurrentScopeFrameIndex(self.symbols, byref(self.ulong))
        aborter(res, "GetCurrentScopeFrameIndex")
        return self.ulong.value

    def SetScopeFrameByIndex(self, idx):
        """Select stack frame ``idx``; return False if idx was invalid."""
        res = self.vt.SetScopeFrameByIndex(self.symbols, idx)
        # E_EINVAL means the frame index is out of range; report via return.
        aborter(res, "SetScopeFrameByIndex", ignore=[E_EINVAL])
        return res != E_EINVAL

    def GetOffsetByName(self, name):
        """Return the address (offset) of the symbol named ``name``."""
        res = self.vt.GetOffsetByName(
            self.symbols, name.encode("ascii"), byref(self.ulong64)
        )
        aborter(res, "GetOffsetByName {}".format(name))
        return self.ulong64.value

    def GetNearNameByOffset(self, addr):
        """Return "name+displacement" for the symbol nearest ``addr``.

        Returns the literal string "{noname}" when no symbol is found.
        """
        ptr = create_string_buffer(256)
        pulong = c_ulong()
        disp = c_ulonglong()
        # Zero arg -> "delta" indicating how many symbols to skip
        res = self.vt.GetNearNameByOffset(
            self.symbols, addr, 0, ptr, 255, byref(pulong), byref(disp)
        )
        if res == E_NOINTERFACE:
            return "{noname}"
        aborter(res, "GetNearNameByOffset")
        # Force NUL termination before reading the buffer back.
        ptr[255] = "\0".encode("ascii")
        return "{}+{}".format(string_at(ptr).decode("ascii"), disp.value)

    def GetModuleByModuleName2(self, name):
        """Return the base address of the module named ``name``."""
        # First zero arg -> module index to search from, second zero arg ->
        # DEBUG_GETMOD_* flags, none of which we use.
        res = self.vt.GetModuleByModuleName2(
            self.symbols, name, 0, 0, None, byref(self.ulong64)
        )
        aborter(res, "GetModuleByModuleName2")
        return self.ulong64.value

    def GetScopeSymbolGroup2(self):
        """Return a SymbolGroup for all symbols in the current scope."""
        retptr = POINTER(IDebugSymbolGroup2)()
        res = self.vt.GetScopeSymbolGroup2(
            self.symbols, ScopeGroupFlags.DEBUG_SCOPE_GROUP_ALL, None, retptr
        )
        aborter(res, "GetScopeSymbolGroup2")
        return SymbolGroup(retptr)

    def GetSymbolEntryString(self, idx, module):
        """Return the (first) name string of symbol ``idx`` in ``module``."""
        symid = DEBUG_MODULE_AND_ID()
        symid.ModuleBase = module
        symid.Id = idx
        ptr = create_string_buffer(1024)
        # Zero arg is the string index -- symbols can have multiple names, for now
        # only support the first one.
        res = self.vt.GetSymbolEntryString(
            self.symbols, symid, 0, ptr, 1023, byref(self.ulong)
        )
        aborter(res, "GetSymbolEntryString")
        return string_at(ptr).decode("ascii")

    def GetSymbolEntryInformation(self, module, theid):
        """Return a SymbolEntry namedtuple for symbol ``theid`` in ``module``."""
        symid = DEBUG_MODULE_AND_ID()
        symentry = DEBUG_SYMBOL_ENTRY()
        symid.ModuleBase = module
        symid.Id = theid
        res = self.vt.GetSymbolEntryInformation(self.symbols, symid, symentry)
        aborter(res, "GetSymbolEntryInformation")
        # Fetch fields into SymbolEntry object
        fields = map(lambda x: getattr(symentry, x), SymbolEntry._fields)
        return SymbolEntry(*fields)

    def GetSymbolEntriesByName(self, symstr):
        """Return a list of SymbolId for every symbol matching ``symstr``.

        ``symstr`` may contain wildcards, e.g. "module!*".
        """
        # Initial query to find number of symbol entries
        res = self.vt.GetSymbolEntriesByName(
            self.symbols, symstr.encode("ascii"), 0, None, 0, byref(self.ulong)
        )
        aborter(res, "GetSymbolEntriesByName")

        # Build a buffer and query for 'length' entries
        length = self.ulong.value
        symrecs = (DEBUG_MODULE_AND_ID * length)()
        # Zero arg -> flags, of which there are none defined.
        res = self.vt.GetSymbolEntriesByName(
            self.symbols, symstr.encode("ascii"), 0, symrecs, length, byref(self.ulong)
        )
        aborter(res, "GetSymbolEntriesByName")

        # Extract 'length' number of SymbolIds
        length = self.ulong.value

        def extract(x):
            sym = symrecs[x]
            return SymbolId(sym.ModuleBase, sym.Id)

        return [extract(x) for x in range(length)]

    def GetSymbolPath(self):
        """Return the debugger's symbol search path as a string."""
        # Query for length of buffer to allocate
        res = self.vt.GetSymbolPath(self.symbols, None, 0, byref(self.ulong))
        # S_FALSE is expected here: it signals a truncated (NULL) buffer.
        aborter(res, "GetSymbolPath", ignore=[S_FALSE])

        # Fetch 'length' length symbol path string
        length = self.ulong.value
        arr = create_string_buffer(length)
        res = self.vt.GetSymbolPath(self.symbols, arr, length, byref(self.ulong))
        aborter(res, "GetSymbolPath")

        return string_at(arr).decode("ascii")

    def GetSourcePath(self):
        """Return the debugger's source search path as a string."""
        # Query for length of buffer to allocate
        res = self.vt.GetSourcePath(self.symbols, None, 0, byref(self.ulong))
        aborter(res, "GetSourcePath", ignore=[S_FALSE])

        # Fetch a string of len 'length'
        length = self.ulong.value
        arr = create_string_buffer(length)
        res = self.vt.GetSourcePath(self.symbols, arr, length, byref(self.ulong))
        aborter(res, "GetSourcePath")

        return string_at(arr).decode("ascii")

    def SetSourcePath(self, string):
        """Set the debugger's source search path."""
        res = self.vt.SetSourcePath(self.symbols, string.encode("ascii"))
        aborter(res, "SetSourcePath")
        return

    def GetModuleParameters(self, base):
        """Return DebugModuleParams for the module loaded at ``base``."""
        self.ulong64.value = base
        params = DEBUG_MODULE_PARAMETERS()
        # Fetch one module params struct, starting at idx zero
        res = self.vt.GetModuleParameters(
            self.symbols, 1, byref(self.ulong64), 0, byref(params)
        )
        aborter(res, "GetModuleParameters")
        return make_debug_module_params(params)

    def GetSymbolOptions(self):
        """Return the current symbol options as SymbolOptionFlags."""
        res = self.vt.GetSymbolOptions(self.symbols, byref(self.ulong))
        aborter(res, "GetSymbolOptions")
        return SymbolOptionFlags(self.ulong.value)

    def SetSymbolOptions(self, opts):
        """Replace the symbol options; ``opts`` must be SymbolOptionFlags."""
        assert isinstance(opts, SymbolOptionFlags)
        res = self.vt.SetSymbolOptions(self.symbols, opts.value)
        aborter(res, "SetSymbolOptions")
        return

    def GetLineByOffset(self, offs):
        """Return (filename, line) for address ``offs``, or None if there is
        no line information for that address."""
        # Initial query for filename buffer size
        res = self.vt.GetLineByOffset(
            self.symbols, offs, None, None, 0, byref(self.ulong), None
        )
        if res == E_FAIL:
            return None  # Sometimes we just can't get line numbers, of course
        aborter(res, "GetLineByOffset", ignore=[S_FALSE])

        # Allocate filename buffer and query for line number too
        filenamelen = self.ulong.value
        text = create_string_buffer(filenamelen)
        line = c_ulong()
        res = self.vt.GetLineByOffset(
            self.symbols, offs, byref(line), text, filenamelen, byref(self.ulong), None
        )
        aborter(res, "GetLineByOffset")

        return string_at(text).decode("ascii"), line.value

    def GetModuleNameString(self, whichname, base):
        """Return one of the name strings (selected by ``whichname``, a
        DEBUG_MODNAME_* value) of the module loaded at ``base``."""
        # Initial query for name string length
        res = self.vt.GetModuleNameString(
            self.symbols, whichname, DEBUG_ANY_ID, base, None, 0, byref(self.ulong)
        )
        aborter(res, "GetModuleNameString", ignore=[S_FALSE])

        module_name_len = self.ulong.value
        module_name = (c_char * module_name_len)()
        res = self.vt.GetModuleNameString(
            self.symbols,
            whichname,
            DEBUG_ANY_ID,
            base,
            module_name,
            module_name_len,
            None,
        )
        aborter(res, "GetModuleNameString")

        return string_at(module_name).decode("ascii")

    def GetNameByInlineContext(self, pc, ctx):
        """Return the symbol name for ``pc`` within inline context ``ctx``."""
        # None args -> ignore output name size and displacement
        buf = create_string_buffer(256)
        res = self.vt.GetNameByInlineContext(
            self.symbols, pc, ctx, buf, 255, None, None
        )
        aborter(res, "GetNameByInlineContext")
        return string_at(buf).decode("ascii")

    def GetLineByInlineContext(self, pc, ctx):
        """Return (filename, line) for ``pc`` within inline context ``ctx``."""
        # None args -> ignore output filename size and displacement
        buf = create_string_buffer(256)
        res = self.vt.GetLineByInlineContext(
            self.symbols, pc, ctx, byref(self.ulong), buf, 255, None, None
        )
        aborter(res, "GetLineByInlineContext")
        return string_at(buf).decode("ascii"), self.ulong.value

    def get_all_symbols(self):
        """Return [(name, SymbolEntry)] for every symbol in the exe module."""
        main_module_name = self.get_exefile_module_name()
        idnumbers = self.GetSymbolEntriesByName("{}!*".format(main_module_name))
        lst = []
        for symid in idnumbers:
            s = self.GetSymbolEntryString(symid.Id, symid.ModuleBase)
            symentry = self.GetSymbolEntryInformation(symid.ModuleBase, symid.Id)
            lst.append((s, symentry))
        return lst

    def get_all_functions(self):
        """Return the subset of get_all_symbols() tagged as functions."""
        syms = self.get_all_symbols()
        return [x for x in syms if x[1].Tag == SymTags.SymTagFunction]

    def get_all_modules(self):
        """Return DebugModuleParams for every loaded module."""
        params = DEBUG_MODULE_PARAMETERS()
        idx = 0
        res = 0
        all_modules = []
        while res != E_EINVAL:
            res = self.vt.GetModuleParameters(self.symbols, 1, None, idx, byref(params))
            # E_EINVAL signals we've walked past the last module index.
            aborter(res, "GetModuleParameters", ignore=[E_EINVAL])
            # NOTE(review): when res == E_EINVAL the stale `params` struct is
            # still appended, leaving one duplicate trailing entry in the
            # returned list -- confirm whether callers depend on this.
            all_modules.append(make_debug_module_params(params))
            idx += 1
        return all_modules

    def get_exefile_module(self):
        """Return the DebugModuleParams of the main executable module.

        Raises Exception if no module carries DEBUG_MODULE_EXE_MODULE.
        """
        all_modules = self.get_all_modules()
        reduce_func = (
            lambda x, y: y if y.Flags & DebugModuleFlags.DEBUG_MODULE_EXE_MODULE else x
        )
        main_module = reduce(reduce_func, all_modules, None)
        if main_module is None:
            raise Exception("Couldn't find the exefile module")
        return main_module

    def get_module_name(self, base):
        """Return the short module name of the module loaded at ``base``."""
        return self.GetModuleNameString(DebugModuleNames.DEBUG_MODNAME_MODULE, base)

    def get_exefile_module_name(self):
        """Return the short module name of the main executable."""
        return self.get_module_name(self.get_exefile_module().Base)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symgroup.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symgroup.py
index 2775af3279b78..abe71434d9f71 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symgroup.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/symgroup.py
@@ -13,86 +13,92 @@
 
 Symbol = namedtuple("Symbol", ["num", "name", "type", "value"])
 
+
 class IDebugSymbolGroup2(Structure):
-  pass
+    pass
+
 
class IDebugSymbolGroup2Vtbl(Structure):
    """ctypes image of the IDebugSymbolGroup2 COM vtable.

    Only the slots this module actually calls get real WINFUNCTYPE
    prototypes; all other slots are c_void_p placeholders that exist solely
    to keep the vtable slot offsets correct.
    """

    wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugSymbolGroup2))
    ids_getnumbersymbols = wrp(c_ulong_p)
    ids_getsymbolname = wrp(c_ulong, c_char_p, c_ulong, c_ulong_p)
    ids_getsymboltypename = wrp(c_ulong, c_char_p, c_ulong, c_ulong_p)
    ids_getsymbolvaluetext = wrp(c_ulong, c_char_p, c_ulong, c_ulong_p)
    # Slot order must match the COM interface declaration exactly.
    _fields_ = [
        ("QueryInterface", c_void_p),
        ("AddRef", c_void_p),
        ("Release", c_void_p),
        ("GetNumberSymbols", ids_getnumbersymbols),
        ("AddSymbol", c_void_p),
        ("RemoveSymbolByName", c_void_p),
        ("RemoveSymbolByIndex", c_void_p),
        ("GetSymbolName", ids_getsymbolname),
        ("GetSymbolParameters", c_void_p),
        ("ExpandSymbol", c_void_p),
        ("OutputSymbols", c_void_p),
        ("WriteSymbol", c_void_p),
        ("OutputAsType", c_void_p),
        ("AddSymbolWide", c_void_p),
        ("RemoveSymbolByNameWide", c_void_p),
        ("GetSymbolNameWide", c_void_p),
        ("WritesymbolWide", c_void_p),
        ("OutputAsTypeWide", c_void_p),
        ("GetSymbolTypeName", ids_getsymboltypename),
        ("GetSymbolTypeNameWide", c_void_p),
        ("GetSymbolSize", c_void_p),
        ("GetSymbolOffset", c_void_p),
        ("GetSymbolRegister", c_void_p),
        ("GetSymbolValueText", ids_getsymbolvaluetext),
        ("GetSymbolValueTextWide", c_void_p),
        ("GetSymbolEntryInformation", c_void_p),
    ]
 
+
 IDebugSymbolGroup2._fields_ = [("lpVtbl", POINTER(IDebugSymbolGroup2Vtbl))]
 
+
 class SymbolGroup(object):
-  def __init__(self, symgroup):
-    self.symgroup = symgroup.contents
-    self.vt = self.symgroup.lpVtbl.contents
-    self.ulong = c_ulong()
-
-  def GetNumberSymbols(self):
-    res = self.vt.GetNumberSymbols(self.symgroup, byref(self.ulong))
-    aborter(res, "GetNumberSymbols")
-    return self.ulong.value
-
-  def GetSymbolName(self, idx):
-    buf = create_string_buffer(256)
-    res = self.vt.GetSymbolName(self.symgroup, idx, buf, 255, byref(self.ulong))
-    aborter(res, "GetSymbolName")
-    thelen = self.ulong.value
-    return string_at(buf).decode("ascii")
-
-  def GetSymbolTypeName(self, idx):
-    buf = create_string_buffer(256)
-    res = self.vt.GetSymbolTypeName(self.symgroup, idx, buf, 255, byref(self.ulong))
-    aborter(res, "GetSymbolTypeName")
-    thelen = self.ulong.value
-    return string_at(buf).decode("ascii")
-
-  def GetSymbolValueText(self, idx, handleserror=False):
-    buf = create_string_buffer(256)
-    res = self.vt.GetSymbolValueText(self.symgroup, idx, buf, 255, byref(self.ulong))
-    if res != 0 and handleserror:
-      return None
-    aborter(res, "GetSymbolTypeName")
-    thelen = self.ulong.value
-    return string_at(buf).decode("ascii")
-
-  def get_symbol(self, idx):
-    name = self.GetSymbolName(idx)
-    thetype = self.GetSymbolTypeName(idx)
-    value = self.GetSymbolValueText(idx)
-    return Symbol(idx, name, thetype, value)
-
-  def get_all_symbols(self):
-    num_syms = self.GetNumberSymbols()
-    return list(map(self.get_symbol, list(range(num_syms))))
+    def __init__(self, symgroup):
+        self.symgroup = symgroup.contents
+        self.vt = self.symgroup.lpVtbl.contents
+        self.ulong = c_ulong()
+
+    def GetNumberSymbols(self):
+        res = self.vt.GetNumberSymbols(self.symgroup, byref(self.ulong))
+        aborter(res, "GetNumberSymbols")
+        return self.ulong.value
+
+    def GetSymbolName(self, idx):
+        buf = create_string_buffer(256)
+        res = self.vt.GetSymbolName(self.symgroup, idx, buf, 255, byref(self.ulong))
+        aborter(res, "GetSymbolName")
+        thelen = self.ulong.value
+        return string_at(buf).decode("ascii")
+
+    def GetSymbolTypeName(self, idx):
+        buf = create_string_buffer(256)
+        res = self.vt.GetSymbolTypeName(self.symgroup, idx, buf, 255, byref(self.ulong))
+        aborter(res, "GetSymbolTypeName")
+        thelen = self.ulong.value
+        return string_at(buf).decode("ascii")
+
+    def GetSymbolValueText(self, idx, handleserror=False):
+        buf = create_string_buffer(256)
+        res = self.vt.GetSymbolValueText(
+            self.symgroup, idx, buf, 255, byref(self.ulong)
+        )
+        if res != 0 and handleserror:
+            return None
+        aborter(res, "GetSymbolTypeName")
+        thelen = self.ulong.value
+        return string_at(buf).decode("ascii")
+
+    def get_symbol(self, idx):
+        name = self.GetSymbolName(idx)
+        thetype = self.GetSymbolTypeName(idx)
+        value = self.GetSymbolValueText(idx)
+        return Symbol(idx, name, thetype, value)
+
+    def get_all_symbols(self):
+        num_syms = self.GetNumberSymbols()
+        return list(map(self.get_symbol, list(range(num_syms))))

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/sysobjs.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/sysobjs.py
index 0e9844a363bdf..5e1fe927e04d0 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/sysobjs.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/sysobjs.py
@@ -11,190 +11,210 @@
 from .utils import *
 
 # UUID For SystemObjects4 interface.
-DebugSystemObjects4IID = IID(0x489468e6, 0x7d0f, 0x4af5, IID_Data4_Type(0x87, 0xab, 0x25, 0x20, 0x74, 0x54, 0xd5, 0x53))
+DebugSystemObjects4IID = IID(
+    0x489468E6,
+    0x7D0F,
+    0x4AF5,
+    IID_Data4_Type(0x87, 0xAB, 0x25, 0x20, 0x74, 0x54, 0xD5, 0x53),
+)
+
 
 class IDebugSystemObjects4(Structure):
-  pass
+    pass
+
 
 class IDebugSystemObjects4Vtbl(Structure):
-  wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugSystemObjects4))
-  ids_getnumberprocesses = wrp(POINTER(c_ulong))
-  ids_getprocessidsbyindex = wrp(c_ulong, c_ulong, c_ulong_p, c_ulong_p)
-  ids_setcurrentprocessid = wrp(c_ulong)
-  ids_getnumberthreads = wrp(c_ulong_p)
-  ids_getthreadidsbyindex = wrp(c_ulong, c_ulong, c_ulong_p, c_ulong_p)
-  ids_setcurrentthreadid = wrp(c_ulong)
-  _fields_ = [
-      ("QueryInterface", c_void_p),
-      ("AddRef", c_void_p),
-      ("Release", c_void_p),
-      ("GetEventThread", c_void_p),
-      ("GetEventProcess", c_void_p),
-      ("GetCurrentThreadId", c_void_p),
-      ("SetCurrentThreadId", ids_setcurrentthreadid),
-      ("GetCurrentProcessId", c_void_p),
-      ("SetCurrentProcessId", ids_setcurrentprocessid),
-      ("GetNumberThreads", ids_getnumberthreads),
-      ("GetTotalNumberThreads", c_void_p),
-      ("GetThreadIdsByIndex", ids_getthreadidsbyindex),
-      ("GetThreadIdByProcessor", c_void_p),
-      ("GetCurrentThreadDataOffset", c_void_p),
-      ("GetThreadIdByDataOffset", c_void_p),
-      ("GetCurrentThreadTeb", c_void_p),
-      ("GetThreadIdByTeb", c_void_p),
-      ("GetCurrentThreadSystemId", c_void_p),
-      ("GetThreadIdBySystemId", c_void_p),
-      ("GetCurrentThreadHandle", c_void_p),
-      ("GetThreadIdByHandle", c_void_p),
-      ("GetNumberProcesses", ids_getnumberprocesses),
-      ("GetProcessIdsByIndex", ids_getprocessidsbyindex),
-      ("GetCurrentProcessDataOffset", c_void_p),
-      ("GetProcessIdByDataOffset", c_void_p),
-      ("GetCurrentProcessPeb", c_void_p),
-      ("GetProcessIdByPeb", c_void_p),
-      ("GetCurrentProcessSystemId", c_void_p),
-      ("GetProcessIdBySystemId", c_void_p),
-      ("GetCurrentProcessHandle", c_void_p),
-      ("GetProcessIdByHandle", c_void_p),
-      ("GetCurrentProcessExecutableName", c_void_p),
-      ("GetCurrentProcessUpTime", c_void_p),
-      ("GetImplicitThreadDataOffset", c_void_p),
-      ("SetImplicitThreadDataOffset", c_void_p),
-      ("GetImplicitProcessDataOffset", c_void_p),
-      ("SetImplicitProcessDataOffset", c_void_p),
-      ("GetEventSystem", c_void_p),
-      ("GetCurrentSystemId", c_void_p),
-      ("SetCurrentSystemId", c_void_p),
-      ("GetNumberSystems", c_void_p),
-      ("GetSystemIdsByIndex", c_void_p),
-      ("GetTotalNumberThreadsAndProcesses", c_void_p),
-      ("GetCurrentSystemServer", c_void_p),
-      ("GetSystemByServer", c_void_p),
-      ("GetCurrentSystemServerName", c_void_p),
-      ("GetCurrentProcessExecutableNameWide", c_void_p),
-      ("GetCurrentSystemServerNameWide", c_void_p)
+    wrp = partial(WINFUNCTYPE, c_long, POINTER(IDebugSystemObjects4))
+    ids_getnumberprocesses = wrp(POINTER(c_ulong))
+    ids_getprocessidsbyindex = wrp(c_ulong, c_ulong, c_ulong_p, c_ulong_p)
+    ids_setcurrentprocessid = wrp(c_ulong)
+    ids_getnumberthreads = wrp(c_ulong_p)
+    ids_getthreadidsbyindex = wrp(c_ulong, c_ulong, c_ulong_p, c_ulong_p)
+    ids_setcurrentthreadid = wrp(c_ulong)
+    _fields_ = [
+        ("QueryInterface", c_void_p),
+        ("AddRef", c_void_p),
+        ("Release", c_void_p),
+        ("GetEventThread", c_void_p),
+        ("GetEventProcess", c_void_p),
+        ("GetCurrentThreadId", c_void_p),
+        ("SetCurrentThreadId", ids_setcurrentthreadid),
+        ("GetCurrentProcessId", c_void_p),
+        ("SetCurrentProcessId", ids_setcurrentprocessid),
+        ("GetNumberThreads", ids_getnumberthreads),
+        ("GetTotalNumberThreads", c_void_p),
+        ("GetThreadIdsByIndex", ids_getthreadidsbyindex),
+        ("GetThreadIdByProcessor", c_void_p),
+        ("GetCurrentThreadDataOffset", c_void_p),
+        ("GetThreadIdByDataOffset", c_void_p),
+        ("GetCurrentThreadTeb", c_void_p),
+        ("GetThreadIdByTeb", c_void_p),
+        ("GetCurrentThreadSystemId", c_void_p),
+        ("GetThreadIdBySystemId", c_void_p),
+        ("GetCurrentThreadHandle", c_void_p),
+        ("GetThreadIdByHandle", c_void_p),
+        ("GetNumberProcesses", ids_getnumberprocesses),
+        ("GetProcessIdsByIndex", ids_getprocessidsbyindex),
+        ("GetCurrentProcessDataOffset", c_void_p),
+        ("GetProcessIdByDataOffset", c_void_p),
+        ("GetCurrentProcessPeb", c_void_p),
+        ("GetProcessIdByPeb", c_void_p),
+        ("GetCurrentProcessSystemId", c_void_p),
+        ("GetProcessIdBySystemId", c_void_p),
+        ("GetCurrentProcessHandle", c_void_p),
+        ("GetProcessIdByHandle", c_void_p),
+        ("GetCurrentProcessExecutableName", c_void_p),
+        ("GetCurrentProcessUpTime", c_void_p),
+        ("GetImplicitThreadDataOffset", c_void_p),
+        ("SetImplicitThreadDataOffset", c_void_p),
+        ("GetImplicitProcessDataOffset", c_void_p),
+        ("SetImplicitProcessDataOffset", c_void_p),
+        ("GetEventSystem", c_void_p),
+        ("GetCurrentSystemId", c_void_p),
+        ("SetCurrentSystemId", c_void_p),
+        ("GetNumberSystems", c_void_p),
+        ("GetSystemIdsByIndex", c_void_p),
+        ("GetTotalNumberThreadsAndProcesses", c_void_p),
+        ("GetCurrentSystemServer", c_void_p),
+        ("GetSystemByServer", c_void_p),
+        ("GetCurrentSystemServerName", c_void_p),
+        ("GetCurrentProcessExecutableNameWide", c_void_p),
+        ("GetCurrentSystemServerNameWide", c_void_p),
     ]
 
+
 IDebugSystemObjects4._fields_ = [("lpVtbl", POINTER(IDebugSystemObjects4Vtbl))]
 
+
 class SysObjects(object):
-  def __init__(self, sysobjects):
-    self.ptr = sysobjects
-    self.sysobjects = sysobjects.contents
-    self.vt = self.sysobjects.lpVtbl.contents
-    # Keep a handy ulong for passing into C methods.
-    self.ulong = c_ulong()
-
-  def GetNumberSystems(self):
-    res = self.vt.GetNumberSystems(self.sysobjects, byref(self.ulong))
-    aborter(res, "GetNumberSystems")
-    return self.ulong.value
-
-  def GetNumberProcesses(self):
-    res = self.vt.GetNumberProcesses(self.sysobjects, byref(self.ulong))
-    aborter(res, "GetNumberProcesses")
-    return self.ulong.value
-
-  def GetNumberThreads(self):
-    res = self.vt.GetNumberThreads(self.sysobjects, byref(self.ulong))
-    aborter(res, "GetNumberThreads")
-    return self.ulong.value
-
-  def GetTotalNumberThreadsAndProcesses(self):
-    tthreads = c_ulong()
-    tprocs = c_ulong()
-    pulong3 = c_ulong()
-    res = self.vt.GetTotalNumberThreadsAndProcesses(self.sysobjects, byref(tthreads), byref(tprocs), byref(pulong3), byref(pulong3), byref(pulong3))
-    aborter(res, "GettotalNumberThreadsAndProcesses")
-    return tthreads.value, tprocs.value
-
-  def GetCurrentProcessId(self):
-    res = self.vt.GetCurrentProcessId(self.sysobjects, byref(self.ulong))
-    aborter(res, "GetCurrentProcessId")
-    return self.ulong.value
-
-  def SetCurrentProcessId(self, sysid):
-    res = self.vt.SetCurrentProcessId(self.sysobjects, sysid)
-    aborter(res, "SetCurrentProcessId")
-    return
-
-  def GetCurrentThreadId(self):
-    res = self.vt.GetCurrentThreadId(self.sysobjects, byref(self.ulong))
-    aborter(res, "GetCurrentThreadId")
-    return self.ulong.value
-
-  def SetCurrentThreadId(self, sysid):
-    res = self.vt.SetCurrentThreadId(self.sysobjects, sysid)
-    aborter(res, "SetCurrentThreadId")
-    return
-
-  def GetProcessIdsByIndex(self):
-    num_processes = self.GetNumberProcesses()
-    if num_processes == 0:
-      return []
-    engineids = (c_ulong * num_processes)()
-    pids = (c_ulong * num_processes)()
-    for x in range(num_processes):
-      engineids[x] = DEBUG_ANY_ID
-      pids[x] = DEBUG_ANY_ID
-    res = self.vt.GetProcessIdsByIndex(self.sysobjects, 0, num_processes, engineids, pids)
-    aborter(res, "GetProcessIdsByIndex")
-    return list(zip(engineids, pids))
-
-  def GetThreadIdsByIndex(self):
-    num_threads = self.GetNumberThreads()
-    if num_threads == 0:
-      return []
-    engineids = (c_ulong * num_threads)()
-    tids = (c_ulong * num_threads)()
-    for x in range(num_threads):
-      engineids[x] = DEBUG_ANY_ID
-      tids[x] = DEBUG_ANY_ID
-    # Zero -> start index
-    res = self.vt.GetThreadIdsByIndex(self.sysobjects, 0, num_threads, engineids, tids)
-    aborter(res, "GetThreadIdsByIndex")
-    return list(zip(engineids, tids))
-
-  def GetCurThreadHandle(self):
-    pulong64 = c_ulonglong()
-    res = self.vt.GetCurrentThreadHandle(self.sysobjects, byref(pulong64))
-    aborter(res, "GetCurrentThreadHandle")
-    return pulong64.value
-
-  def set_current_thread(self, pid, tid):
-    proc_sys_id = -1
-    for x in self.GetProcessIdsByIndex():
-      sysid, procid = x
-      if procid == pid:
-        proc_sys_id = sysid
-
-    if proc_sys_id == -1:
-      raise Exception("Couldn't find designated PID {}".format(pid))
-
-    self.SetCurrentProcessId(proc_sys_id)
-
-    thread_sys_id = -1
-    for x in self.GetThreadIdsByIndex():
-      sysid, threadid = x
-      if threadid == tid:
-        thread_sys_id = sysid
-
-    if thread_sys_id == -1:
-      raise Exception("Couldn't find designated TID {}".format(tid))
-
-    self.SetCurrentThreadId(thread_sys_id)
-    return
-
-  def print_current_procs_threads(self):
-    procs = []
-    for x in self.GetProcessIdsByIndex():
-      sysid, procid = x
-      procs.append(procid)
-
-    threads = []
-    for x in self.GetThreadIdsByIndex():
-      sysid, threadid = x
-      threads.append(threadid)
-
-    print("Current processes: {}".format(procs))
-    print("Current threads: {}".format(threads))
+    def __init__(self, sysobjects):
+        self.ptr = sysobjects
+        self.sysobjects = sysobjects.contents
+        self.vt = self.sysobjects.lpVtbl.contents
+        # Keep a handy ulong for passing into C methods.
+        self.ulong = c_ulong()
+
+    def GetNumberSystems(self):
+        res = self.vt.GetNumberSystems(self.sysobjects, byref(self.ulong))
+        aborter(res, "GetNumberSystems")
+        return self.ulong.value
+
+    def GetNumberProcesses(self):
+        res = self.vt.GetNumberProcesses(self.sysobjects, byref(self.ulong))
+        aborter(res, "GetNumberProcesses")
+        return self.ulong.value
+
+    def GetNumberThreads(self):
+        res = self.vt.GetNumberThreads(self.sysobjects, byref(self.ulong))
+        aborter(res, "GetNumberThreads")
+        return self.ulong.value
+
+    def GetTotalNumberThreadsAndProcesses(self):
+        tthreads = c_ulong()
+        tprocs = c_ulong()
+        pulong3 = c_ulong()
+        res = self.vt.GetTotalNumberThreadsAndProcesses(
+            self.sysobjects,
+            byref(tthreads),
+            byref(tprocs),
+            byref(pulong3),
+            byref(pulong3),
+            byref(pulong3),
+        )
+        aborter(res, "GettotalNumberThreadsAndProcesses")
+        return tthreads.value, tprocs.value
+
+    def GetCurrentProcessId(self):
+        res = self.vt.GetCurrentProcessId(self.sysobjects, byref(self.ulong))
+        aborter(res, "GetCurrentProcessId")
+        return self.ulong.value
+
+    def SetCurrentProcessId(self, sysid):
+        res = self.vt.SetCurrentProcessId(self.sysobjects, sysid)
+        aborter(res, "SetCurrentProcessId")
+        return
+
+    def GetCurrentThreadId(self):
+        res = self.vt.GetCurrentThreadId(self.sysobjects, byref(self.ulong))
+        aborter(res, "GetCurrentThreadId")
+        return self.ulong.value
+
+    def SetCurrentThreadId(self, sysid):
+        res = self.vt.SetCurrentThreadId(self.sysobjects, sysid)
+        aborter(res, "SetCurrentThreadId")
+        return
+
+    def GetProcessIdsByIndex(self):
+        num_processes = self.GetNumberProcesses()
+        if num_processes == 0:
+            return []
+        engineids = (c_ulong * num_processes)()
+        pids = (c_ulong * num_processes)()
+        for x in range(num_processes):
+            engineids[x] = DEBUG_ANY_ID
+            pids[x] = DEBUG_ANY_ID
+        res = self.vt.GetProcessIdsByIndex(
+            self.sysobjects, 0, num_processes, engineids, pids
+        )
+        aborter(res, "GetProcessIdsByIndex")
+        return list(zip(engineids, pids))
+
+    def GetThreadIdsByIndex(self):
+        num_threads = self.GetNumberThreads()
+        if num_threads == 0:
+            return []
+        engineids = (c_ulong * num_threads)()
+        tids = (c_ulong * num_threads)()
+        for x in range(num_threads):
+            engineids[x] = DEBUG_ANY_ID
+            tids[x] = DEBUG_ANY_ID
+        # Zero -> start index
+        res = self.vt.GetThreadIdsByIndex(
+            self.sysobjects, 0, num_threads, engineids, tids
+        )
+        aborter(res, "GetThreadIdsByIndex")
+        return list(zip(engineids, tids))
+
+    def GetCurThreadHandle(self):
+        pulong64 = c_ulonglong()
+        res = self.vt.GetCurrentThreadHandle(self.sysobjects, byref(pulong64))
+        aborter(res, "GetCurrentThreadHandle")
+        return pulong64.value
+
+    def set_current_thread(self, pid, tid):
+        proc_sys_id = -1
+        for x in self.GetProcessIdsByIndex():
+            sysid, procid = x
+            if procid == pid:
+                proc_sys_id = sysid
+
+        if proc_sys_id == -1:
+            raise Exception("Couldn't find designated PID {}".format(pid))
+
+        self.SetCurrentProcessId(proc_sys_id)
+
+        thread_sys_id = -1
+        for x in self.GetThreadIdsByIndex():
+            sysid, threadid = x
+            if threadid == tid:
+                thread_sys_id = sysid
+
+        if thread_sys_id == -1:
+            raise Exception("Couldn't find designated TID {}".format(tid))
+
+        self.SetCurrentThreadId(thread_sys_id)
+        return
+
+    def print_current_procs_threads(self):
+        procs = []
+        for x in self.GetProcessIdsByIndex():
+            sysid, procid = x
+            procs.append(procid)
+
+        threads = []
+        for x in self.GetThreadIdsByIndex():
+            sysid, threadid = x
+            threads.append(threadid)
+
+        print("Current processes: {}".format(procs))
+        print("Current threads: {}".format(threads))

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/utils.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/utils.py
index 0c9197aa1c90a..8bea3daaf7d9d 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/utils.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/dbgeng/utils.py
@@ -19,29 +19,34 @@
 # This doesn't fit into any convenient category
 DEBUG_ANY_ID = 0xFFFFFFFF
 
+
 class WinError(Exception):
-  def __init__(self, msg, hstatus):
-    self.hstatus = hstatus
-    super(WinError, self).__init__(msg)
+    def __init__(self, msg, hstatus):
+        self.hstatus = hstatus
+        super(WinError, self).__init__(msg)
+
 
 def aborter(res, msg, ignore=[]):
-  if res != 0 and res not in ignore:
-    # Convert a negative error code to a positive unsigned one, which is
-    # now NTSTATUSes appear in documentation.
-    if res < 0:
-      res += 0x100000000
-    msg = '{:08X} : {}'.format(res, msg)
-    raise WinError(msg, res)
+    if res != 0 and res not in ignore:
+        # Convert a negative error code to a positive unsigned one, which is
+        # how NTSTATUSes appear in documentation.
+        if res < 0:
+            res += 0x100000000
+        msg = "{:08X} : {}".format(res, msg)
+        raise WinError(msg, res)
+
 
 IID_Data4_Type = c_ubyte * 8
 
+
 class IID(Structure):
-  _fields_ = [
-      ("Data1", c_uint),
-      ("Data2", c_ushort),
-      ("Data3", c_ushort),
-      ("Data4", IID_Data4_Type)
-  ]
+    _fields_ = [
+        ("Data1", c_uint),
+        ("Data2", c_ushort),
+        ("Data3", c_ushort),
+        ("Data4", IID_Data4_Type),
+    ]
+
 
 c_ulong_p = POINTER(c_ulong)
 c_ulong64_p = POINTER(c_ulonglong)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/lldb/LLDB.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/lldb/LLDB.py
index 4ab693c6d1813..b13e7435ba75a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/lldb/LLDB.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/lldb/LLDB.py
@@ -36,17 +36,19 @@ def _custom_init(self):
         self._debugger = self._interface.SBDebugger.Create()
         self._debugger.SetAsync(False)
         self._target = self._debugger.CreateTargetWithFileAndArch(
-            self.context.options.executable, self.context.options.arch)
+            self.context.options.executable, self.context.options.arch
+        )
         if not self._target:
             raise LoadDebuggerException(
-                'could not create target for executable "{}" with arch:{}'.
-                format(self.context.options.executable,
-                       self.context.options.arch))
+                'could not create target for executable "{}" with arch:{}'.format(
+                    self.context.options.executable, self.context.options.arch
+                )
+            )
 
     def _custom_exit(self):
-        if getattr(self, '_process', None):
+        if getattr(self, "_process", None):
             self._process.Kill()
-        if getattr(self, '_debugger', None) and getattr(self, '_target', None):
+        if getattr(self, "_debugger", None) and getattr(self, "_target", None):
             self._debugger.DeleteTarget(self._target)
 
     def _translate_stop_reason(self, reason):
@@ -64,38 +66,37 @@ def _translate_stop_reason(self, reason):
 
     def _load_interface(self):
         try:
-            args = [self.lldb_executable, '-P']
-            pythonpath = check_output(
-                args, stderr=STDOUT).rstrip().decode('utf-8')
+            args = [self.lldb_executable, "-P"]
+            pythonpath = check_output(args, stderr=STDOUT).rstrip().decode("utf-8")
         except CalledProcessError as e:
             raise LoadDebuggerException(str(e), sys.exc_info())
         except OSError as e:
             raise LoadDebuggerException(
-                '{} ["{}"]'.format(e.strerror, self.lldb_executable),
-                sys.exc_info())
+                '{} ["{}"]'.format(e.strerror, self.lldb_executable), sys.exc_info()
+            )
 
         if not os.path.isdir(pythonpath):
             raise LoadDebuggerException(
-                'path "{}" does not exist [result of {}]'.format(
-                    pythonpath, args), sys.exc_info())
+                'path "{}" does not exist [result of {}]'.format(pythonpath, args),
+                sys.exc_info(),
+            )
 
         try:
-            module_info = imp.find_module('lldb', [pythonpath])
-            return imp.load_module('lldb', *module_info)
+            module_info = imp.find_module("lldb", [pythonpath])
+            return imp.load_module("lldb", *module_info)
         except ImportError as e:
             msg = str(e)
-            if msg.endswith('not a valid Win32 application.'):
-                msg = '{} [Are you mixing 32-bit and 64-bit binaries?]'.format(
-                    msg)
+            if msg.endswith("not a valid Win32 application."):
+                msg = "{} [Are you mixing 32-bit and 64-bit binaries?]".format(msg)
             raise LoadDebuggerException(msg, sys.exc_info())
 
     @classmethod
     def get_name(cls):
-        return 'lldb'
+        return "lldb"
 
     @classmethod
     def get_option_name(cls):
-        return 'lldb'
+        return "lldb"
 
     @property
     def version(self):
@@ -114,7 +115,8 @@ def _add_conditional_breakpoint(self, file_, line, condition):
         bp = self._target.BreakpointCreateByLocation(file_, line)
         if not bp:
             raise DebuggerException(
-                  'could not add breakpoint [{}:{}]'.format(file_, line))
+                "could not add breakpoint [{}:{}]".format(file_, line)
+            )
         id = bp.GetID()
         if condition:
             bp.SetCondition(condition)
@@ -134,7 +136,7 @@ def _evaulate_breakpoint_condition(self, id):
             # This must be an unconditional breakpoint.
             return True
         valueIR = self.evaluate_expression(condition)
-        return valueIR.type_name == 'bool' and valueIR.value == 'true'
+        return valueIR.type_name == "bool" and valueIR.value == "true"
 
     def get_triggered_breakpoint_ids(self):
         # Breakpoints can only have been triggered if we've hit one.
@@ -173,12 +175,12 @@ def delete_breakpoints(self, ids):
 
     def launch(self, cmdline):
         if self.context.options.target_run_args:
-          cmdline += shlex.split(self.context.options.target_run_args)
+            cmdline += shlex.split(self.context.options.target_run_args)
         self._process = self._target.LaunchSimple(cmdline, None, os.getcwd())
         if not self._process or self._process.GetNumThreads() == 0:
-            raise DebuggerException('could not launch process')
+            raise DebuggerException("could not launch process")
         if self._process.GetNumThreads() != 1:
-            raise DebuggerException('multiple threads not supported')
+            raise DebuggerException("multiple threads not supported")
         self._thread = self._process.GetThreadAtIndex(0)
         assert self._thread, (self._process, self._thread)
 
@@ -199,45 +201,52 @@ def _get_step_info(self, watches, step_index):
             sb_filespec = sb_line.GetFileSpec()
 
             try:
-                path = os.path.join(sb_filespec.GetDirectory(),
-                                    sb_filespec.GetFilename())
+                path = os.path.join(
+                    sb_filespec.GetDirectory(), sb_filespec.GetFilename()
+                )
             except (AttributeError, TypeError):
                 path = None
 
             function = self._sanitize_function_name(sb_frame.GetFunctionName())
 
             loc_dict = {
-                'path': path,
-                'lineno': sb_line.GetLine(),
-                'column': sb_line.GetColumn()
+                "path": path,
+                "lineno": sb_line.GetLine(),
+                "column": sb_line.GetColumn(),
             }
             loc = LocIR(**loc_dict)
             valid_loc_for_watch = loc.path and os.path.exists(loc.path)
 
-            frame = FrameIR(
-                function=function, is_inlined=sb_frame.IsInlined(), loc=loc)
+            frame = FrameIR(function=function, is_inlined=sb_frame.IsInlined(), loc=loc)
 
             if any(
-                    name in (frame.function or '')  # pylint: disable=no-member
-                    for name in self.frames_below_main):
+                name in (frame.function or "")  # pylint: disable=no-member
+                for name in self.frames_below_main
+            ):
                 break
 
             frames.append(frame)
 
-            state_frame = StackFrame(function=frame.function,
-                                     is_inlined=frame.is_inlined,
-                                     location=SourceLocation(**loc_dict),
-                                     watches={})
+            state_frame = StackFrame(
+                function=frame.function,
+                is_inlined=frame.is_inlined,
+                location=SourceLocation(**loc_dict),
+                watches={},
+            )
             if valid_loc_for_watch:
                 for expr in map(
                     # Filter out watches that are not active in the current frame,
                     # and then evaluate all the active watches.
-                    lambda watch_info, idx=i:
-                        self.evaluate_expression(watch_info.expression, idx),
+                    lambda watch_info, idx=i: self.evaluate_expression(
+                        watch_info.expression, idx
+                    ),
                     filter(
-                        lambda watch_info, idx=i, line_no=loc.lineno, loc_path=loc.path:
-                            watch_is_active(watch_info, loc_path, idx, line_no),
-                        watches)):
+                        lambda watch_info, idx=i, line_no=loc.lineno, loc_path=loc.path: watch_is_active(
+                            watch_info, loc_path, idx, line_no
+                        ),
+                        watches,
+                    ),
+                ):
                     state_frame.watches[expr.expression] = expr
             state_frames.append(state_frame)
 
@@ -248,8 +257,11 @@ def _get_step_info(self, watches, step_index):
         reason = self._translate_stop_reason(self._thread.GetStopReason())
 
         return StepIR(
-            step_index=step_index, frames=frames, stop_reason=reason,
-            program_state=ProgramState(state_frames))
+            step_index=step_index,
+            frames=frames,
+            stop_reason=reason,
+            program_state=ProgramState(state_frames),
+        )
 
     @property
     def is_running(self):
@@ -262,46 +274,54 @@ def is_finished(self):
 
     @property
     def frames_below_main(self):
-        return ['__scrt_common_main_seh', '__libc_start_main', '__libc_start_call_main']
+        return ["__scrt_common_main_seh", "__libc_start_main", "__libc_start_call_main"]
 
     def evaluate_expression(self, expression, frame_idx=0) -> ValueIR:
-        result = self._thread.GetFrameAtIndex(frame_idx
-            ).EvaluateExpression(expression)
+        result = self._thread.GetFrameAtIndex(frame_idx).EvaluateExpression(expression)
         error_string = str(result.error)
 
         value = result.value
-        could_evaluate = not any(s in error_string for s in [
-            "Can't run the expression locally",
-            "use of undeclared identifier",
-            "no member named",
-            "Couldn't lookup symbols",
-            "reference to local variable",
-            "invalid use of 'this' outside of a non-static member function",
-        ])
-
-        is_optimized_away = any(s in error_string for s in [
-            'value may have been optimized out',
-        ])
-
-        is_irretrievable = any(s in error_string for s in [
-            "couldn't get the value of variable",
-            "couldn't read its memory",
-            "couldn't read from memory",
-            "Cannot access memory at address",
-            "invalid address (fault address:",
-        ])
+        could_evaluate = not any(
+            s in error_string
+            for s in [
+                "Can't run the expression locally",
+                "use of undeclared identifier",
+                "no member named",
+                "Couldn't lookup symbols",
+                "reference to local variable",
+                "invalid use of 'this' outside of a non-static member function",
+            ]
+        )
+
+        is_optimized_away = any(
+            s in error_string
+            for s in [
+                "value may have been optimized out",
+            ]
+        )
+
+        is_irretrievable = any(
+            s in error_string
+            for s in [
+                "couldn't get the value of variable",
+                "couldn't read its memory",
+                "couldn't read from memory",
+                "Cannot access memory at address",
+                "invalid address (fault address:",
+            ]
+        )
 
         if could_evaluate and not is_irretrievable and not is_optimized_away:
-            assert error_string == 'success', (error_string, expression, value)
+            assert error_string == "success", (error_string, expression, value)
             # assert result.value is not None, (result.value, expression)
 
-        if error_string == 'success':
+        if error_string == "success":
             error_string = None
 
         # attempt to find expression as a variable, if found, take the variable
         # obj's type information as it's 'usually' more accurate.
         var_result = self._thread.GetFrameAtIndex(frame_idx).FindVariable(expression)
-        if str(var_result.error) == 'success':
+        if str(var_result.error) == "success":
             type_name = var_result.type.GetDisplayTypeName()
         else:
             type_name = result.type.GetDisplayTypeName()

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio.py
index 1db20c7788072..ca71d8b64c4db 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio.py
@@ -24,9 +24,9 @@
 def _load_com_module():
     try:
         module_info = imp.find_module(
-            'ComInterface',
-            [os.path.join(os.path.dirname(__file__), 'windows')])
-        return imp.load_module('ComInterface', *module_info)
+            "ComInterface", [os.path.join(os.path.dirname(__file__), "windows")]
+        )
+        return imp.load_module("ComInterface", *module_info)
     except ImportError as e:
         raise LoadDebuggerException(e, sys.exc_info())
 
@@ -34,9 +34,12 @@ def _load_com_module():
 # VSBreakpoint(path: PurePath, line: int, col: int, cond: str).  This is enough
 # info to identify breakpoint equivalence in visual studio based on the
 # properties we set through dexter currently.
-VSBreakpoint = namedtuple('VSBreakpoint', 'path, line, col, cond')
+VSBreakpoint = namedtuple("VSBreakpoint", "path, line, col, cond")
+
 
-class VisualStudio(DebuggerBase, metaclass=abc.ABCMeta):  # pylint: disable=abstract-method
+class VisualStudio(
+    DebuggerBase, metaclass=abc.ABCMeta
+):  # pylint: disable=abstract-method
 
     # Constants for results of Debugger.CurrentMode
     # (https://msdn.microsoft.com/en-us/library/envdte.debugger.currentmode.aspx)
@@ -68,29 +71,31 @@ def __init__(self, *args):
         super(VisualStudio, self).__init__(*args)
 
     def _create_solution(self):
-        self._solution.Create(self.context.working_directory.path,
-                              'DexterSolution')
+        self._solution.Create(self.context.working_directory.path, "DexterSolution")
         try:
             self._solution.AddFromFile(self._project_file)
         except OSError:
             raise LoadDebuggerException(
-                'could not debug the specified executable', sys.exc_info())
+                "could not debug the specified executable", sys.exc_info()
+            )
 
     def _load_solution(self):
         try:
             self._solution.Open(self.context.options.vs_solution)
         except:
             raise LoadDebuggerException(
-                    'could not load specified vs solution at {}'.
-                    format(self.context.options.vs_solution), sys.exc_info())
+                "could not load specified vs solution at {}".format(
+                    self.context.options.vs_solution
+                ),
+                sys.exc_info(),
+            )
 
     def _custom_init(self):
         try:
             self._debugger = self._interface.Debugger
             self._debugger.HexDisplayMode = False
 
-            self._interface.MainWindow.Visible = (
-                self.context.options.show_debugger)
+            self._interface.MainWindow.Visible = self.context.options.show_debugger
 
             self._solution = self._interface.Solution
             if self.context.options.vs_solution is None:
@@ -118,14 +123,14 @@ def _dte_version(self):
 
     @property
     def _location(self):
-        #TODO: Find a better way of determining path, line and column info
+        # TODO: Find a better way of determining path, line and column info
         # that doesn't require reading break points. This method requires
         # all lines to have a break point on them.
         bp = self._debugger.BreakpointLastHit
         return {
-            'path': getattr(bp, 'File', None),
-            'lineno': getattr(bp, 'FileLine', None),
-            'column': getattr(bp, 'FileColumn', None)
+            "path": getattr(bp, "File", None),
+            "lineno": getattr(bp, "FileLine", None),
+            "column": getattr(bp, "FileColumn", None),
         }
 
     @property
@@ -150,7 +155,7 @@ def clear_breakpoints(self):
         self._dex_id_to_vs.clear()
 
     def _add_breakpoint(self, file_, line):
-        return self._add_conditional_breakpoint(file_, line, '')
+        return self._add_conditional_breakpoint(file_, line, "")
 
     def _get_next_id(self):
         # "Generate" a new unique id for the breakpoint.
@@ -171,7 +176,7 @@ def _add_conditional_breakpoint(self, file_, line, condition):
 
         # Breakpoint doesn't exist already. Add it now.
         count_before = self._debugger.Breakpoints.Count
-        self._debugger.Breakpoints.Add('', file_, line, col, condition)
+        self._debugger.Breakpoints.Add("", file_, line, col, condition)
         # Our internal representation of VS says that the breakpoint doesn't
         # already exist so we do not expect this operation to fail here.
         assert count_before < self._debugger.Breakpoints.Count
@@ -181,8 +186,7 @@ def _add_conditional_breakpoint(self, file_, line, condition):
         return new_id
 
     def get_triggered_breakpoint_ids(self):
-        """Returns a set of opaque ids for just-triggered breakpoints.
-        """
+        """Returns a set of opaque ids for just-triggered breakpoints."""
         bps_hit = self._debugger.AllBreakpointsLastHit
         bp_id_list = []
         # Intuitively, AllBreakpointsLastHit breakpoints are the last hit
@@ -194,8 +198,12 @@ def get_triggered_breakpoint_ids(self):
             # All bound breakpoints should have the user-defined breakpoint as
             # a parent.
             assert bp.Parent
-            vsbp = VSBreakpoint(PurePath(bp.Parent.File), bp.Parent.FileLine,
-                                bp.Parent.FileColumn, bp.Parent.Condition)
+            vsbp = VSBreakpoint(
+                PurePath(bp.Parent.File),
+                bp.Parent.FileLine,
+                bp.Parent.FileColumn,
+                bp.Parent.Condition,
+            )
             try:
                 ids = self._vs_to_dex_ids[vsbp]
             except KeyError:
@@ -229,20 +237,21 @@ def delete_breakpoints(self, ids):
             # We're looking at the user-set breakpoints so there should be no
             # Parent.
             assert bp.Parent == None
-            this_vsbp = VSBreakpoint(PurePath(bp.File), bp.FileLine,
-                                     bp.FileColumn, bp.Condition)
+            this_vsbp = VSBreakpoint(
+                PurePath(bp.File), bp.FileLine, bp.FileColumn, bp.Condition
+            )
             if this_vsbp in vsbp_set:
                 bp.Delete()
                 vsbp_to_del_count -= 1
                 if vsbp_to_del_count == 0:
                     break
         if vsbp_to_del_count:
-            raise KeyError('did not find breakpoint to be deleted')
+            raise KeyError("did not find breakpoint to be deleted")
 
     def _fetch_property(self, props, name):
         num_props = props.Count
         result = None
-        for x in range(1, num_props+1):
+        for x in range(1, num_props + 1):
             item = props.Item(x)
             if item.Name == name:
                 return item
@@ -251,18 +260,22 @@ def _fetch_property(self, props, name):
     def launch(self, cmdline):
         exe_path = Path(self.context.options.executable)
         self.context.logger.note(f"VS: Using executable: '{exe_path}'")
-        cmdline_str = ' '.join(cmdline)
+        cmdline_str = " ".join(cmdline)
         if self.context.options.target_run_args:
-          cmdline_str += f" {self.context.options.target_run_args}"
+            cmdline_str += f" {self.context.options.target_run_args}"
         if cmdline_str:
-          self.context.logger.note(f"VS: Using executable args: '{cmdline_str}'")
+            self.context.logger.note(f"VS: Using executable args: '{cmdline_str}'")
 
         # In a slightly baroque manner, lookup the VS project that runs when
         # you click "run", and set its command line options to the desired
         # command line options.
-        startup_proj_name = str(self._fetch_property(self._interface.Solution.Properties, 'StartupProject'))
+        startup_proj_name = str(
+            self._fetch_property(self._interface.Solution.Properties, "StartupProject")
+        )
         project = self._fetch_property(self._interface.Solution, startup_proj_name)
-        ActiveConfiguration = self._fetch_property(project.Properties, 'ActiveConfiguration').Object
+        ActiveConfiguration = self._fetch_property(
+            project.Properties, "ActiveConfiguration"
+        ).Object
         ActiveConfiguration.DebugSettings.CommandArguments = cmdline_str
 
         self.context.logger.note("Launching VS debugger...")
@@ -282,8 +295,11 @@ def set_current_stack_frame(self, idx: int = 0):
             stack_frame = stack_frames[idx]
             self._debugger.CurrentStackFrame = stack_frame.raw
         except IndexError:
-            raise Error('attempted to access stack frame {} out of {}'
-                .format(idx, len(stack_frames)))
+            raise Error(
+                "attempted to access stack frame {} out of {}".format(
+                    idx, len(stack_frames)
+                )
+            )
 
     def _get_step_info(self, watches, step_index):
         thread = self._debugger.CurrentThread
@@ -292,30 +308,31 @@ def _get_step_info(self, watches, step_index):
         frames = []
         state_frames = []
 
-
         loc = LocIR(**self._location)
         valid_loc_for_watch = loc.path and os.path.exists(loc.path)
 
         for idx, sf in enumerate(stackframes):
             frame = FrameIR(
                 function=self._sanitize_function_name(sf.FunctionName),
-                is_inlined=sf.FunctionName.startswith('[Inline Frame]'),
-                loc=LocIR(path=None, lineno=None, column=None))
+                is_inlined=sf.FunctionName.startswith("[Inline Frame]"),
+                loc=LocIR(path=None, lineno=None, column=None),
+            )
 
-            fname = frame.function or ''  # pylint: disable=no-member
+            fname = frame.function or ""  # pylint: disable=no-member
             if any(name in fname for name in self.frames_below_main):
                 break
 
-            state_frame = StackFrame(function=frame.function,
-                                     is_inlined=frame.is_inlined,
-                                     watches={})
+            state_frame = StackFrame(
+                function=frame.function, is_inlined=frame.is_inlined, watches={}
+            )
 
             if valid_loc_for_watch and idx == 0:
                 for watch_info in watches:
                     if watch_is_active(watch_info, loc.path, idx, loc.lineno):
                         watch_expr = watch_info.expression
-                        state_frame.watches[watch_expr] = self.evaluate_expression(watch_expr, idx)
-
+                        state_frame.watches[watch_expr] = self.evaluate_expression(
+                            watch_expr, idx
+                        )
 
             state_frames.append(state_frame)
             frames.append(frame)
@@ -331,8 +348,11 @@ def _get_step_info(self, watches, step_index):
         program_state = ProgramState(frames=state_frames)
 
         return StepIR(
-            step_index=step_index, frames=frames, stop_reason=reason,
-            program_state=program_state)
+            step_index=step_index,
+            frames=frames,
+            stop_reason=reason,
+            program_state=program_state,
+        )
 
     @property
     def is_running(self):
@@ -345,8 +365,10 @@ def is_finished(self):
     @property
     def frames_below_main(self):
         return [
-            '[Inline Frame] invoke_main', '__scrt_common_main_seh',
-            '__tmainCRTStartup', 'mainCRTStartup'
+            "[Inline Frame] invoke_main",
+            "__scrt_common_main_seh",
+            "__tmainCRTStartup",
+            "mainCRTStartup",
         ]
 
     def evaluate_expression(self, expression, frame_idx=0) -> ValueIR:
@@ -357,20 +379,25 @@ def evaluate_expression(self, expression, frame_idx=0) -> ValueIR:
             self.set_current_stack_frame(0)
         value = result.Value
 
-        is_optimized_away = any(s in value for s in [
-            'Variable is optimized away and not available',
-            'Value is not available, possibly due to optimization',
-        ])
+        is_optimized_away = any(
+            s in value
+            for s in [
+                "Variable is optimized away and not available",
+                "Value is not available, possibly due to optimization",
+            ]
+        )
 
-        is_irretrievable = any(s in value for s in [
-            '???',
-            '<Unable to read memory>',
-        ])
+        is_irretrievable = any(
+            s in value
+            for s in [
+                "???",
+                "<Unable to read memory>",
+            ]
+        )
 
         # an optimized away value is still counted as being able to be
         # evaluated.
-        could_evaluate = (result.IsValidValue or is_optimized_away
-                          or is_irretrievable)
+        could_evaluate = result.IsValidValue or is_optimized_away or is_irretrievable
 
         return ValueIR(
             expression=expression,

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2015.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2015.py
index af6edcd2451c5..3c5c5ea0124be 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2015.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2015.py
@@ -12,12 +12,12 @@
 class VisualStudio2015(VisualStudio):
     @classmethod
     def get_name(cls):
-        return 'Visual Studio 2015'
+        return "Visual Studio 2015"
 
     @classmethod
     def get_option_name(cls):
-        return 'vs2015'
+        return "vs2015"
 
     @property
     def _dte_version(self):
-        return 'VisualStudio.DTE.14.0'
+        return "VisualStudio.DTE.14.0"

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2017.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2017.py
index f2f757546f302..fa28df75bd761 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2017.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2017.py
@@ -12,12 +12,12 @@
 class VisualStudio2017(VisualStudio):
     @classmethod
     def get_name(cls):
-        return 'Visual Studio 2017'
+        return "Visual Studio 2017"
 
     @classmethod
     def get_option_name(cls):
-        return 'vs2017'
+        return "vs2017"
 
     @property
     def _dte_version(self):
-        return 'VisualStudio.DTE.15.0'
+        return "VisualStudio.DTE.15.0"

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2019.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2019.py
index 9ebe0a0dee3e5..7a9ba1849167a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2019.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2019.py
@@ -12,12 +12,12 @@
 class VisualStudio2019(VisualStudio):
     @classmethod
     def get_name(cls):
-        return 'Visual Studio 2019'
+        return "Visual Studio 2019"
 
     @classmethod
     def get_option_name(cls):
-        return 'vs2019'
+        return "vs2019"
 
     @property
     def _dte_version(self):
-        return 'VisualStudio.DTE.16.0'
+        return "VisualStudio.DTE.16.0"

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/windows/ComInterface.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/windows/ComInterface.py
index 0bce5b533e7bf..84f43360c4480 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/windows/ComInterface.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/windows/ComInterface.py
@@ -13,6 +13,7 @@
 # pylint: disable=import-error
 import win32com.client as com
 import win32api
+
 # pylint: enable=import-error
 
 from dex.utils.Exceptions import LoadDebuggerException
@@ -22,25 +23,27 @@
 
 def get_file_version(file_):
     try:
-        info = win32api.GetFileVersionInfo(file_, '\\')
-        ms = info['FileVersionMS']
-        ls = info['FileVersionLS']
-        return '.'.join(
-            str(s) for s in [
+        info = win32api.GetFileVersionInfo(file_, "\\")
+        ms = info["FileVersionMS"]
+        ls = info["FileVersionLS"]
+        return ".".join(
+            str(s)
+            for s in [
                 win32api.HIWORD(ms),
                 win32api.LOWORD(ms),
                 win32api.HIWORD(ls),
-                win32api.LOWORD(ls)
-            ])
+                win32api.LOWORD(ls),
+            ]
+        )
     except com.pywintypes.error:  # pylint: disable=no-member
-        return 'no versioninfo present'
+        return "no versioninfo present"
 
 
 def _handle_com_error(e):
     exc = sys.exc_info()
     msg = win32api.FormatMessage(e.hresult)
     try:
-        msg = msg.decode('CP1251')
+        msg = msg.decode("CP1251")
     except AttributeError:
         pass
     msg = msg.strip()
@@ -54,7 +57,7 @@ class ComObject(object):
 
     def __init__(self, raw):
         assert not isinstance(raw, ComObject), raw
-        self.__dict__['raw'] = raw
+        self.__dict__["raw"] = raw
 
     def __str__(self):
         return self._call(self.raw.__str__)
@@ -88,15 +91,15 @@ def _call(cls, fn, *args):
         """
         ex = AssertionError("this should never be raised!")
 
-        assert (inspect.isfunction(fn) or inspect.ismethod(fn)
-                or inspect.isbuiltin(fn)), (fn, type(fn))
+        assert (
+            inspect.isfunction(fn) or inspect.ismethod(fn) or inspect.isbuiltin(fn)
+        ), (fn, type(fn))
         retries = ([0] * 50) + ([1] * 5)
         for r in retries:
             try:
                 try:
                     result = fn(*args)
-                    if inspect.ismethod(result) or 'win32com' in str(
-                            result.__class__):
+                    if inspect.ismethod(result) or "win32com" in str(result.__class__):
                         result = ComObject(result)
                     return result
                 except _com_error as e:
@@ -116,4 +119,5 @@ def __init__(self, class_string):
         except _com_error as e:
             msg, exc = _handle_com_error(e)
             raise LoadDebuggerException(
-                '{} [{}]'.format(msg, class_string), orig_exception=exc)
+                "{} [{}]".format(msg, class_string), orig_exception=exc
+            )

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/BuilderIR.py b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/BuilderIR.py
index b94a1fb7e8104..953b1da56ef05 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/BuilderIR.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/BuilderIR.py
@@ -7,8 +7,7 @@
 
 
 class BuilderIR:
-    """Data class which represents the compiler related options passed to Dexter
-    """
+    """Data class which represents the compiler related options passed to Dexter"""
 
     def __init__(self, name: str, cflags: str, ldflags: str):
         self.name = name

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/DextIR.py b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/DextIR.py
index b82c2bab56dec..be3299c333260 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/DextIR.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/DextIR.py
@@ -14,12 +14,15 @@
 
 
 def _step_kind_func(context, step):
-    if (step.current_location.path is None or
-        not os.path.exists(step.current_location.path)):
+    if step.current_location.path is None or not os.path.exists(
+        step.current_location.path
+    ):
         return StepKind.FUNC_UNKNOWN
 
-    if any(os.path.samefile(step.current_location.path, f)
-           for f in context.options.source_files):
+    if any(
+        os.path.samefile(step.current_location.path, f)
+        for f in context.options.source_files
+    ):
         return StepKind.FUNC
 
     return StepKind.FUNC_EXTERNAL
@@ -39,13 +42,15 @@ class DextIR:
         commands: { name (str), commands (list[CommandIR])
     """
 
-    def __init__(self,
-                 dexter_version: str,
-                 executable_path: str,
-                 source_paths: List[str],
-                 builder: BuilderIR = None,
-                 debugger: DebuggerIR = None,
-                 commands: OrderedDict = None):
+    def __init__(
+        self,
+        dexter_version: str,
+        executable_path: str,
+        source_paths: List[str],
+        builder: BuilderIR = None,
+        debugger: DebuggerIR = None,
+        commands: OrderedDict = None,
+    ):
         self.dexter_version = dexter_version
         self.executable_path = executable_path
         self.source_paths = source_paths
@@ -55,18 +60,22 @@ def __init__(self,
         self.steps: List[StepIR] = []
 
     def __str__(self):
-        colors = 'rgby'
-        st = '## BEGIN ##\n'
+        colors = "rgby"
+        st = "## BEGIN ##\n"
         color_idx = 0
         for step in self.steps:
-            if step.step_kind in (StepKind.FUNC, StepKind.FUNC_EXTERNAL,
-                                  StepKind.FUNC_UNKNOWN):
+            if step.step_kind in (
+                StepKind.FUNC,
+                StepKind.FUNC_EXTERNAL,
+                StepKind.FUNC_UNKNOWN,
+            ):
                 color_idx += 1
 
             color = colors[color_idx % len(colors)]
-            st += '<{}>{}</>\n'.format(color, step)
-        st += '## END ({} step{}) ##\n'.format(
-            self.num_steps, '' if self.num_steps == 1 else 's')
+            st += "<{}>{}</>\n".format(color, step)
+        st += "## END ({} step{}) ##\n".format(
+            self.num_steps, "" if self.num_steps == 1 else "s"
+        )
         return st
 
     @property
@@ -79,9 +88,15 @@ def _get_prev_step_in_this_frame(self, step):
         Returns:
             StepIR or None if there is no previous step in this frame.
         """
-        return next((s for s in reversed(self.steps)
-            if s.current_function == step.current_function
-            and s.num_frames == step.num_frames), None)
+        return next(
+            (
+                s
+                for s in reversed(self.steps)
+                if s.current_function == step.current_function
+                and s.num_frames == step.num_frames
+            ),
+            None,
+        )
 
     def _get_new_step_kind(self, context, step):
         if step.current_function is None:
@@ -103,7 +118,10 @@ def _get_new_step_kind(self, context, step):
             prev_step = frame_step if frame_step is not None else prev_step
 
         # If we're missing line numbers to compare then the step kind has to be UNKNOWN.
-        if prev_step.current_location.lineno is None or step.current_location.lineno is None:
+        if (
+            prev_step.current_location.lineno is None
+            or step.current_location.lineno is None
+        ):
             return StepKind.UNKNOWN
 
         # We're in the same func as prev step, check lineo.

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/LocIR.py b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/LocIR.py
index 52a56a8fe80c9..9f98a67b5aada 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/LocIR.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/LocIR.py
@@ -18,13 +18,16 @@ def __init__(self, path: str, lineno: int, column: int):
         self.column = column
 
     def __str__(self):
-        return '{}({}:{})'.format(self.path, self.lineno, self.column)
+        return "{}({}:{})".format(self.path, self.lineno, self.column)
 
     def __eq__(self, rhs):
-        return (os.path.exists(self.path) and os.path.exists(rhs.path)
-                and os.path.samefile(self.path, rhs.path)
-                and self.lineno == rhs.lineno
-                and self.column == rhs.column)
+        return (
+            os.path.exists(self.path)
+            and os.path.exists(rhs.path)
+            and os.path.samefile(self.path, rhs.path)
+            and self.lineno == rhs.lineno
+            and self.column == rhs.column
+        )
 
     def __lt__(self, rhs):
         if self.path != rhs.path:

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ProgramState.py b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ProgramState.py
index a1de5fbcf363d..a3b6b3aba488c 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ProgramState.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ProgramState.py
@@ -14,6 +14,7 @@
 from pathlib import PurePath
 from typing import List
 
+
 class SourceLocation:
     def __init__(self, path: str = None, lineno: int = None, column: int = None):
         if path:
@@ -23,7 +24,7 @@ def __init__(self, path: str = None, lineno: int = None, column: int = None):
         self.column = column
 
     def __str__(self):
-        return '{}({}:{})'.format(self.path, self.lineno, self.column)
+        return "{}({}:{})".format(self.path, self.lineno, self.column)
 
     def match(self, other) -> bool:
         """Returns true iff all the properties that appear in `self` have the
@@ -32,7 +33,9 @@ def match(self, other) -> bool:
         if not other or not isinstance(other, SourceLocation):
             return False
 
-        if self.path and (other.path is None or (PurePath(self.path) != PurePath(other.path))):
+        if self.path and (
+            other.path is None or (PurePath(self.path) != PurePath(other.path))
+        ):
             return False
 
         if self.lineno and (self.lineno != other.lineno):
@@ -45,11 +48,13 @@ def match(self, other) -> bool:
 
 
 class StackFrame:
-    def __init__(self,
-                 function: str = None,
-                 is_inlined: bool = None,
-                 location: SourceLocation = None,
-                 watches: OrderedDict = None):
+    def __init__(
+        self,
+        function: str = None,
+        is_inlined: bool = None,
+        location: SourceLocation = None,
+        watches: OrderedDict = None,
+    ):
         if watches is None:
             watches = {}
 
@@ -59,11 +64,12 @@ def __init__(self,
         self.watches = watches
 
     def __str__(self):
-        return '{}{}: {} | {}'.format(
+        return "{}{}: {} | {}".format(
             self.function,
-            ' (inlined)' if self.is_inlined else '',
+            " (inlined)" if self.is_inlined else "",
             self.location,
-            {k: str(self.watches[k]) for k in self.watches})
+            {k: str(self.watches[k]) for k in self.watches},
+        )
 
     def match(self, other) -> bool:
         """Returns true iff all the properties that appear in `self` have the
@@ -80,8 +86,10 @@ def match(self, other) -> bool:
                 try:
                     if isinstance(self.watches[name], dict):
                         for attr in iter(self.watches[name]):
-                            if (getattr(other.watches[name], attr, None) !=
-                                    self.watches[name][attr]):
+                            if (
+                                getattr(other.watches[name], attr, None)
+                                != self.watches[name][attr]
+                            ):
                                 return False
                     else:
                         if other.watches[name].value != self.watches[name]:
@@ -91,14 +99,18 @@ def match(self, other) -> bool:
 
         return True
 
+
 class ProgramState:
     def __init__(self, frames: List[StackFrame] = None):
         self.frames = frames
 
     def __str__(self):
-        return '\n'.join(map(
-            lambda enum: 'Frame {}: {}'.format(enum[0], enum[1]),
-            enumerate(self.frames)))
+        return "\n".join(
+            map(
+                lambda enum: "Frame {}: {}".format(enum[0], enum[1]),
+                enumerate(self.frames),
+            )
+        )
 
     def match(self, other) -> bool:
         """Returns true iff all the properties that appear in `self` have the

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/StepIR.py b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/StepIR.py
index 8111968efe992..b6c965e5e7d2b 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/StepIR.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/StepIR.py
@@ -43,13 +43,15 @@ class StepIR:
         watches (OrderedDict): { expression (str), result (ValueIR) }
     """
 
-    def __init__(self,
-                 step_index: int,
-                 stop_reason: StopReason,
-                 frames: List[FrameIR],
-                 step_kind: StepKind = None,
-                 watches: OrderedDict = None,
-                 program_state: ProgramState = None):
+    def __init__(
+        self,
+        step_index: int,
+        stop_reason: StopReason,
+        frames: List[FrameIR],
+        step_kind: StepKind = None,
+        watches: OrderedDict = None,
+        program_state: ProgramState = None,
+    ):
         self.step_index = step_index
         self.step_kind = step_kind
         self.stop_reason = stop_reason
@@ -66,17 +68,22 @@ def __init__(self,
     def __str__(self):
         try:
             frame = self.current_frame
-            frame_info = (frame.function, frame.loc.path, frame.loc.lineno,
-                          frame.loc.column)
+            frame_info = (
+                frame.function,
+                frame.loc.path,
+                frame.loc.lineno,
+                frame.loc.column,
+            )
         except AttributeError:
             frame_info = (None, None, None, None)
 
-        step_info = (self.step_index, ) + frame_info + (
-            str(self.stop_reason), str(self.step_kind),
-                                    [w for w in self.watches])
+        step_info = (
+            (self.step_index,)
+            + frame_info
+            + (str(self.stop_reason), str(self.step_kind), [w for w in self.watches])
+        )
 
-        return '{}{}'.format('.   ' * (self.num_frames - 1),
-                             json.dumps(step_info))
+        return "{}{}".format(".   " * (self.num_frames - 1), json.dumps(step_info))
 
     @property
     def num_frames(self):

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ValueIR.py b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ValueIR.py
index 9d532acbb2172..770f646258f73 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ValueIR.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/dextIR/ValueIR.py
@@ -9,14 +9,16 @@
 class ValueIR:
     """Data class to store the result of an expression evaluation."""
 
-    def __init__(self,
-                 expression: str,
-                 value: str,
-                 type_name: str,
-                 could_evaluate: bool,
-                 error_string: str = None,
-                 is_optimized_away: bool = False,
-                 is_irretrievable: bool = False):
+    def __init__(
+        self,
+        expression: str,
+        value: str,
+        type_name: str,
+        could_evaluate: bool,
+        error_string: str = None,
+        is_optimized_away: bool = False,
+        is_irretrievable: bool = False,
+    ):
         self.expression = expression
         self.value = value
         self.type_name = type_name
@@ -30,9 +32,10 @@ def __str__(self):
         if self.error_string is not None:
             return prefix + self.error_string
         if self.value is not None:
-            return prefix + '({}) {}'.format(self.type_name, self.value)
-        return (prefix +
-                'could_evaluate: {}; irretrievable: {}; optimized_away: {};'
-                    .format(self.could_evaluate, self.is_irretrievable,
-                            self.is_optimized_away))
-
+            return prefix + "({}) {}".format(self.type_name, self.value)
+        return (
+            prefix
+            + "could_evaluate: {}; irretrievable: {}; optimized_away: {};".format(
+                self.could_evaluate, self.is_irretrievable, self.is_optimized_away
+            )
+        )

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/heuristic/Heuristic.py b/cross-project-tests/debuginfo-tests/dexter/dex/heuristic/Heuristic.py
index 52ba7e1e897c6..5d1c5a777aba7 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/heuristic/Heuristic.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/heuristic/Heuristic.py
@@ -18,84 +18,92 @@
 from dex.command.commands.DexExpectWatchBase import format_address
 
 
-PenaltyCommand = namedtuple('PenaltyCommand', ['pen_dict', 'max_penalty'])
+PenaltyCommand = namedtuple("PenaltyCommand", ["pen_dict", "max_penalty"])
 # 'meta' field used in different ways by different things
-PenaltyInstance = namedtuple('PenaltyInstance', ['meta', 'the_penalty'])
+PenaltyInstance = namedtuple("PenaltyInstance", ["meta", "the_penalty"])
 
 
 def add_heuristic_tool_arguments(parser):
     parser.add_argument(
-        '--penalty-variable-optimized',
+        "--penalty-variable-optimized",
         type=int,
         default=3,
-        help='set the penalty multiplier for each'
-        ' occurrence of a variable that was optimized'
-        ' away',
-        metavar='<int>')
+        help="set the penalty multiplier for each"
+        " occurrence of a variable that was optimized"
+        " away",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-misordered-values',
+        "--penalty-misordered-values",
         type=int,
         default=3,
-        help='set the penalty multiplier for each'
-        ' occurrence of a misordered value.',
-        metavar='<int>')
+        help="set the penalty multiplier for each" " occurrence of a misordered value.",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-irretrievable',
+        "--penalty-irretrievable",
         type=int,
         default=4,
-        help='set the penalty multiplier for each'
+        help="set the penalty multiplier for each"
         " occurrence of a variable that couldn't"
-        ' be retrieved',
-        metavar='<int>')
+        " be retrieved",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-not-evaluatable',
+        "--penalty-not-evaluatable",
         type=int,
         default=5,
-        help='set the penalty multiplier for each'
+        help="set the penalty multiplier for each"
         " occurrence of a variable that couldn't"
-        ' be evaluated',
-        metavar='<int>')
+        " be evaluated",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-missing-values',
+        "--penalty-missing-values",
         type=int,
         default=6,
-        help='set the penalty multiplier for each missing'
-        ' value',
-        metavar='<int>')
+        help="set the penalty multiplier for each missing" " value",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-incorrect-values',
+        "--penalty-incorrect-values",
         type=int,
         default=7,
-        help='set the penalty multiplier for each'
-        ' occurrence of an unexpected value.',
-        metavar='<int>')
+        help="set the penalty multiplier for each"
+        " occurrence of an unexpected value.",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-unreachable',
+        "--penalty-unreachable",
         type=int,
         default=4,  # XXX XXX XXX selected by random
-        help='set the penalty for each line stepped onto that should'
-        ' have been unreachable.',
-        metavar='<int>')
+        help="set the penalty for each line stepped onto that should"
+        " have been unreachable.",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-misordered-steps',
+        "--penalty-misordered-steps",
         type=int,
         default=2,  # XXX XXX XXX selected by random
-        help='set the penalty for differences in the order of steps'
-        ' the program was expected to observe.',
-        metavar='<int>')
+        help="set the penalty for differences in the order of steps"
+        " the program was expected to observe.",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-missing-step',
+        "--penalty-missing-step",
         type=int,
         default=4,  # XXX XXX XXX selected by random
-        help='set the penalty for the program skipping over a step.',
-        metavar='<int>')
+        help="set the penalty for the program skipping over a step.",
+        metavar="<int>",
+    )
     parser.add_argument(
-        '--penalty-incorrect-program-state',
+        "--penalty-incorrect-program-state",
         type=int,
         default=4,  # XXX XXX XXX selected by random
-        help='set the penalty for the program never entering an expected state'
-        ' or entering an unexpected state.',
-        metavar='<int>')
+        help="set the penalty for the program never entering an expected state"
+        " or entering an unexpected state.",
+        metavar="<int>",
+    )
 
 
 class PenaltyLineRanges:
@@ -106,21 +114,21 @@ def __init__(self, first_step, penalty):
     def add_step(self, next_step, penalty):
         last_range = self.ranges[-1]
         last_step = last_range[1]
-        if (next_step == last_step + 1):
+        if next_step == last_step + 1:
             self.ranges[-1] = (last_range[0], next_step)
         else:
             self.ranges.append((next_step, next_step))
         self.penalty += penalty
 
     def __str__(self):
-        range_to_str = lambda r: str(r[0]) if r[0] == r[1] else f'{r[0]}-{r[1]}'
+        range_to_str = lambda r: str(r[0]) if r[0] == r[1] else f"{r[0]}-{r[1]}"
         if self.ranges[0][0] == self.ranges[-1][1]:
-            text = f'step {self.ranges[0][0]}'
+            text = f"step {self.ranges[0][0]}"
         else:
-            step_list = ', '.join([range_to_str(r) for r in self.ranges])
-            text = f'steps [{step_list}]'
+            step_list = ", ".join([range_to_str(r) for r in self.ranges])
+            text = f"steps [{step_list}]"
         if self.penalty:
-            text += ' <r>[-{}]</>'.format(self.penalty)
+            text += " <r>[-{}]</>".format(self.penalty)
         return text
 
 
@@ -130,16 +138,22 @@ def __init__(self, context, steps):
         self.penalties = {}
         self.address_resolutions = {}
 
-        worst_penalty = max([
-            self.penalty_variable_optimized, self.penalty_irretrievable,
-            self.penalty_not_evaluatable, self.penalty_incorrect_values,
-            self.penalty_missing_values, self.penalty_unreachable,
-            self.penalty_missing_step, self.penalty_misordered_steps
-        ])
+        worst_penalty = max(
+            [
+                self.penalty_variable_optimized,
+                self.penalty_irretrievable,
+                self.penalty_not_evaluatable,
+                self.penalty_incorrect_values,
+                self.penalty_missing_values,
+                self.penalty_unreachable,
+                self.penalty_missing_step,
+                self.penalty_misordered_steps,
+            ]
+        )
 
         # Before evaluating scoring commands, evaluate address values.
         try:
-            for command in steps.commands['DexDeclareAddress']:
+            for command in steps.commands["DexDeclareAddress"]:
                 command.address_resolutions = self.address_resolutions
                 command.eval(steps)
         except KeyError:
@@ -147,90 +161,94 @@ def __init__(self, context, steps):
 
         # Get DexExpectWatchType results.
         try:
-            for command in steps.commands['DexExpectWatchType']:
+            for command in steps.commands["DexExpectWatchType"]:
                 command.eval(steps)
-                maximum_possible_penalty = min(3, len(
-                    command.values)) * worst_penalty
+                maximum_possible_penalty = min(3, len(command.values)) * worst_penalty
                 name, p = self._calculate_expect_watch_penalties(
-                    command, maximum_possible_penalty)
-                name = name + ' ExpectType'
-                self.penalties[name] = PenaltyCommand(p,
-                                                      maximum_possible_penalty)
+                    command, maximum_possible_penalty
+                )
+                name = name + " ExpectType"
+                self.penalties[name] = PenaltyCommand(p, maximum_possible_penalty)
         except KeyError:
             pass
 
         # Get DexExpectWatchValue results.
         try:
-            for command in steps.commands['DexExpectWatchValue']:
+            for command in steps.commands["DexExpectWatchValue"]:
                 command.address_resolutions = self.address_resolutions
                 command.eval(steps)
-                maximum_possible_penalty = min(3, len(
-                    command.values)) * worst_penalty
+                maximum_possible_penalty = min(3, len(command.values)) * worst_penalty
                 name, p = self._calculate_expect_watch_penalties(
-                    command, maximum_possible_penalty)
-                name = name + ' ExpectValue'
-                self.penalties[name] = PenaltyCommand(p,
-                                                      maximum_possible_penalty)
+                    command, maximum_possible_penalty
+                )
+                name = name + " ExpectValue"
+                self.penalties[name] = PenaltyCommand(p, maximum_possible_penalty)
         except KeyError:
             pass
 
         try:
             penalties = defaultdict(list)
             maximum_possible_penalty_all = 0
-            for expect_state in steps.commands['DexExpectProgramState']:
+            for expect_state in steps.commands["DexExpectProgramState"]:
                 success = expect_state.eval(steps)
                 p = 0 if success else self.penalty_incorrect_program_state
 
-                meta = 'expected {}: {}'.format(
-                    '{} times'.format(expect_state.times)
-                        if expect_state.times >= 0 else 'at least once',
-                    expect_state.program_state_text)
+                meta = "expected {}: {}".format(
+                    "{} times".format(expect_state.times)
+                    if expect_state.times >= 0
+                    else "at least once",
+                    expect_state.program_state_text,
+                )
 
                 if success:
-                    meta = '<g>{}</>'.format(meta)
+                    meta = "<g>{}</>".format(meta)
 
                 maximum_possible_penalty = self.penalty_incorrect_program_state
                 maximum_possible_penalty_all += maximum_possible_penalty
                 name = expect_state.program_state_text
-                penalties[meta] = [PenaltyInstance('{} times'.format(
-                    len(expect_state.encounters)), p)]
-            self.penalties['expected program states'] = PenaltyCommand(
-                penalties, maximum_possible_penalty_all)
+                penalties[meta] = [
+                    PenaltyInstance("{} times".format(len(expect_state.encounters)), p)
+                ]
+            self.penalties["expected program states"] = PenaltyCommand(
+                penalties, maximum_possible_penalty_all
+            )
         except KeyError:
             pass
 
         # Get the total number of each step kind.
         step_kind_counts = defaultdict(int)
-        for step in getattr(steps, 'steps'):
+        for step in getattr(steps, "steps"):
             step_kind_counts[step.step_kind] += 1
 
         # Get DexExpectStepKind results.
         penalties = defaultdict(list)
         maximum_possible_penalty_all = 0
         try:
-            for command in steps.commands['DexExpectStepKind']:
+            for command in steps.commands["DexExpectStepKind"]:
                 command.eval()
                 # Cap the penalty at 2 * expected count or else 1
                 maximum_possible_penalty = max(command.count * 2, 1)
                 p = abs(command.count - step_kind_counts[command.name])
                 actual_penalty = min(p, maximum_possible_penalty)
-                key = ('{}'.format(command.name)
-                       if actual_penalty else '<g>{}</>'.format(command.name))
+                key = (
+                    "{}".format(command.name)
+                    if actual_penalty
+                    else "<g>{}</>".format(command.name)
+                )
                 penalties[key] = [PenaltyInstance(p, actual_penalty)]
                 maximum_possible_penalty_all += maximum_possible_penalty
-            self.penalties['step kind differences'] = PenaltyCommand(
-                penalties, maximum_possible_penalty_all)
+            self.penalties["step kind differences"] = PenaltyCommand(
+                penalties, maximum_possible_penalty_all
+            )
         except KeyError:
             pass
 
-        if 'DexUnreachable' in steps.commands:
-            cmds = steps.commands['DexUnreachable']
+        if "DexUnreachable" in steps.commands:
+            cmds = steps.commands["DexUnreachable"]
             unreach_count = 0
 
             # Find steps with unreachable in them
-            ureachs = [
-                s for s in steps.steps if 'DexUnreachable' in s.watches.keys()
-            ]
+            ureachs = [s for s in steps.steps if "DexUnreachable" in s.watches.keys()]
 
             # There's no need to match up cmds with the actual watches
             upen = self.penalty_unreachable
@@ -239,22 +257,19 @@ def __init__(self, context, steps):
             if count != 0:
                 d = dict()
                 for x in ureachs:
-                    msg = 'line {} reached'.format(x.current_location.lineno)
+                    msg = "line {} reached".format(x.current_location.lineno)
                     d[msg] = [PenaltyInstance(upen, upen)]
             else:
-                d = {
-                    '<g>No unreachable lines seen</>': [PenaltyInstance(0, 0)]
-                }
+                d = {"<g>No unreachable lines seen</>": [PenaltyInstance(0, 0)]}
             total = PenaltyCommand(d, len(cmds) * upen)
 
-            self.penalties['unreachable lines'] = total
+            self.penalties["unreachable lines"] = total
 
-        if 'DexExpectStepOrder' in steps.commands:
-            cmds = steps.commands['DexExpectStepOrder']
+        if "DexExpectStepOrder" in steps.commands:
+            cmds = steps.commands["DexExpectStepOrder"]
 
             # Form a list of which line/cmd we _should_ have seen
-            cmd_num_lst = [(x, c.get_line()) for c in cmds
-                                         for x in c.sequence]
+            cmd_num_lst = [(x, c.get_line()) for c in cmds for x in c.sequence]
             # Order them by the sequence number
             cmd_num_lst.sort(key=lambda t: t[0])
             # Strip out sequence key
@@ -262,8 +277,8 @@ def __init__(self, context, steps):
 
             # Now do the same, but for the actually observed lines/cmds
             ss = steps.steps
-            deso = [s for s in ss if 'DexExpectStepOrder' in s.watches.keys()]
-            deso = [s.watches['DexExpectStepOrder'] for s in deso]
+            deso = [s for s in ss if "DexExpectStepOrder" in s.watches.keys()]
+            deso = [s.watches["DexExpectStepOrder"] for s in deso]
             # We rely on the steps remaining in order here
             order_list = [int(x.expression) for x in deso]
 
@@ -279,11 +294,11 @@ def __init__(self, context, steps):
             num_repeats = 0
             for k, v in expected.items():
                 if k not in seen:
-                    msg = 'Line {} not seen'.format(k)
+                    msg = "Line {} not seen".format(k)
                     unseen_line_dict[msg] = [PenaltyInstance(mispen, mispen)]
                     num_missing += v
                 elif v > seen[k]:
-                    msg = 'Line {} skipped at least once'.format(k)
+                    msg = "Line {} skipped at least once".format(k)
                     skipped_line_dict[msg] = [PenaltyInstance(mispen, mispen)]
                     num_missing += v - seen[k]
                 elif v < seen[k]:
@@ -294,16 +309,16 @@ def __init__(self, context, steps):
 
             if len(unseen_line_dict) == 0:
                 pi = PenaltyInstance(0, 0)
-                unseen_line_dict['<g>All lines were seen</>'] = [pi]
+                unseen_line_dict["<g>All lines were seen</>"] = [pi]
 
             if len(skipped_line_dict) == 0:
                 pi = PenaltyInstance(0, 0)
-                skipped_line_dict['<g>No lines were skipped</>'] = [pi]
+                skipped_line_dict["<g>No lines were skipped</>"] = [pi]
 
             total = PenaltyCommand(unseen_line_dict, len(expected) * mispen)
-            self.penalties['Unseen lines'] = total
+            self.penalties["Unseen lines"] = total
             total = PenaltyCommand(skipped_line_dict, len(expected) * mispen)
-            self.penalties['Skipped lines'] = total
+            self.penalties["Skipped lines"] = total
 
             ordpen = self.penalty_misordered_steps
             cmd_num_lst = [str(x) for x in cmd_num_lst]
@@ -311,7 +326,7 @@ def __init__(self, context, steps):
             lst = list(difflib.Differ().compare(cmd_num_lst, order_list))
             
diff _detail = Counter(l[0] for l in lst)
 
-            assert '?' not in diff_detail
+            assert "?" not in diff_detail
 
             # Diffs are hard to interpret; there are many algorithms for
             # condensing them. Ignore all that, and just print out the changed
@@ -328,9 +343,9 @@ def filt_lines(s, seg, e, key):
             
diff _msgs = dict()
 
             def reportdiff(start_idx, segment, end_idx):
-                msg = 'Order mismatch, expected linenos {}, saw {}'
-                expected_linenos = filt_lines(start_idx, segment, end_idx, '-')
-                seen_linenos = filt_lines(start_idx, segment, end_idx, '+')
+                msg = "Order mismatch, expected linenos {}, saw {}"
+                expected_linenos = filt_lines(start_idx, segment, end_idx, "-")
+                seen_linenos = filt_lines(start_idx, segment, end_idx, "+")
                 msg = msg.format(expected_linenos, seen_linenos)
                 
diff _msgs[msg] = [PenaltyInstance(ordpen, ordpen)]
 
@@ -338,13 +353,12 @@ def reportdiff(start_idx, segment, end_idx):
             start_expt_step = 0
             end_expt_step = 0
             to_print_lst = []
-            for k, subit in groupby(lst, lambda x: x[0] == ' '):
+            for k, subit in groupby(lst, lambda x: x[0] == " "):
                 if k:  # Whitespace group
                     nochanged = [x for x in subit]
                     end_expt_step = int(nochanged[0][2:])
                     if len(to_print_lst) > 0:
-                        reportdiff(start_expt_step, to_print_lst,
-                                   end_expt_step)
+                        reportdiff(start_expt_step, to_print_lst, end_expt_step)
                     start_expt_step = int(nochanged[-1][2:])
                     to_print_lst = []
                 else:  # Diff group, save for printing
@@ -352,14 +366,12 @@ def reportdiff(start_idx, segment, end_idx):
 
             # If there was a dangling different step, print that too.
             if len(to_print_lst) > 0:
-                reportdiff(start_expt_step, to_print_lst, '[End]')
+                reportdiff(start_expt_step, to_print_lst, "[End]")
 
             if len(diff_msgs) == 0:
-                diff_msgs['<g>No lines misordered</>'] = [
-                    PenaltyInstance(0, 0)
-                ]
+                diff_msgs["<g>No lines misordered</>"] = [PenaltyInstance(0, 0)]
             total = PenaltyCommand(diff_msgs, len(cmd_num_lst) * ordpen)
-            self.penalties['Misordered lines'] = total
+            self.penalties["Misordered lines"] = total
 
         return
 
@@ -369,13 +381,11 @@ def _calculate_expect_watch_penalties(self, c, maximum_possible_penalty):
         if c.line_range[0] == c.line_range[-1]:
             line_range = str(c.line_range[0])
         else:
-            line_range = '{}-{}'.format(c.line_range[0], c.line_range[-1])
+            line_range = "{}-{}".format(c.line_range[0], c.line_range[-1])
 
-        name = '{}:{} [{}]'.format(
-            os.path.basename(c.path), line_range, c.expression)
+        name = "{}:{} [{}]".format(os.path.basename(c.path), line_range, c.expression)
 
-        num_actual_watches = len(c.expected_watches) + len(
-            c.unexpected_watches)
+        num_actual_watches = len(c.expected_watches) + len(c.unexpected_watches)
 
         penalty_available = maximum_possible_penalty
 
@@ -384,27 +394,29 @@ def _calculate_expect_watch_penalties(self, c, maximum_possible_penalty):
         # encountered the value at all.
         if num_actual_watches or c.times_encountered == 0:
             for v in c.missing_values:
-                current_penalty = min(penalty_available,
-                                      self.penalty_missing_values)
+                current_penalty = min(penalty_available, self.penalty_missing_values)
                 penalty_available -= current_penalty
-                penalties['missing values'].append(
-                    PenaltyInstance(v, current_penalty))
+                penalties["missing values"].append(PenaltyInstance(v, current_penalty))
 
         for v in c.encountered_values:
-            penalties['<g>expected encountered watches</>'].append(
-                PenaltyInstance(v, 0))
+            penalties["<g>expected encountered watches</>"].append(
+                PenaltyInstance(v, 0)
+            )
 
         penalty_descriptions = [
-            (self.penalty_not_evaluatable, c.invalid_watches,
-             'could not evaluate'),
-            (self.penalty_variable_optimized, c.optimized_out_watches,
-             'result optimized away'),
-            (self.penalty_misordered_values, c.misordered_watches,
-             'misordered result'),
-            (self.penalty_irretrievable, c.irretrievable_watches,
-             'result could not be retrieved'),
-            (self.penalty_incorrect_values, c.unexpected_watches,
-             'unexpected result'),
+            (self.penalty_not_evaluatable, c.invalid_watches, "could not evaluate"),
+            (
+                self.penalty_variable_optimized,
+                c.optimized_out_watches,
+                "result optimized away",
+            ),
+            (self.penalty_misordered_values, c.misordered_watches, "misordered result"),
+            (
+                self.penalty_irretrievable,
+                c.irretrievable_watches,
+                "result could not be retrieved",
+            ),
+            (self.penalty_incorrect_values, c.unexpected_watches, "unexpected result"),
         ]
 
         for penalty_score, watches, description in penalty_descriptions:
@@ -417,8 +429,7 @@ def _calculate_expect_watch_penalties(self, c, maximum_possible_penalty):
                 times_to_penalize -= 1
                 penalty_score = min(penalty_available, penalty_score)
                 penalty_available -= penalty_score
-                penalties[description].append(
-                    PenaltyInstance(w, penalty_score))
+                penalties[description].append(PenaltyInstance(w, penalty_score))
                 if not times_to_penalize:
                     penalty_score = 0
 
@@ -445,56 +456,60 @@ def score(self):
         try:
             return 1.0 - (self.penalty / float(self.max_penalty))
         except ZeroDivisionError:
-            return float('nan')
+            return float("nan")
 
     @property
     def summary_string(self):
         score = self.score
         isnan = score != score  # pylint: disable=comparison-with-itself
-        color = 'g'
+        color = "g"
         if score < 0.25 or isnan:
-            color = 'r'
+            color = "r"
         elif score < 0.75:
-            color = 'y'
+            color = "y"
 
-        return '<{}>({:.4f})</>'.format(color, score)
+        return "<{}>({:.4f})</>".format(color, score)
 
     @property
     def verbose_output(self):  # noqa
-        string = ''
+        string = ""
 
         # Add address resolutions if present.
         if self.address_resolutions:
             if self.resolved_addresses:
-                string += '\nResolved Addresses:\n'
+                string += "\nResolved Addresses:\n"
                 for addr, res in self.resolved_addresses.items():
                     string += f"  '{addr}': {res}\n"
             if self.unresolved_addresses:
-                string += '\n'
-                string += f'Unresolved Addresses:\n  {self.unresolved_addresses}\n'
+                string += "\n"
+                string += f"Unresolved Addresses:\n  {self.unresolved_addresses}\n"
 
-        string += ('\n')
+        string += "\n"
         for command in sorted(self.penalties):
             pen_cmd = self.penalties[command]
             maximum_possible_penalty = pen_cmd.max_penalty
             total_penalty = 0
             lines = []
             for category in sorted(pen_cmd.pen_dict):
-                lines.append('    <r>{}</>:\n'.format(category))
+                lines.append("    <r>{}</>:\n".format(category))
 
                 step_value_results = {}
                 for result, penalty in pen_cmd.pen_dict[category]:
                     if not isinstance(result, StepValueInfo):
                         continue
                     if result.expected_value not in step_value_results:
-                        step_value_results[result.expected_value] = PenaltyLineRanges(result.step_index, penalty)
+                        step_value_results[result.expected_value] = PenaltyLineRanges(
+                            result.step_index, penalty
+                        )
                     else:
-                        step_value_results[result.expected_value].add_step(result.step_index, penalty)
+                        step_value_results[result.expected_value].add_step(
+                            result.step_index, penalty
+                        )
 
                 for value, penalty_line_range in step_value_results.items():
-                    text = f'({value}): {penalty_line_range}'
+                    text = f"({value}): {penalty_line_range}"
                     total_penalty += penalty_line_range.penalty
-                    lines.append('      {}\n'.format(text))
+                    lines.append("      {}\n".format(text))
 
                 for result, penalty in pen_cmd.pen_dict[category]:
                     if isinstance(result, StepValueInfo):
@@ -504,21 +519,26 @@ def verbose_output(self):  # noqa
                     if penalty:
                         assert penalty > 0, penalty
                         total_penalty += penalty
-                        text += ' <r>[-{}]</>'.format(penalty)
-                    lines.append('      {}\n'.format(text))
+                        text += " <r>[-{}]</>".format(penalty)
+                    lines.append("      {}\n".format(text))
 
-                lines.append('\n')
+                lines.append("\n")
 
-            string += ('  <b>{}</> <y>[{}/{}]</>\n'.format(
-                command, total_penalty, maximum_possible_penalty))
+            string += "  <b>{}</> <y>[{}/{}]</>\n".format(
+                command, total_penalty, maximum_possible_penalty
+            )
             for line in lines:
-                string += (line)
-        string += ('\n')
+                string += line
+        string += "\n"
         return string
 
     @property
     def resolved_addresses(self):
-        return {addr: format_address(res) for addr, res in self.address_resolutions.items() if res is not None}
+        return {
+            addr: format_address(res)
+            for addr, res in self.address_resolutions.items()
+            if res is not None
+        }
 
     @property
     def unresolved_addresses(self):

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/Main.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/Main.py
index c69ffab66c4e4..d9efe32cc2c5c 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/Main.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/Main.py
@@ -26,106 +26,114 @@
 
 
 def _output_bug_report_message(context):
-    """ In the event of a catastrophic failure, print bug report request to the
-        user.
+    """In the event of a catastrophic failure, print bug report request to the
+    user.
     """
     context.o.red(
-        '\n\n'
-        '<g>****************************************</>\n'
-        '<b>****************************************</>\n'
-        '****************************************\n'
-        '**                                    **\n'
-        '** <y>This is a bug in <a>DExTer</>.</>           **\n'
-        '**                                    **\n'
-        '**                  <y>Please report it.</> **\n'
-        '**                                    **\n'
-        '****************************************\n'
-        '<b>****************************************</>\n'
-        '<g>****************************************</>\n'
-        '\n'
-        '<b>system:</>\n'
-        '<d>{}</>\n\n'
-        '<b>version:</>\n'
-        '<d>{}</>\n\n'
-        '<b>args:</>\n'
-        '<d>{}</>\n'
-        '\n'.format(sys.platform, version('DExTer'),
-                    [sys.executable] + sys.argv),
-        stream=PrettyOutput.stderr)
+        "\n\n"
+        "<g>****************************************</>\n"
+        "<b>****************************************</>\n"
+        "****************************************\n"
+        "**                                    **\n"
+        "** <y>This is a bug in <a>DExTer</>.</>           **\n"
+        "**                                    **\n"
+        "**                  <y>Please report it.</> **\n"
+        "**                                    **\n"
+        "****************************************\n"
+        "<b>****************************************</>\n"
+        "<g>****************************************</>\n"
+        "\n"
+        "<b>system:</>\n"
+        "<d>{}</>\n\n"
+        "<b>version:</>\n"
+        "<d>{}</>\n\n"
+        "<b>args:</>\n"
+        "<d>{}</>\n"
+        "\n".format(sys.platform, version("DExTer"), [sys.executable] + sys.argv),
+        stream=PrettyOutput.stderr,
+    )
 
 
 def get_tools_directory():
-    """ Returns directory path where DExTer tool imports can be
-        found.
+    """Returns directory path where DExTer tool imports can be
+    found.
     """
-    tools_directory = os.path.join(get_root_directory(), 'tools')
+    tools_directory = os.path.join(get_root_directory(), "tools")
     assert os.path.isdir(tools_directory), tools_directory
     return tools_directory
 
 
 def get_tool_names():
-    """ Returns a list of expected DExTer Tools
-    """
+    """Returns a list of expected DExTer Tools"""
     return [
-        'clang-opt-bisect', 'help', 'list-debuggers', 'no-tool-',
-        'run-debugger-internal-', 'test', 'view'
+        "clang-opt-bisect",
+        "help",
+        "list-debuggers",
+        "no-tool-",
+        "run-debugger-internal-",
+        "test",
+        "view",
     ]
 
 
 def _set_auto_highlights(context):
-    """Flag some strings for auto-highlighting.
-    """
-    context.o.auto_reds.extend([
-        r'[Ee]rror\:',
-        r'[Ee]xception\:',
-        r'un(expected|recognized) argument',
-    ])
-    context.o.auto_yellows.extend([
-        r'[Ww]arning\:',
-        r'\(did you mean ',
-        r'During handling of the above exception, another exception',
-    ])
+    """Flag some strings for auto-highlighting."""
+    context.o.auto_reds.extend(
+        [
+            r"[Ee]rror\:",
+            r"[Ee]xception\:",
+            r"un(expected|recognized) argument",
+        ]
+    )
+    context.o.auto_yellows.extend(
+        [
+            r"[Ww]arning\:",
+            r"\(did you mean ",
+            r"During handling of the above exception, another exception",
+        ]
+    )
 
 
 def _get_options_and_args(context):
-    """ get the options and arguments from the commandline
-    """
+    """get the options and arguments from the commandline"""
     parser = argparse.ExtArgumentParser(context, add_help=False)
-    parser.add_argument('tool', default=None, nargs='?')
+    parser.add_argument("tool", default=None, nargs="?")
     options, args = parser.parse_known_args(sys.argv[1:])
 
     return options, args
 
 
 def _get_tool_name(options):
-    """ get the name of the dexter tool (if passed) specified on the command
-        line, otherwise return 'no_tool_'.
+    """get the name of the dexter tool (if passed) specified on the command
+    line, otherwise return 'no_tool_'.
     """
     tool_name = options.tool
     if tool_name is None:
-        tool_name = 'no_tool_'
+        tool_name = "no_tool_"
     else:
         _is_valid_tool_name(tool_name)
     return tool_name
 
 
 def _is_valid_tool_name(tool_name):
-    """ check tool name matches a tool directory within the dexter tools
-        directory.
+    """check tool name matches a tool directory within the dexter tools
+    directory.
     """
     valid_tools = get_tool_names()
     if tool_name not in valid_tools:
-        raise Error('invalid tool "{}" (choose from {})'.format(
-            tool_name,
-            ', '.join([t for t in valid_tools if not t.endswith('-')])))
+        raise Error(
+            'invalid tool "{}" (choose from {})'.format(
+                tool_name, ", ".join([t for t in valid_tools if not t.endswith("-")])
+            )
+        )
 
 
 def _import_tool_module(tool_name):
-    """ Imports the python module at the tool directory specificed by
-        tool_name.
+    """Imports the python module at the tool directory specificed by
+    tool_name.
     """
     # format tool argument to reflect tool directory form.
-    tool_name = tool_name.replace('-', '_')
+    tool_name = tool_name.replace("-", "_")
 
     tools_directory = get_tools_directory()
     module_info = imp.find_module(tool_name, [tools_directory])
@@ -143,7 +151,7 @@ def tool_main(context, tool, args):
         context.version = version(tool.name)
 
         if options.version:
-            context.o.green('{}\n'.format(context.version))
+            context.o.green("{}\n".format(context.version))
             return ReturnCode.OK
 
         if options.verbose:
@@ -151,8 +159,8 @@ def tool_main(context, tool, args):
         elif options.no_warnings:
             context.logger.verbosity = 0
 
-        if (options.unittest != 'off' and not unit_tests_ok(context)):
-            raise Error('<d>unit test failures</>')
+        if options.unittest != "off" and not unit_tests_ok(context):
+            raise Error("<d>unit test failures</>")
 
         if options.colortest:
             context.o.colortest()

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/TestToolBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/TestToolBase.py
index 349a6f06b0182..dba245fed635f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/TestToolBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/TestToolBase.py
@@ -34,39 +34,45 @@ def add_tool_arguments(self, parser, defaults):
         add_heuristic_tool_arguments(parser)
 
         parser.add_argument(
-            'test_path',
+            "test_path",
             type=str,
-            metavar='<test-path>',
-            nargs='?',
-            default=os.path.abspath(
-                os.path.join(get_root_directory(), '..', 'tests')),
-            help='directory containing test(s)')
+            metavar="<test-path>",
+            nargs="?",
+            default=os.path.abspath(os.path.join(get_root_directory(), "..", "tests")),
+            help="directory containing test(s)",
+        )
 
         parser.add_argument(
-            '--results-directory',
+            "--results-directory",
             type=str,
-            metavar='<directory>',
+            metavar="<directory>",
             default=None,
-            help='directory to save results (default: none)')
+            help="directory to save results (default: none)",
+        )
 
     def handle_options(self, defaults):
         options = self.context.options
 
         if not options.builder and (options.cflags or options.ldflags):
             self.context.logger.warning(
-                '--cflags and --ldflags will be ignored when not using --builder',
-                enable_prefix=True)
+                "--cflags and --ldflags will be ignored when not using --builder",
+                enable_prefix=True,
+            )
 
         if options.vs_solution:
             options.vs_solution = os.path.abspath(options.vs_solution)
             if not os.path.isfile(options.vs_solution):
-                raise Error('<d>could not find VS solution file</> <r>"{}"</>'
-                            .format(options.vs_solution))
+                raise Error(
+                    '<d>could not find VS solution file</> <r>"{}"</>'.format(
+                        options.vs_solution
+                    )
+                )
         elif options.binary:
             options.binary = os.path.abspath(options.binary)
             if not os.path.isfile(options.binary):
-                raise Error('<d>could not find binary file</> <r>"{}"</>'
-                            .format(options.binary))
+                raise Error(
+                    '<d>could not find binary file</> <r>"{}"</>'.format(options.binary)
+                )
         else:
             try:
                 self.build_script = handle_builder_tool_options(self.context)
@@ -80,10 +86,12 @@ def handle_options(self, defaults):
 
         options.test_path = os.path.abspath(options.test_path)
         options.test_path = os.path.normcase(options.test_path)
-        if not os.path.isfile(options.test_path) and not os.path.isdir(options.test_path):
+        if not os.path.isfile(options.test_path) and not os.path.isdir(
+            options.test_path
+        ):
             raise Error(
-                '<d>could not find test path</> <r>"{}"</>'.format(
-                    options.test_path))
+                '<d>could not find test path</> <r>"{}"</>'.format(options.test_path)
+            )
 
         if options.results_directory:
             options.results_directory = os.path.abspath(options.results_directory)
@@ -92,14 +100,17 @@ def handle_options(self, defaults):
                     os.makedirs(options.results_directory, exist_ok=True)
                 except OSError as e:
                     raise Error(
-                        '<d>could not create directory</> <r>"{}"</> <y>({})</>'.
-                        format(options.results_directory, e.strerror))
+                        '<d>could not create directory</> <r>"{}"</> <y>({})</>'.format(
+                            options.results_directory, e.strerror
+                        )
+                    )
 
     def go(self) -> ReturnCode:  # noqa
         options = self.context.options
 
         options.executable = os.path.join(
-            self.context.working_directory.path, 'tmp.exe')
+            self.context.working_directory.path, "tmp.exe"
+        )
 
         # Test files contain dexter commands.
         options.test_files = []
@@ -107,21 +118,20 @@ def go(self) -> ReturnCode:  # noqa
         # contains dexter commands.
         options.source_files = []
         if os.path.isdir(options.test_path):
-            subdirs = sorted([
-                r for r, _, f in os.walk(options.test_path)
-                if 'test.cfg' in f
-            ])
+            subdirs = sorted(
+                [r for r, _, f in os.walk(options.test_path) if "test.cfg" in f]
+            )
 
             for subdir in subdirs:
                 for f in os.listdir(subdir):
                     # TODO: read file extensions from the test.cfg file instead so
                     # that this isn't just limited to C and C++.
                     file_path = os.path.normcase(os.path.join(subdir, f))
-                    if f.endswith('.cpp'):
+                    if f.endswith(".cpp"):
                         options.source_files.append(file_path)
-                    elif f.endswith('.c'):
+                    elif f.endswith(".c"):
                         options.source_files.append(file_path)
-                    elif f.endswith('.dex'):
+                    elif f.endswith(".dex"):
                         options.test_files.append(file_path)
                 # Source files can contain dexter commands too.
                 options.test_files = options.test_files + options.source_files
@@ -130,7 +140,7 @@ def go(self) -> ReturnCode:  # noqa
         else:
             # We're dealing with a direct file path to a test file. If the file is non
             # .dex, then it must be a source file.
-            if not options.test_path.endswith('.dex'):
+            if not options.test_path.endswith(".dex"):
                 options.source_files = [options.test_path]
             options.test_files = [options.test_path]
             self._run_test(self._get_test_name(options.test_path))
@@ -139,7 +149,7 @@ def go(self) -> ReturnCode:  # noqa
 
     @staticmethod
     def _is_current_directory(test_directory):
-        return test_directory == '.'
+        return test_directory == "."
 
     def _get_test_name(self, test_path):
         """Get the test name from either the test file, or the sub directory
@@ -147,8 +157,7 @@ def _get_test_name(self, test_path):
         """
         # test names are distinguished by their relative path from the
         # specified test path.
-        test_name = os.path.relpath(test_path,
-                                    self.context.options.test_path)
+        test_name = os.path.relpath(test_path, self.context.options.test_path)
         if self._is_current_directory(test_name):
             test_name = os.path.basename(test_path)
         return test_name

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/ToolBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/ToolBase.py
index 53274d3850067..4b09c134a1b6e 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/ToolBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/ToolBase.py
@@ -40,74 +40,86 @@ class defaults(object):
             pass
 
         pparser = ExtArgParse.ExtArgumentParser(
-            self.context, add_help=False, prog=self.name)
+            self.context, add_help=False, prog=self.name
+        )
 
         pparser.add_argument(
-            '--no-color-output',
-            action='store_true',
+            "--no-color-output",
+            action="store_true",
             default=False,
-            help='do not use colored output on stdout/stderr')
+            help="do not use colored output on stdout/stderr",
+        )
         pparser.add_argument(
-            '--time-report',
-            action='store_true',
+            "--time-report",
+            action="store_true",
             default=False,
-            help='display timing statistics')
+            help="display timing statistics",
+        )
 
         self.parser = ExtArgParse.ExtArgumentParser(
-            self.context, parents=[pparser], prog=self.name)
+            self.context, parents=[pparser], prog=self.name
+        )
         self.parser.add_argument(
-            '-v',
-            '--verbose',
-            action='store_true',
+            "-v",
+            "--verbose",
+            action="store_true",
             default=False,
-            help='enable verbose output (overrides --no-warnings)')
+            help="enable verbose output (overrides --no-warnings)",
+        )
         self.parser.add_argument(
-            '-V',
-            '--version',
-            action='store_true',
+            "-V",
+            "--version",
+            action="store_true",
             default=False,
-            help='display the DExTer version and exit')
+            help="display the DExTer version and exit",
+        )
         self.parser.add_argument(
-            '-w',
-            '--no-warnings',
-            action='store_true',
+            "-w",
+            "--no-warnings",
+            action="store_true",
             default=False,
-            help='suppress warning output')
+            help="suppress warning output",
+        )
         self.parser.add_argument(
-            '--unittest',
+            "--unittest",
             type=str,
-            choices=['off', 'show-failures', 'show-all'],
-            default='off',
-            help='run the DExTer codebase unit tests')
+            choices=["off", "show-failures", "show-all"],
+            default="off",
+            help="run the DExTer codebase unit tests",
+        )
 
         suppress = ExtArgParse.SUPPRESS  # pylint: disable=no-member
         self.parser.add_argument(
-            '--colortest', action='store_true', default=False, help=suppress)
+            "--colortest", action="store_true", default=False, help=suppress
+        )
         self.parser.add_argument(
-            '--error-debug', action='store_true', default=False, help=suppress)
-        defaults.working_directory = os.path.join(tempfile.gettempdir(),
-                                                  'dexter')
+            "--error-debug", action="store_true", default=False, help=suppress
+        )
+        defaults.working_directory = os.path.join(tempfile.gettempdir(), "dexter")
         self.parser.add_argument(
-            '--indent-timer-level', type=int, default=1, help=suppress)
+            "--indent-timer-level", type=int, default=1, help=suppress
+        )
         self.parser.add_argument(
-            '--working-directory',
+            "--working-directory",
             type=str,
-            metavar='<file>',
+            metavar="<file>",
             default=None,
             display_default=defaults.working_directory,
-            help='location of working directory')
+            help="location of working directory",
+        )
         self.parser.add_argument(
-            '--save-temps',
-            action='store_true',
+            "--save-temps",
+            action="store_true",
             default=False,
-            help='save temporary files')
+            help="save temporary files",
+        )
 
         self.add_tool_arguments(self.parser, defaults)
 
         # If an error is encountered during pparser, show the full usage text
         # including self.parser options. Strip the preceding 'usage: ' to avoid
         # having it appear twice.
-        pparser.usage = self.parser.format_usage().lstrip('usage: ')
+        pparser.usage = self.parser.format_usage().lstrip("usage: ")
 
         options, args = pparser.parse_known_args(args)
 

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/clang_opt_bisect/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/clang_opt_bisect/Tool.py
index adabc06ab0e7d..2902eeee3e11c 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/clang_opt_bisect/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/clang_opt_bisect/Tool.py
@@ -44,8 +44,7 @@ class Tool(TestToolBase):
     debugging experience at each value.
     """
 
-    _re_running_pass = re.compile(
-        r'^BISECT\: running pass \((\d+)\) (.+?)( \(.+\))?$')
+    _re_running_pass = re.compile(r"^BISECT\: running pass \((\d+)\) (.+?)( \(.+\))?$")
 
     def __init__(self, *args, **kwargs):
         super(Tool, self).__init__(*args, **kwargs)
@@ -53,7 +52,7 @@ def __init__(self, *args, **kwargs):
 
     @property
     def name(self):
-        return 'DExTer clang opt bisect'
+        return "DExTer clang opt bisect"
 
     def _get_bisect_limits(self):
         options = self.context.options
@@ -61,40 +60,45 @@ def _get_bisect_limits(self):
         max_limit = 999999
         limits = [max_limit for _ in options.source_files]
         all_passes = [
-            l for l in self._clang_opt_bisect_build(limits)[1].splitlines()
-            if l.startswith('BISECT: running pass (')
+            l
+            for l in self._clang_opt_bisect_build(limits)[1].splitlines()
+            if l.startswith("BISECT: running pass (")
         ]
 
         results = []
         for i, pass_ in enumerate(all_passes[1:]):
-            if pass_.startswith('BISECT: running pass (1)'):
+            if pass_.startswith("BISECT: running pass (1)"):
                 results.append(all_passes[i])
         results.append(all_passes[-1])
 
-        assert len(results) == len(
-            options.source_files), (results, options.source_files)
+        assert len(results) == len(options.source_files), (
+            results,
+            options.source_files,
+        )
 
-        limits = [
-            int(Tool._re_running_pass.match(r).group(1)) for r in results
-        ]
+        limits = [int(Tool._re_running_pass.match(r).group(1)) for r in results]
 
         return limits
 
     def handle_options(self, defaults):
         options = self.context.options
         if "clang" not in options.builder.lower():
-            raise Error("--builder %s is not supported by the clang-opt-bisect tool - only 'clang' is "
-                        "supported " % options.builder)
+            raise Error(
+                "--builder %s is not supported by the clang-opt-bisect tool - only 'clang' is "
+                "supported " % options.builder
+            )
         super(Tool, self).handle_options(defaults)
 
     def _init_debugger_controller(self):
         step_collection = DextIR(
             executable_path=self.context.options.executable,
             source_paths=self.context.options.source_files,
-            dexter_version=self.context.version)
+            dexter_version=self.context.version,
+        )
 
         step_collection.commands, new_source_files = get_command_infos(
-            self.context.options.source_files, self.context.options.source_root_dir)
+            self.context.options.source_files, self.context.options.source_root_dir
+        )
         self.context.options.source_files.extend(list(new_source_files))
 
         debugger_controller = DefaultController(self.context, step_collection)
@@ -139,15 +143,17 @@ def _run_test(self, test_name):  # noqa
                 pass_info = (0, None, None)
 
             try:
-                debugger_controller =self._init_debugger_controller()
+                debugger_controller = self._init_debugger_controller()
                 debugger_controller = run_debugger_subprocess(
-                    debugger_controller, self.context.working_directory.path)
+                    debugger_controller, self.context.working_directory.path
+                )
                 steps = debugger_controller.step_collection
             except DebuggerException:
-                steps =  DextIR(
+                steps = DextIR(
                     executable_path=self.context.options.executable,
                     source_paths=self.context.options.source_files,
-                    dexter_version=self.context.version)
+                    dexter_version=self.context.version,
+                )
 
             steps.builder = builderIR
 
@@ -161,25 +167,25 @@ def _run_test(self, test_name):  # noqa
 
             isnan = heuristic.score != heuristic.score
             if isnan or score_difference < 0:
-                color1 = 'r'
-                color2 = 'r'
+                color1 = "r"
+                color2 = "r"
             elif score_difference > 0:
-                color1 = 'g'
-                color2 = 'g'
+                color1 = "g"
+                color2 = "g"
             else:
-                color1 = 'y'
-                color2 = 'd'
+                color1 = "y"
+                color2 = "d"
 
             summary = '<{}>running pass {}/{} on "{}"'.format(
-                color2, pass_info[0], max_limits[current_file_index],
-                test_name)
+                color2, pass_info[0], max_limits[current_file_index], test_name
+            )
             if len(options.source_files) > 1:
-                summary += ' [{}/{}]'.format(current_limit, overall_limit)
+                summary += " [{}/{}]".format(current_limit, overall_limit)
 
-            pass_text = ''.join(p for p in pass_info[1:] if p)
-            summary += ': {} <{}>{:+.4f}</> <{}>{}</></>\n'.format(
-                heuristic.summary_string, color1, score_difference, color2,
-                pass_text)
+            pass_text = "".join(p for p in pass_info[1:] if p)
+            summary += ": {} <{}>{:+.4f}</> <{}>{}</></>\n".format(
+                heuristic.summary_string, color1, score_difference, color2, pass_text
+            )
 
             self.context.o.auto(summary)
 
@@ -195,79 +201,86 @@ def _run_test(self, test_name):  # noqa
             # If a results directory has been specified and this is the first
             # pass or something has changed, write a text file containing
             # verbose information on the current status.
-            if options.results_directory and (current_limit == 0 or
-                                              score_difference or
-                                              steps_changed):
-                file_name = '-'.join(
-                    str(s) for s in [
-                        'status', test_name, '{{:0>{}}}'.format(
-                            len(str(overall_limit))).format(current_limit),
-                        '{:.4f}'.format(heuristic.score).replace(
-                            '.', '_'), pass_info[1]
-                    ] if s is not None)
-
-                file_name = ''.join(
-                    c for c in file_name
-                    if c.isalnum() or c in '()-_./ ').strip().replace(
-                    ' ', '_').replace('/', '_')
-
-                output_text_path = os.path.join(options.results_directory,
-                                                '{}.txt'.format(file_name))
-                with open(output_text_path, 'w') as fp:
-                    self.context.o.auto(summary + '\n', stream=Stream(fp))
-                    self.context.o.auto(str(steps) + '\n', stream=Stream(fp))
+            if options.results_directory and (
+                current_limit == 0 or score_difference or steps_changed
+            ):
+                file_name = "-".join(
+                    str(s)
+                    for s in [
+                        "status",
+                        test_name,
+                        "{{:0>{}}}".format(len(str(overall_limit))).format(
+                            current_limit
+                        ),
+                        "{:.4f}".format(heuristic.score).replace(".", "_"),
+                        pass_info[1],
+                    ]
+                    if s is not None
+                )
+
+                file_name = (
+                    "".join(c for c in file_name if c.isalnum() or c in "()-_./ ")
+                    .strip()
+                    .replace(" ", "_")
+                    .replace("/", "_")
+                )
+
+                output_text_path = os.path.join(
+                    options.results_directory, "{}.txt".format(file_name)
+                )
+                with open(output_text_path, "w") as fp:
+                    self.context.o.auto(summary + "\n", stream=Stream(fp))
+                    self.context.o.auto(str(steps) + "\n", stream=Stream(fp))
                     self.context.o.auto(
-                        heuristic_verbose_output + '\n', stream=Stream(fp))
+                        heuristic_verbose_output + "\n", stream=Stream(fp)
+                    )
 
-                output_dextIR_path = os.path.join(options.results_directory,
-                                                  '{}.dextIR'.format(file_name))
-                with open(output_dextIR_path, 'wb') as fp:
+                output_dextIR_path = os.path.join(
+                    options.results_directory, "{}.dextIR".format(file_name)
+                )
+                with open(output_dextIR_path, "wb") as fp:
                     pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)
 
-            per_pass_score.append((test_name, pass_text,
-                                   heuristic.score))
+            per_pass_score.append((test_name, pass_text, heuristic.score))
 
             if pass_info[1]:
-                self._all_bisect_pass_summary[pass_info[1]].append(
-                    score_difference)
+                self._all_bisect_pass_summary[pass_info[1]].append(score_difference)
 
-                current_bisect_pass_summary[pass_info[1]].append(
-                    score_difference)
+                current_bisect_pass_summary[pass_info[1]].append(score_difference)
 
         if options.results_directory:
             per_pass_score_path = os.path.join(
-                options.results_directory,
-                '{}-per_pass_score.csv'.format(test_name))
+                options.results_directory, "{}-per_pass_score.csv".format(test_name)
+            )
 
-            with open(per_pass_score_path, mode='w', newline='') as fp:
-                writer = csv.writer(fp, delimiter=',')
-                writer.writerow(['Source File', 'Pass', 'Score'])
+            with open(per_pass_score_path, mode="w", newline="") as fp:
+                writer = csv.writer(fp, delimiter=",")
+                writer.writerow(["Source File", "Pass", "Score"])
 
                 for path, pass_, score in per_pass_score:
                     writer.writerow([path, pass_, score])
             self.context.o.blue('wrote "{}"\n'.format(per_pass_score_path))
 
             pass_summary_path = os.path.join(
-                options.results_directory, '{}-pass-summary.csv'.format(test_name))
+                options.results_directory, "{}-pass-summary.csv".format(test_name)
+            )
 
-            self._write_pass_summary(pass_summary_path,
-                                     current_bisect_pass_summary)
+            self._write_pass_summary(pass_summary_path, current_bisect_pass_summary)
 
     def _handle_results(self) -> ReturnCode:
         options = self.context.options
         if options.results_directory:
-            pass_summary_path = os.path.join(options.results_directory,
-                                             'overall-pass-summary.csv')
+            pass_summary_path = os.path.join(
+                options.results_directory, "overall-pass-summary.csv"
+            )
 
-            self._write_pass_summary(pass_summary_path,
-                                     self._all_bisect_pass_summary)
+            self._write_pass_summary(pass_summary_path, self._all_bisect_pass_summary)
         return ReturnCode.OK
 
     def _clang_opt_bisect_build(self, opt_bisect_limits):
         options = self.context.options
         compiler_options = [
-            '{} -mllvm -opt-bisect-limit={}'.format(options.cflags,
-                                                    opt_bisect_limit)
+            "{} -mllvm -opt-bisect-limit={}".format(options.cflags, opt_bisect_limit)
             for opt_bisect_limit in opt_bisect_limits
         ]
         linker_options = options.ldflags
@@ -279,7 +292,8 @@ def _clang_opt_bisect_build(self, opt_bisect_limits):
                 compiler_options=compiler_options,
                 linker_options=linker_options,
                 script_path=self.build_script,
-                executable_file=options.executable)
+                executable_file=options.executable,
+            )
         except BuildScriptException as e:
             raise Error(e)
 
@@ -291,11 +305,15 @@ def _write_pass_summary(self, path, pass_summary):
             # Add elems for the sum, min, and max of the values, as well as
             # 'interestingness' which is whether any of these values are
             # non-zero.
-            pass_summary_list[i] += (sum(item[1]), min(item[1]), max(item[1]),
-                                     any(item[1]))
+            pass_summary_list[i] += (
+                sum(item[1]),
+                min(item[1]),
+                max(item[1]),
+                any(item[1]),
+            )
 
             # Split the pass name into the basic name and kind.
-            pass_summary_list[i] += tuple(item[0].rsplit(' on ', 1))
+            pass_summary_list[i] += tuple(item[0].rsplit(" on ", 1))
 
         # Sort the list by the following columns in order of precedence:
         #   - Is interesting (True first)
@@ -304,16 +322,23 @@ def _write_pass_summary(self, path, pass_summary):
         #   - Kind (alphabetically)
         #   - Name (alphabetically)
         pass_summary_list.sort(
-            key=lambda tup: (not tup[5], tup[2], -len(tup[1]), tup[7], tup[6]))
-
-        with open(path, mode='w', newline='') as fp:
-            writer = csv.writer(fp, delimiter=',')
-            writer.writerow(
-                ['Pass', 'Kind', 'Sum', 'Min', 'Max', 'Interesting'])
-
-            for (_, vals, sum_, min_, max_, interesting, name,
-                 kind) in pass_summary_list:
-                writer.writerow([name, kind, sum_, min_, max_, interesting] +
-                                vals)
+            key=lambda tup: (not tup[5], tup[2], -len(tup[1]), tup[7], tup[6])
+        )
+
+        with open(path, mode="w", newline="") as fp:
+            writer = csv.writer(fp, delimiter=",")
+            writer.writerow(["Pass", "Kind", "Sum", "Min", "Max", "Interesting"])
+
+            for (
+                _,
+                vals,
+                sum_,
+                min_,
+                max_,
+                interesting,
+                name,
+                kind,
+            ) in pass_summary_list:
+                writer.writerow([name, kind, sum_, min_, max_, interesting] + vals)
 
         self.context.o.blue('wrote "{}"\n'.format(path))

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/help/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/help/Tool.py
index 2b35af4b98fb8..520bf9f59917a 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/help/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/help/Tool.py
@@ -18,35 +18,32 @@ class Tool(ToolBase):
 
     @property
     def name(self):
-        return 'DExTer help'
+        return "DExTer help"
 
     @property
     def _visible_tool_names(self):
-        return [t for t in get_tool_names() if not t.endswith('-')]
+        return [t for t in get_tool_names() if not t.endswith("-")]
 
     def add_tool_arguments(self, parser, defaults):
         parser.description = Tool.__doc__
         parser.add_argument(
-            'tool',
-            choices=self._visible_tool_names,
-            nargs='?',
-            help='name of subtool')
+            "tool", choices=self._visible_tool_names, nargs="?", help="name of subtool"
+        )
 
     def handle_options(self, defaults):
         pass
 
     @property
     def _default_text(self):
-        s = '\n<b>The following subtools are available:</>\n\n'
+        s = "\n<b>The following subtools are available:</>\n\n"
         tools_directory = get_tools_directory()
         for tool_name in sorted(self._visible_tool_names):
-            internal_name = tool_name.replace('-', '_')
+            internal_name = tool_name.replace("-", "_")
             module_info = imp.find_module(internal_name, [tools_directory])
-            tool_doc = imp.load_module(internal_name,
-                                       *module_info).Tool.__doc__
-            tool_doc = tool_doc.strip() if tool_doc else ''
-            tool_doc = textwrap.fill(' '.join(tool_doc.split()), 80)
-            s += '<g>{}</>\n{}\n\n'.format(tool_name, tool_doc)
+            tool_doc = imp.load_module(internal_name, *module_info).Tool.__doc__
+            tool_doc = tool_doc.strip() if tool_doc else ""
+            tool_doc = textwrap.fill(" ".join(tool_doc.split()), 80)
+            s += "<g>{}</>\n{}\n\n".format(tool_name, tool_doc)
         return s
 
     def go(self) -> ReturnCode:
@@ -54,8 +51,8 @@ def go(self) -> ReturnCode:
             self.context.o.auto(self._default_text)
             return ReturnCode.OK
 
-        tool_name = self.context.options.tool.replace('-', '_')
+        tool_name = self.context.options.tool.replace("-", "_")
         tools_directory = get_tools_directory()
         module_info = imp.find_module(tool_name, [tools_directory])
         module = imp.load_module(tool_name, *module_info)
-        return tool_main(self.context, module.Tool(self.context), ['--help'])
+        return tool_main(self.context, module.Tool(self.context), ["--help"])

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/list_debuggers/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/list_debuggers/Tool.py
index 5ef5d65464fa7..75e6dcb891af7 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/list_debuggers/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/list_debuggers/Tool.py
@@ -22,7 +22,7 @@ class Tool(ToolBase):
 
     @property
     def name(self):
-        return 'DExTer list debuggers'
+        return "DExTer list debuggers"
 
     def add_tool_arguments(self, parser, defaults):
         parser.description = Tool.__doc__
@@ -32,7 +32,7 @@ def handle_options(self, defaults):
         handle_debugger_tool_base_options(self.context, defaults)
 
     def go(self) -> ReturnCode:
-        with Timer('list debuggers'):
+        with Timer("list debuggers"):
             try:
                 Debuggers(self.context).list()
             except DebuggerException as e:

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/no_tool_/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/no_tool_/Tool.py
index 3d73189cd5b6d..4e25fdb9b1880 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/no_tool_/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/no_tool_/Tool.py
@@ -23,25 +23,28 @@ class Tool(ToolBase):
 
     @property
     def name(self):
-        return 'DExTer'
+        return "DExTer"
 
     def add_tool_arguments(self, parser, defaults):
         parser.description = Tool.__doc__
         parser.add_argument(
-            'subtool',
-            choices=[t for t in get_tool_names() if not t.endswith('-')],
-            nargs='?',
-            help='name of subtool')
+            "subtool",
+            choices=[t for t in get_tool_names() if not t.endswith("-")],
+            nargs="?",
+            help="name of subtool",
+        )
         parser.add_argument(
-            'subtool_options',
-            metavar='subtool-options',
-            nargs='*',
-            help='subtool specific options')
+            "subtool_options",
+            metavar="subtool-options",
+            nargs="*",
+            help="subtool specific options",
+        )
 
     def handle_options(self, defaults):
         if not self.context.options.subtool:
-            raise Error('<d>no subtool specified</>\n\n{}\n'.format(
-                self.parser.format_help()))
+            raise Error(
+                "<d>no subtool specified</>\n\n{}\n".format(self.parser.format_help())
+            )
 
     def go(self) -> ReturnCode:
         # This fn is never called because not specifying a subtool raises an

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/run_debugger_internal_/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/run_debugger_internal_/Tool.py
index 5091e607b6c65..844a3ef196d0e 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/run_debugger_internal_/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/run_debugger_internal_/Tool.py
@@ -17,6 +17,7 @@
 from dex.utils.Exceptions import DebuggerException, Error
 from dex.utils.ReturnCode import ReturnCode
 
+
 class Tool(ToolBase):
     def __init__(self, *args, **kwargs):
         self.controller_path = None
@@ -26,43 +27,42 @@ def __init__(self, *args, **kwargs):
 
     @property
     def name(self):
-        return 'DExTer run debugger internal'
+        return "DExTer run debugger internal"
 
     def add_tool_arguments(self, parser, defaults):
         parser.add_argument(
-            'controller_path',
-            type=str,
-            help='pickled debugger controller file')
+            "controller_path", type=str, help="pickled debugger controller file"
+        )
 
     def handle_options(self, defaults):
-        with open(self.context.options.controller_path, 'rb') as fp:
+        with open(self.context.options.controller_path, "rb") as fp:
             self.debugger_controller = pickle.load(fp)
-        self.controller_path = self.context.options.controller_path   
+        self.controller_path = self.context.options.controller_path
         self.context = self.debugger_controller.context
         self.options = self.context.options
         Timer.display = self.options.time_report
 
     def raise_debugger_error(self, action, debugger):
-        msg = '<d>could not {} {}</> ({})\n'.format(
-            action, debugger.name, debugger.loading_error)
+        msg = "<d>could not {} {}</> ({})\n".format(
+            action, debugger.name, debugger.loading_error
+        )
         if self.options.verbose:
-            msg = '{}\n    {}'.format(
-                msg, '    '.join(debugger.loading_error_trace))
+            msg = "{}\n    {}".format(msg, "    ".join(debugger.loading_error_trace))
         raise Error(msg)
 
     def go(self) -> ReturnCode:
-        with Timer('loading debugger'):
+        with Timer("loading debugger"):
             debugger = Debuggers(self.context).load(self.options.debugger)
 
-        with Timer('running debugger'):
+        with Timer("running debugger"):
             if not debugger.is_available:
-                self.raise_debugger_error('load', debugger)
+                self.raise_debugger_error("load", debugger)
 
             self.debugger_controller.run_debugger(debugger)
 
             if debugger.loading_error:
-                self.raise_debugger_error('run', debugger)
+                self.raise_debugger_error("run", debugger)
 
-        with open(self.controller_path, 'wb') as fp:
+        with open(self.controller_path, "wb") as fp:
             pickle.dump(self.debugger_controller, fp)
         return ReturnCode.OK

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/test/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/test/Tool.py
index dcf4838881222..9c73b56502abd 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/test/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/test/Tool.py
@@ -39,43 +39,44 @@ def penalty(self):
         try:
             return self.heuristic.penalty
         except AttributeError:
-            return float('nan')
+            return float("nan")
 
     @property
     def max_penalty(self):
         try:
             return self.heuristic.max_penalty
         except AttributeError:
-            return float('nan')
+            return float("nan")
 
     @property
     def score(self):
         try:
             return self.heuristic.score
         except AttributeError:
-            return float('nan')
+            return float("nan")
 
     def __str__(self):
         if self.error and self.context.options.verbose:
             verbose_error = str(self.error)
         else:
-            verbose_error = ''
+            verbose_error = ""
 
         if self.error:
-            script_error = (' : {}'.format(
-                self.error.script_error.splitlines()[0]) if getattr(
-                    self.error, 'script_error', None) else '')
+            script_error = (
+                " : {}".format(self.error.script_error.splitlines()[0])
+                if getattr(self.error, "script_error", None)
+                else ""
+            )
 
-            error = ' [{}{}]'.format(
-                str(self.error).splitlines()[0], script_error)
+            error = " [{}{}]".format(str(self.error).splitlines()[0], script_error)
         else:
-            error = ''
+            error = ""
 
         try:
             summary = self.heuristic.summary_string
         except AttributeError:
-            summary = '<r>nan/nan (nan)</>'
-        return '{}: {}{}\n{}'.format(self.name, summary, error, verbose_error)
+            summary = "<r>nan/nan (nan)</>"
+        return "{}: {}{}\n{}".format(self.name, summary, error, verbose_error)
 
 
 class Tool(TestToolBase):
@@ -90,18 +91,22 @@ def __init__(self, *args, **kwargs):
 
     @property
     def name(self):
-        return 'DExTer test'
+        return "DExTer test"
 
     def add_tool_arguments(self, parser, defaults):
-        parser.add_argument('--fail-lt',
-                            type=float,
-                            default=0.0, # By default TEST always succeeds.
-                            help='exit with status FAIL(2) if the test result'
-                                ' is less than this value.',
-                            metavar='<float>')
-        parser.add_argument('--calculate-average',
-                            action="store_true",
-                            help='calculate the average score of every test run')
+        parser.add_argument(
+            "--fail-lt",
+            type=float,
+            default=0.0,  # By default TEST always succeeds.
+            help="exit with status FAIL(2) if the test result"
+            " is less than this value.",
+            metavar="<float>",
+        )
+        parser.add_argument(
+            "--calculate-average",
+            action="store_true",
+            help="calculate the average score of every test run",
+        )
         super(Tool, self).add_tool_arguments(parser, defaults)
 
     def _build_test_case(self):
@@ -119,10 +124,7 @@ def _build_test_case(self):
         if options.binary:
             # Copy user's binary into the tmp working directory
             shutil.copy(options.binary, options.executable)
-            builderIR = BuilderIR(
-                name='binary',
-                cflags=[options.binary],
-                ldflags='')
+            builderIR = BuilderIR(name="binary", cflags=[options.binary], ldflags="")
         else:
             options = self.context.options
             compiler_options = [options.cflags for _ in options.source_files]
@@ -133,21 +135,24 @@ def _build_test_case(self):
                 source_files=options.source_files,
                 compiler_options=compiler_options,
                 linker_options=linker_options,
-                executable_file=options.executable)
+                executable_file=options.executable,
+            )
         return builderIR
 
     def _init_debugger_controller(self):
         step_collection = DextIR(
             executable_path=self.context.options.executable,
             source_paths=self.context.options.source_files,
-            dexter_version=self.context.version)
+            dexter_version=self.context.version,
+        )
 
         step_collection.commands, new_source_files = get_command_infos(
-            self.context.options.test_files, self.context.options.source_root_dir)
+            self.context.options.test_files, self.context.options.source_root_dir
+        )
 
         self.context.options.source_files.extend(list(new_source_files))
 
-        if 'DexLimitSteps' in step_collection.commands:
+        if "DexLimitSteps" in step_collection.commands:
             debugger_controller = ConditionalController(self.context, step_collection)
         else:
             debugger_controller = DefaultController(self.context, step_collection)
@@ -155,11 +160,11 @@ def _init_debugger_controller(self):
         return debugger_controller
 
     def _get_steps(self, builderIR):
-        """Generate a list of debugger steps from a test case.
-        """
+        """Generate a list of debugger steps from a test case."""
         debugger_controller = self._init_debugger_controller()
         debugger_controller = run_debugger_subprocess(
-            debugger_controller, self.context.working_directory.path)
+            debugger_controller, self.context.working_directory.path
+        )
         steps = debugger_controller.step_collection
         steps.builder = builderIR
         return steps
@@ -167,30 +172,31 @@ def _get_steps(self, builderIR):
     def _get_results_basename(self, test_name):
         def splitall(x):
             while len(x) > 0:
-              x, y = os.path.split(x)
-              yield y
+                x, y = os.path.split(x)
+                yield y
+
         all_components = reversed([x for x in splitall(test_name)])
-        return '_'.join(all_components)
+        return "_".join(all_components)
 
     def _get_results_path(self, test_name):
         """Returns the path to the test results directory for the test denoted
         by test_name.
         """
         assert self.context.options.results_directory != None
-        return os.path.join(self.context.options.results_directory,
-                            self._get_results_basename(test_name))
+        return os.path.join(
+            self.context.options.results_directory,
+            self._get_results_basename(test_name),
+        )
 
     def _get_results_text_path(self, test_name):
-        """Returns path results .txt file for test denoted by test_name.
-        """
+        """Returns path results .txt file for test denoted by test_name."""
         test_results_path = self._get_results_path(test_name)
-        return '{}.txt'.format(test_results_path)
+        return "{}.txt".format(test_results_path)
 
     def _get_results_pickle_path(self, test_name):
-        """Returns path results .dextIR file for test denoted by test_name.
-        """
+        """Returns path results .dextIR file for test denoted by test_name."""
         test_results_path = self._get_results_path(test_name)
-        return '{}.dextIR'.format(test_results_path)
+        return "{}.dextIR".format(test_results_path)
 
     def _record_steps(self, test_name, steps):
         """Write out the set of steps out to the test's .txt and .json
@@ -198,11 +204,11 @@ def _record_steps(self, test_name, steps):
         """
         if self.context.options.results_directory:
             output_text_path = self._get_results_text_path(test_name)
-            with open(output_text_path, 'w') as fp:
+            with open(output_text_path, "w") as fp:
                 self.context.o.auto(str(steps), stream=Stream(fp))
 
             output_dextIR_path = self._get_results_pickle_path(test_name)
-            with open(output_dextIR_path, 'wb') as fp:
+            with open(output_dextIR_path, "wb") as fp:
                 pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)
 
     def _record_score(self, test_name, heuristic):
@@ -211,7 +217,7 @@ def _record_score(self, test_name, heuristic):
         """
         if self.context.options.results_directory:
             output_text_path = self._get_results_text_path(test_name)
-            with open(output_text_path, 'a') as fp:
+            with open(output_text_path, "a") as fp:
                 self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))
 
     def _record_test_and_display(self, test_case):
@@ -235,7 +241,7 @@ def _record_successful_test(self, test_name, steps, heuristic):
         test_case = TestCase(self.context, test_name, heuristic, None)
         self._record_test_and_display(test_case)
         if self.context.options.verbose:
-            self.context.o.auto('\n{}\n'.format(steps))
+            self.context.o.auto("\n{}\n".format(steps))
             self.context.o.auto(heuristic.verbose_output)
 
     def _run_test(self, test_name):
@@ -248,8 +254,7 @@ def _run_test(self, test_name):
             self._record_steps(test_name, steps)
             heuristic_score = Heuristic(self.context, steps)
             self._record_score(test_name, heuristic_score)
-        except (BuildScriptException, DebuggerException,
-                HeuristicException) as e:
+        except (BuildScriptException, DebuggerException, HeuristicException) as e:
             self._record_failed_test(test_name, e)
             return
 
@@ -261,7 +266,7 @@ def _handle_results(self) -> ReturnCode:
         options = self.context.options
 
         if not options.verbose:
-            self.context.o.auto('\n')
+            self.context.o.auto("\n")
 
         if options.calculate_average:
             # Calculate and print the average score
@@ -274,22 +279,25 @@ def _handle_results(self) -> ReturnCode:
                     num_tests += 1
 
             if num_tests != 0:
-                print("@avg: ({:.4f})".format(score_sum/num_tests))
+                print("@avg: ({:.4f})".format(score_sum / num_tests))
 
         has_failed = lambda test: test.score < options.fail_lt or test.error
         if any(map(has_failed, self._test_cases)):
             return_code = ReturnCode.FAIL
 
         if options.results_directory:
-            summary_path = os.path.join(options.results_directory, 'summary.csv')
-            with open(summary_path, mode='w', newline='') as fp:
-                writer = csv.writer(fp, delimiter=',')
-                writer.writerow(['Test Case', 'Score', 'Error'])
+            summary_path = os.path.join(options.results_directory, "summary.csv")
+            with open(summary_path, mode="w", newline="") as fp:
+                writer = csv.writer(fp, delimiter=",")
+                writer.writerow(["Test Case", "Score", "Error"])
 
                 for test_case in self._test_cases:
-                    writer.writerow([
-                        test_case.name, '{:.4f}'.format(test_case.score),
-                        test_case.error
-                    ])
+                    writer.writerow(
+                        [
+                            test_case.name,
+                            "{:.4f}".format(test_case.score),
+                            test_case.error,
+                        ]
+                    )
 
         return return_code

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/tools/view/Tool.py b/cross-project-tests/debuginfo-tests/dexter/dex/tools/view/Tool.py
index ad7d5300035c9..9ddb6a2b26469 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/tools/view/Tool.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/tools/view/Tool.py
@@ -17,21 +17,21 @@
 
 
 class Tool(ToolBase):
-    """Given a dextIR file, display the information in a human-readable form.
-    """
+    """Given a dextIR file, display the information in a human-readable form."""
 
     @property
     def name(self):
-        return 'DExTer view'
+        return "DExTer view"
 
     def add_tool_arguments(self, parser, defaults):
         add_heuristic_tool_arguments(parser)
         parser.add_argument(
-            'input_path',
-            metavar='dextIR-file',
+            "input_path",
+            metavar="dextIR-file",
             type=str,
             default=None,
-            help='dexter dextIR file to view')
+            help="dexter dextIR file to view",
+        )
         parser.description = Tool.__doc__
 
     def handle_options(self, defaults):
@@ -39,21 +39,25 @@ def handle_options(self, defaults):
 
         options.input_path = os.path.abspath(options.input_path)
         if not os.path.isfile(options.input_path):
-            raise Error('<d>could not find dextIR file</> <r>"{}"</>'.format(
-                options.input_path))
+            raise Error(
+                '<d>could not find dextIR file</> <r>"{}"</>'.format(options.input_path)
+            )
 
     def go(self) -> ReturnCode:
         options = self.context.options
 
-        with open(options.input_path, 'rb') as fp:
+        with open(options.input_path, "rb") as fp:
             steps = pickle.load(fp)
 
         try:
             heuristic = Heuristic(self.context, steps)
         except HeuristicException as e:
-            raise Error('could not apply heuristic: {}'.format(e))
+            raise Error("could not apply heuristic: {}".format(e))
 
-        self.context.o.auto('{}\n\n{}\n\n{}\n\n'.format(
-            heuristic.summary_string, steps, heuristic.verbose_output))
+        self.context.o.auto(
+            "{}\n\n{}\n\n{}\n\n".format(
+                heuristic.summary_string, steps, heuristic.verbose_output
+            )
+        )
 
         return ReturnCode.OK

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Environment.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Environment.py
index d2df252244023..f6fd601cb8fdf 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Environment.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Environment.py
@@ -10,13 +10,14 @@
 
 
 def is_native_windows():
-    return os.name == 'nt'
+    return os.name == "nt"
 
 
 def has_pywin32():
     try:
         import win32com.client  # pylint:disable=unused-variable
         import win32api  # pylint:disable=unused-variable
+
         return True
     except ImportError:
         return False

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Exceptions.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Exceptions.py
index b636acbc0cee5..3c00752b5ee47 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Exceptions.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Exceptions.py
@@ -9,11 +9,13 @@
 
 class Dexception(Exception):
     """All dexter-specific exceptions derive from this."""
+
     pass
 
 
 class Error(Dexception):
     """Error.  Prints 'error: <message>' without a traceback."""
+
     pass
 
 
@@ -31,6 +33,7 @@ def __str__(self):
 
 class LoadDebuggerException(DebuggerException):
     """If specified debugger cannot be loaded."""
+
     pass
 
 
@@ -38,8 +41,9 @@ class NotYetLoadedDebuggerException(LoadDebuggerException):
     """If specified debugger has not yet been attempted to load."""
 
     def __init__(self):
-        super(NotYetLoadedDebuggerException,
-              self).__init__('not loaded', orig_exception=None)
+        super(NotYetLoadedDebuggerException, self).__init__(
+            "not loaded", orig_exception=None
+        )
 
 
 class CommandParseError(Dexception):
@@ -62,8 +66,10 @@ def __init__(self, *args, **kwargs):
         super(NonFloatValueInCommand, self).__init__(*args, **kwargs)
         self.value = None
 
+
 class ToolArgumentError(Dexception):
     """If a tool argument is invalid."""
+
     pass
 
 
@@ -71,10 +77,11 @@ class BuildScriptException(Dexception):
     """If there is an error in a build script file."""
 
     def __init__(self, *args, **kwargs):
-        self.script_error = kwargs.pop('script_error', None)
+        self.script_error = kwargs.pop("script_error", None)
         super(BuildScriptException, self).__init__(*args, **kwargs)
 
 
 class HeuristicException(Dexception):
     """If there was a problem with the heuristic."""
+
     pass

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/ExtArgParse.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/ExtArgParse.py
index 9fa08fb066e08..8d968a7320807 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/ExtArgParse.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/ExtArgParse.py
@@ -22,32 +22,32 @@
 
 def _did_you_mean(val, possibles):
     close_matches = difflib.get_close_matches(val, possibles)
-    did_you_mean = ''
+    did_you_mean = ""
     if close_matches:
-        did_you_mean = 'did you mean {}?'.format(' or '.join(
-            "<y>'{}'</>".format(c) for c in close_matches[:2]))
+        did_you_mean = "did you mean {}?".format(
+            " or ".join("<y>'{}'</>".format(c) for c in close_matches[:2])
+        )
     return did_you_mean
 
 
 def _colorize(message):
     lines = message.splitlines()
     for i, line in enumerate(lines):
-        lines[i] = lines[i].replace('usage:', '<g>usage:</>')
-        if line.endswith(':'):
-            lines[i] = '<g>{}</>'.format(line)
-    return '\n'.join(lines)
+        lines[i] = lines[i].replace("usage:", "<g>usage:</>")
+        if line.endswith(":"):
+            lines[i] = "<g>{}</>".format(line)
+    return "\n".join(lines)
 
 
 class ExtArgumentParser(argparse.ArgumentParser):
     def error(self, message):
-        """Use the Dexception Error mechanism (including auto-colored output).
-        """
-        raise Error('{}\n\n{}'.format(message, self.format_usage()))
+        """Use the Dexception Error mechanism (including auto-colored output)."""
+        raise Error("{}\n\n{}".format(message, self.format_usage()))
 
     # pylint: disable=redefined-builtin
     def _print_message(self, message, file=None):
         if message:
-            if file and file.name == '<stdout>':
+            if file and file.name == "<stdout>":
                 file = PrettyOutput.stdout
             else:
                 file = PrettyOutput.stderr
@@ -60,13 +60,14 @@ def format_usage(self):
         return _colorize(super(ExtArgumentParser, self).format_usage())
 
     def format_help(self):
-        return _colorize(super(ExtArgumentParser, self).format_help() + '\n\n')
+        return _colorize(super(ExtArgumentParser, self).format_help() + "\n\n")
 
     @property
     def _valid_visible_options(self):
         """A list of all non-suppressed command line flags."""
         return [
-            item for sublist in vars(self)['_actions']
+            item
+            for sublist in vars(self)["_actions"]
             for item in sublist.option_strings
             if sublist.help != argparse.SUPPRESS
         ]
@@ -83,29 +84,32 @@ def parse_args(self, args=None, namespace=None):
                     error = "unrecognized argument: <y>'{}'</>".format(arg)
                     dym = _did_you_mean(arg, self._valid_visible_options)
                     if dym:
-                        error += '  ({})'.format(dym)
+                        error += "  ({})".format(dym)
                 errors.append(error)
-            self.error('\n       '.join(errors))
+            self.error("\n       ".join(errors))
 
         return args
 
     def add_argument(self, *args, **kwargs):
         """Automatically add the default value to help text."""
-        if 'default' in kwargs:
-            default = kwargs['default']
+        if "default" in kwargs:
+            default = kwargs["default"]
             if default is None:
-                default = kwargs.pop('display_default', None)
+                default = kwargs.pop("display_default", None)
 
-            if (default and isinstance(default, (str, int, float))
-                    and default != argparse.SUPPRESS):
+            if (
+                default
+                and isinstance(default, (str, int, float))
+                and default != argparse.SUPPRESS
+            ):
                 assert (
-                    'choices' not in kwargs or default in kwargs['choices']), (
-                        "default value '{}' is not one of allowed choices: {}".
-                        format(default, kwargs['choices']))
-                if 'help' in kwargs and kwargs['help'] != argparse.SUPPRESS:
-                    assert isinstance(kwargs['help'], str), type(kwargs['help'])
-                    kwargs['help'] = ('{} (default:{})'.format(
-                        kwargs['help'], default))
+                    "choices" not in kwargs or default in kwargs["choices"]
+                ), "default value '{}' is not one of allowed choices: {}".format(
+                    default, kwargs["choices"]
+                )
+                if "help" in kwargs and kwargs["help"] != argparse.SUPPRESS:
+                    assert isinstance(kwargs["help"], str), type(kwargs["help"])
+                    kwargs["help"] = "{} (default:{})".format(kwargs["help"], default)
 
         super(ExtArgumentParser, self).add_argument(*args, **kwargs)
 
@@ -117,32 +121,34 @@ def __init__(self, context, *args, **kwargs):
 class TestExtArgumentParser(unittest.TestCase):
     def test_did_you_mean(self):
         parser = ExtArgumentParser(None)
-        parser.add_argument('--foo')
-        parser.add_argument('--qoo', help=argparse.SUPPRESS)
-        parser.add_argument('jam', nargs='?')
+        parser.add_argument("--foo")
+        parser.add_argument("--qoo", help=argparse.SUPPRESS)
+        parser.add_argument("jam", nargs="?")
 
-        parser.parse_args(['--foo', '0'])
+        parser.parse_args(["--foo", "0"])
 
-        expected = (r"^unrecognized argument\: <y>'\-\-doo'</>\s+"
-                    r"\(did you mean <y>'\-\-foo'</>\?\)\n"
-                    r"\s*<g>usage:</>")
+        expected = (
+            r"^unrecognized argument\: <y>'\-\-doo'</>\s+"
+            r"\(did you mean <y>'\-\-foo'</>\?\)\n"
+            r"\s*<g>usage:</>"
+        )
         with self.assertRaisesRegex(Error, expected):
-            parser.parse_args(['--doo'])
+            parser.parse_args(["--doo"])
 
-        parser.add_argument('--noo')
+        parser.add_argument("--noo")
 
-        expected = (r"^unrecognized argument\: <y>'\-\-doo'</>\s+"
-                    r"\(did you mean <y>'\-\-noo'</> or <y>'\-\-foo'</>\?\)\n"
-                    r"\s*<g>usage:</>")
+        expected = (
+            r"^unrecognized argument\: <y>'\-\-doo'</>\s+"
+            r"\(did you mean <y>'\-\-noo'</> or <y>'\-\-foo'</>\?\)\n"
+            r"\s*<g>usage:</>"
+        )
         with self.assertRaisesRegex(Error, expected):
-            parser.parse_args(['--doo'])
+            parser.parse_args(["--doo"])
 
-        expected = (r"^unrecognized argument\: <y>'\-\-bar'</>\n"
-                    r"\s*<g>usage:</>")
+        expected = r"^unrecognized argument\: <y>'\-\-bar'</>\n" r"\s*<g>usage:</>"
         with self.assertRaisesRegex(Error, expected):
-            parser.parse_args(['--bar'])
+            parser.parse_args(["--bar"])
 
-        expected = (r"^unexpected argument\: <y>'\-\-foo'</>\n"
-                    r"\s*<g>usage:</>")
+        expected = r"^unexpected argument\: <y>'\-\-foo'</>\n" r"\s*<g>usage:</>"
         with self.assertRaisesRegex(Error, expected):
-            parser.parse_args(['--', 'x', '--foo'])
+            parser.parse_args(["--", "x", "--foo"])

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Logging.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Logging.py
index 11386b4768844..89fe50f3626fa 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Logging.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Logging.py
@@ -8,6 +8,7 @@
 
 from dex.utils import PrettyOutput
 
+
 class Logger(object):
     def __init__(self, pretty_output: PrettyOutput):
         self.o = pretty_output
@@ -20,25 +21,25 @@ def error(self, msg, enable_prefix=True, flag=None):
         if self.verbosity < 0:
             return
         if enable_prefix:
-            msg = f'error: {msg}'
+            msg = f"error: {msg}"
         if flag:
-           msg = f'{msg} <y>[{flag}]</>'
-        self.error_color('{}\n'.format(msg), stream=PrettyOutput.stderr)
+            msg = f"{msg} <y>[{flag}]</>"
+        self.error_color("{}\n".format(msg), stream=PrettyOutput.stderr)
 
     def warning(self, msg, enable_prefix=True, flag=None):
         if self.verbosity < 1:
             return
         if enable_prefix:
-            msg = f'warning: {msg}'
+            msg = f"warning: {msg}"
         if flag:
-           msg = f'{msg} <y>[{flag}]</>'
-        self.warning_color('{}\n'.format(msg), stream=PrettyOutput.stderr)
+            msg = f"{msg} <y>[{flag}]</>"
+        self.warning_color("{}\n".format(msg), stream=PrettyOutput.stderr)
 
     def note(self, msg, enable_prefix=True, flag=None):
         if self.verbosity < 2:
             return
         if enable_prefix:
-            msg = f'note: {msg}'
+            msg = f"note: {msg}"
         if flag:
-           msg = f'{msg} <y>[{flag}]</>'
-        self.note_color('{}\n'.format(msg), stream=PrettyOutput.stderr)
+            msg = f"{msg} <y>[{flag}]</>"
+        self.note_color("{}\n".format(msg), stream=PrettyOutput.stderr)

diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/PrettyOutputBase.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/PrettyOutputBase.py
index d21db89a6ae61..0b4a47dda741d 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/PrettyOutputBase.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/PrettyOutputBase.py
@@ -37,9 +37,7 @@ class PreserveAutoColors(object):
     def __init__(self, pretty_output):
         self.pretty_output = pretty_output
         self.orig_values = {}
-        self.properties = [
-            'auto_reds', 'auto_yellows', 'auto_greens', 'auto_blues'
-        ]
+        self.properties = ["auto_reds", "auto_yellows", "auto_greens", "auto_blues"]
 
     def __enter__(self):
         for p in self.properties:
@@ -90,18 +88,16 @@ def _write(self, text, stream):
         # This function parses these tags using a very simple recursive
         # descent.
         colors = {
-            'r': self.red,
-            'y': self.yellow,
-            'g': self.green,
-            'b': self.blue,
-            'd': self.default,
-            'a': self.auto,
+            "r": self.red,
+            "y": self.yellow,
+            "g": self.green,
+            "b": self.blue,
+            "d": self.default,
+            "a": self.auto,
         }
 
         # Find all tags (whether open or close)
-        tags = [
-            t for t in re.finditer('<([{}/])>'.format(''.join(colors)), text)
-        ]
+        tags = [t for t in re.finditer("<([{}/])>".format("".join(colors)), text)]
 
         if not tags:
             # No tags.  Just write the text to the current stream and return.
@@ -109,17 +105,20 @@ def _write(self, text, stream):
             # render as colors (for example in error output from this
             # function).
             stream = self._set_valid_stream(stream)
-            stream.py.write(text.replace(r'\>', '>'))
+            stream.py.write(text.replace(r"\>", ">"))
             return
 
-        open_tags = [i for i in tags if i.group(1) != '/']
-        close_tags = [i for i in tags if i.group(1) == '/']
+        open_tags = [i for i in tags if i.group(1) != "/"]
+        close_tags = [i for i in tags if i.group(1) == "/"]
 
-        if (len(open_tags) != len(close_tags)
-                or any(o.start() >= c.start()
-                       for (o, c) in zip(open_tags, close_tags))):
-            raise Error('open/close tag mismatch in "{}"'.format(
-                text.rstrip()).replace('>', r'\>'))
+        if len(open_tags) != len(close_tags) or any(
+            o.start() >= c.start() for (o, c) in zip(open_tags, close_tags)
+        ):
+            raise Error(
+                'open/close tag mismatch in "{}"'.format(text.rstrip()).replace(
+                    ">", r"\>"
+                )
+            )
 
         open_tag = open_tags.pop(0)
 
@@ -128,7 +127,7 @@ def _write(self, text, stream):
         tag_nesting = 1
         close_tag = None
         for tag in tags[1:]:
-            if tag.group(1) == '/':
+            if tag.group(1) == "/":
                 tag_nesting -= 1
             else:
                 tag_nesting += 1
@@ -140,18 +139,18 @@ def _write(self, text, stream):
 
         # Use the method on the top of the stack for text prior to the open
         # tag.
-        before = text[:open_tag.start()]
+        before = text[: open_tag.start()]
         if before:
             self._stack[-1](before, lock=_null_lock, stream=stream)
 
         # Use the specified color for the tag itself.
         color = open_tag.group(1)
-        within = text[open_tag.end():close_tag.start()]
+        within = text[open_tag.end() : close_tag.start()]
         if within:
             colors[color](within, lock=_null_lock, stream=stream)
 
         # Use the method on the top of the stack for text after the close tag.
-        after = text[close_tag.end():]
+        after = text[close_tag.end() :]
         if after:
             self._stack[-1](after, lock=_null_lock, stream=stream)
 
@@ -168,16 +167,17 @@ def auto(self, text, stream=None, lock=_lock):
             for line in lines:
                 # This is just being cute for the sake of cuteness, but why
                 # not?
-                line = line.replace('DExTer', '<r>D<y>E<g>x<b>T</></>e</>r</>')
+                line = line.replace("DExTer", "<r>D<y>E<g>x<b>T</></>e</>r</>")
 
                 # Apply the appropriate color method if the expression matches
                 # any of
                 # the patterns we have set up.
-                for fn, regexs in ((self.red, self.auto_reds),
-                                   (self.yellow, self.auto_yellows),
-                                   (self.green,
-                                    self.auto_greens), (self.blue,
-                                                        self.auto_blues)):
+                for fn, regexs in (
+                    (self.red, self.auto_reds),
+                    (self.yellow, self.auto_yellows),
+                    (self.green, self.auto_greens),
+                    (self.blue, self.auto_blues),
+                ):
                     if any(re.search(regex, line) for regex in regexs):
                         fn(line, stream=stream, lock=_null_lock)
                         break
@@ -203,190 +203,197 @@ def yellow_impl(self, text, stream=None, **kwargs):
         pass
 
     def yellow(self, *args, **kwargs):
-        return self._call_color_impl(self.yellow, self.yellow_impl, *args,
-                                     **kwargs)
+        return self._call_color_impl(self.yellow, self.yellow_impl, *args, **kwargs)
 
     @abc.abstractmethod
     def green_impl(self, text, stream=None, **kwargs):
         pass
 
     def green(self, *args, **kwargs):
-        return self._call_color_impl(self.green, self.green_impl, *args,
-                                     **kwargs)
+        return self._call_color_impl(self.green, self.green_impl, *args, **kwargs)
 
     @abc.abstractmethod
     def blue_impl(self, text, stream=None, **kwargs):
         pass
 
     def blue(self, *args, **kwargs):
-        return self._call_color_impl(self.blue, self.blue_impl, *args,
-                                     **kwargs)
+        return self._call_color_impl(self.blue, self.blue_impl, *args, **kwargs)
 
     @abc.abstractmethod
     def default_impl(self, text, stream=None, **kwargs):
         pass
 
     def default(self, *args, **kwargs):
-        return self._call_color_impl(self.default, self.default_impl, *args,
-                                     **kwargs)
+        return self._call_color_impl(self.default, self.default_impl, *args, **kwargs)
 
     def colortest(self):
         from itertools import combinations, permutations
 
-        fns = ((self.red, 'rrr'), (self.yellow, 'yyy'), (self.green, 'ggg'),
-               (self.blue, 'bbb'), (self.default, 'ddd'))
+        fns = (
+            (self.red, "rrr"),
+            (self.yellow, "yyy"),
+            (self.green, "ggg"),
+            (self.blue, "bbb"),
+            (self.default, "ddd"),
+        )
 
         for l in range(1, len(fns) + 1):
             for comb in combinations(fns, l):
                 for perm in permutations(comb):
                     for stream in (None, self.__class__.stderr):
-                        perm[0][0]('stdout '
-                                   if stream is None else 'stderr ', stream)
+                        perm[0][0]("stdout " if stream is None else "stderr ", stream)
                         for fn, string in perm:
                             fn(string, stream)
-                        self.default('\n', stream)
+                        self.default("\n", stream)
 
         tests = [
-            (self.auto, 'default1<r>red2</>default3'),
-            (self.red, 'red1<r>red2</>red3'),
-            (self.blue, 'blue1<r>red2</>blue3'),
-            (self.red, 'red1<y>yellow2</>red3'),
-            (self.auto, 'default1<y>yellow2<r>red3</></>'),
-            (self.auto, 'default1<g>green2<r>red3</></>'),
-            (self.auto, 'default1<g>green2<r>red3</>green4</>default5'),
-            (self.auto, 'default1<g>green2</>default3<g>green4</>default5'),
-            (self.auto, '<r>red1<g>green2</>red3<g>green4</>red5</>'),
-            (self.auto, '<r>red1<y><g>green2</>yellow3</>green4</>default5'),
-            (self.auto, '<r><y><g><b><d>default1</></><r></></></>red2</>'),
-            (self.auto, '<r>red1</>default2<r>red3</><g>green4</>default5'),
-            (self.blue, '<r>red1</>blue2<r><r>red3</><g><g>green</></></>'),
-            (self.blue, '<r>r<r>r<y>y<r><r><r><r>r</></></></></></></>b'),
+            (self.auto, "default1<r>red2</>default3"),
+            (self.red, "red1<r>red2</>red3"),
+            (self.blue, "blue1<r>red2</>blue3"),
+            (self.red, "red1<y>yellow2</>red3"),
+            (self.auto, "default1<y>yellow2<r>red3</></>"),
+            (self.auto, "default1<g>green2<r>red3</></>"),
+            (self.auto, "default1<g>green2<r>red3</>green4</>default5"),
+            (self.auto, "default1<g>green2</>default3<g>green4</>default5"),
+            (self.auto, "<r>red1<g>green2</>red3<g>green4</>red5</>"),
+            (self.auto, "<r>red1<y><g>green2</>yellow3</>green4</>default5"),
+            (self.auto, "<r><y><g><b><d>default1</></><r></></></>red2</>"),
+            (self.auto, "<r>red1</>default2<r>red3</><g>green4</>default5"),
+            (self.blue, "<r>red1</>blue2<r><r>red3</><g><g>green</></></>"),
+            (self.blue, "<r>r<r>r<y>y<r><r><r><r>r</></></></></></></>b"),
         ]
 
         for fn, text in tests:
             for stream in (None, self.__class__.stderr):
-                stream_name = 'stdout' if stream is None else 'stderr'
-                fn('{} {}\n'.format(stream_name, text), stream)
+                stream_name = "stdout" if stream is None else "stderr"
+                fn("{} {}\n".format(stream_name, text), stream)
 
 
 class TestPrettyOutput(unittest.TestCase):
     class MockPrettyOutput(PrettyOutputBase):
         def red_impl(self, text, stream=None, **kwargs):
-            self._write('[R]{}[/R]'.format(text), stream)
+            self._write("[R]{}[/R]".format(text), stream)
 
         def yellow_impl(self, text, stream=None, **kwargs):
-            self._write('[Y]{}[/Y]'.format(text), stream)
+            self._write("[Y]{}[/Y]".format(text), stream)
 
         def green_impl(self, text, stream=None, **kwargs):
-            self._write('[G]{}[/G]'.format(text), stream)
+            self._write("[G]{}[/G]".format(text), stream)
 
         def blue_impl(self, text, stream=None, **kwargs):
-            self._write('[B]{}[/B]'.format(text), stream)
+            self._write("[B]{}[/B]".format(text), stream)
 
         def default_impl(self, text, stream=None, **kwargs):
-            self._write('[D]{}[/D]'.format(text), stream)
+            self._write("[D]{}[/D]".format(text), stream)
 
     def test_red(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.red('hello', stream)
-            self.assertEqual(stream.py.getvalue(), '[R]hello[/R]')
+            o.red("hello", stream)
+            self.assertEqual(stream.py.getvalue(), "[R]hello[/R]")
 
     def test_yellow(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.yellow('hello', stream)
-            self.assertEqual(stream.py.getvalue(), '[Y]hello[/Y]')
+            o.yellow("hello", stream)
+            self.assertEqual(stream.py.getvalue(), "[Y]hello[/Y]")
 
     def test_green(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.green('hello', stream)
-            self.assertEqual(stream.py.getvalue(), '[G]hello[/G]')
+            o.green("hello", stream)
+            self.assertEqual(stream.py.getvalue(), "[G]hello[/G]")
 
     def test_blue(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.blue('hello', stream)
-            self.assertEqual(stream.py.getvalue(), '[B]hello[/B]')
+            o.blue("hello", stream)
+            self.assertEqual(stream.py.getvalue(), "[B]hello[/B]")
 
     def test_default(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.default('hello', stream)
-            self.assertEqual(stream.py.getvalue(), '[D]hello[/D]')
+            o.default("hello", stream)
+            self.assertEqual(stream.py.getvalue(), "[D]hello[/D]")
 
     def test_auto(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.auto_reds.append('foo')
-            o.auto('bar\n', stream)
-            o.auto('foo\n', stream)
-            o.auto('baz\n', stream)
-            self.assertEqual(stream.py.getvalue(),
-                             '[D]bar\n[/D][R]foo\n[/R][D]baz\n[/D]')
+            o.auto_reds.append("foo")
+            o.auto("bar\n", stream)
+            o.auto("foo\n", stream)
+            o.auto("baz\n", stream)
+            self.assertEqual(
+                stream.py.getvalue(), "[D]bar\n[/D][R]foo\n[/R][D]baz\n[/D]"
+            )
 
             stream = Stream(StringIO())
-            o.auto('bar\nfoo\nbaz\n', stream)
-            self.assertEqual(stream.py.getvalue(),
-                             '[D]bar\n[/D][R]foo\n[/R][D]baz\n[/D]')
+            o.auto("bar\nfoo\nbaz\n", stream)
+            self.assertEqual(
+                stream.py.getvalue(), "[D]bar\n[/D][R]foo\n[/R][D]baz\n[/D]"
+            )
 
             stream = Stream(StringIO())
-            o.auto('barfoobaz\nbardoobaz\n', stream)
-            self.assertEqual(stream.py.getvalue(),
-                             '[R]barfoobaz\n[/R][D]bardoobaz\n[/D]')
+            o.auto("barfoobaz\nbardoobaz\n", stream)
+            self.assertEqual(
+                stream.py.getvalue(), "[R]barfoobaz\n[/R][D]bardoobaz\n[/D]"
+            )
 
-            o.auto_greens.append('doo')
+            o.auto_greens.append("doo")
             stream = Stream(StringIO())
-            o.auto('barfoobaz\nbardoobaz\n', stream)
-            self.assertEqual(stream.py.getvalue(),
-                             '[R]barfoobaz\n[/R][G]bardoobaz\n[/G]')
+            o.auto("barfoobaz\nbardoobaz\n", stream)
+            self.assertEqual(
+                stream.py.getvalue(), "[R]barfoobaz\n[/R][G]bardoobaz\n[/G]"
+            )
 
     def test_PreserveAutoColors(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
-            o.auto_reds.append('foo')
+            o.auto_reds.append("foo")
             with PreserveAutoColors(o):
-                o.auto_greens.append('bar')
+                o.auto_greens.append("bar")
                 stream = Stream(StringIO())
-                o.auto('foo\nbar\nbaz\n', stream)
-                self.assertEqual(stream.py.getvalue(),
-                                 '[R]foo\n[/R][G]bar\n[/G][D]baz\n[/D]')
+                o.auto("foo\nbar\nbaz\n", stream)
+                self.assertEqual(
+                    stream.py.getvalue(), "[R]foo\n[/R][G]bar\n[/G][D]baz\n[/D]"
+                )
 
             stream = Stream(StringIO())
-            o.auto('foo\nbar\nbaz\n', stream)
-            self.assertEqual(stream.py.getvalue(),
-                             '[R]foo\n[/R][D]bar\n[/D][D]baz\n[/D]')
+            o.auto("foo\nbar\nbaz\n", stream)
+            self.assertEqual(
+                stream.py.getvalue(), "[R]foo\n[/R][D]bar\n[/D][D]baz\n[/D]"
+            )
 
             stream = Stream(StringIO())
-            o.yellow('<a>foo</>bar<a>baz</>', stream)
+            o.yellow("<a>foo</>bar<a>baz</>", stream)
             self.assertEqual(
                 stream.py.getvalue(),
-                '[Y][Y][/Y][R]foo[/R][Y][Y]bar[/Y][D]baz[/D][Y][/Y][/Y][/Y]')
+                "[Y][Y][/Y][R]foo[/R][Y][Y]bar[/Y][D]baz[/D][Y][/Y][/Y][/Y]",
+            )
 
     def test_tags(self):
         with TestPrettyOutput.MockPrettyOutput() as o:
             stream = Stream(StringIO())
-            o.auto('<r>hi</>', stream)
-            self.assertEqual(stream.py.getvalue(),
-                             '[D][D][/D][R]hi[/R][D][/D][/D]')
+            o.auto("<r>hi</>", stream)
+            self.assertEqual(stream.py.getvalue(), "[D][D][/D][R]hi[/R][D][/D][/D]")
 
             stream = Stream(StringIO())
-            o.auto('<r><y>a</>b</>c', stream)
+            o.auto("<r><y>a</>b</>c", stream)
             self.assertEqual(
                 stream.py.getvalue(),
-                '[D][D][/D][R][R][/R][Y]a[/Y][R]b[/R][/R][D]c[/D][/D]')
+                "[D][D][/D][R][R][/R][Y]a[/Y][R]b[/R][/R][D]c[/D][/D]",
+            )
 
-            with self.assertRaisesRegex(Error, 'tag mismatch'):
-                o.auto('<r>hi', stream)
+            with self.assertRaisesRegex(Error, "tag mismatch"):
+                o.auto("<r>hi", stream)
 
-            with self.assertRaisesRegex(Error, 'tag mismatch'):
-                o.auto('hi</>', stream)
+            with self.assertRaisesRegex(Error, "tag mismatch"):
+                o.auto("hi</>", stream)
 
-            with self.assertRaisesRegex(Error, 'tag mismatch'):
-                o.auto('<r><y>hi</>', stream)
+            with self.assertRaisesRegex(Error, "tag mismatch"):
+                o.auto("<r><y>hi</>", stream)
 
-            with self.assertRaisesRegex(Error, 'tag mismatch'):
-                o.auto('<r><y>hi</><r></>', stream)
+            with self.assertRaisesRegex(Error, "tag mismatch"):
+                o.auto("<r><y>hi</><r></>", stream)
 
-            with self.assertRaisesRegex(Error, 'tag mismatch'):
-                o.auto('</>hi<r>', stream)
+            with self.assertRaisesRegex(Error, "tag mismatch"):
+                o.auto("</>hi<r>", stream)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/ReturnCode.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/ReturnCode.py
index 487d225d1b635..a3257913a90ac 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/ReturnCode.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/ReturnCode.py
@@ -9,12 +9,12 @@
 
 
 class ReturnCode(Enum):
-   """Used to indicate whole program success status."""
+    """Used to indicate whole program success status."""
 
-   OK = 0
-   _ERROR = 1        # Unhandled exceptions result in exit(1) by default.
-                     # Usage of _ERROR is discouraged:
-                     # If the program cannot run, raise an exception.
-                     # If the program runs successfully but the result is
-                     # "failure" based on the inputs, return FAIL
-   FAIL = 2
+    OK = 0
+    _ERROR = 1  # Unhandled exceptions result in exit(1) by default.
+    # Usage of _ERROR is discouraged:
+    # If the program cannot run, raise an exception.
+    # If the program runs successfully but the result is
+    # "failure" based on the inputs, return FAIL
+    FAIL = 2

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/RootDirectory.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/RootDirectory.py
index 57f204c79acd4..c41797c7a05d3 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/RootDirectory.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/RootDirectory.py
@@ -10,6 +10,6 @@
 
 
 def get_root_directory():
-    root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-    assert os.path.basename(root) == 'dex', root
+    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+    assert os.path.basename(root) == "dex", root
     return root

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timeout.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timeout.py
index d59d7d7693a6f..c356206f0c50d 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timeout.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timeout.py
@@ -11,8 +11,8 @@
 
 import time
 
-class Timeout(object):
 
+class Timeout(object):
     def __init__(self, duration: float):
         self.start = self.now
         self.duration = duration

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timer.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timer.py
index 63726f1a7578e..6548086677635 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timer.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Timer.py
@@ -13,7 +13,7 @@
 
 
 def _indent(indent):
-    return '| ' * indent
+    return "| " * indent
 
 
 class Timer(object):
@@ -28,17 +28,18 @@ def __init__(self, name=None):
     def __enter__(self):
         Timer.indent += 1
         if Timer.display and self.name:
-            indent = _indent(Timer.indent - 1) + ' _'
-            Timer.fn('{}\n'.format(_indent(Timer.indent - 1)))
-            Timer.fn('{} start {}\n'.format(indent, self.name))
+            indent = _indent(Timer.indent - 1) + " _"
+            Timer.fn("{}\n".format(_indent(Timer.indent - 1)))
+            Timer.fn("{} start {}\n".format(indent, self.name))
         return self
 
     def __exit__(self, *args):
         if Timer.display and self.name:
-            indent = _indent(Timer.indent - 1) + '|_'
-            Timer.fn('{} {} time taken: {:0.1f}s\n'.format(
-                indent, self.name, self.elapsed))
-            Timer.fn('{}\n'.format(_indent(Timer.indent - 1)))
+            indent = _indent(Timer.indent - 1) + "|_"
+            Timer.fn(
+                "{} {} time taken: {:0.1f}s\n".format(indent, self.name, self.elapsed)
+            )
+            Timer.fn("{}\n".format(_indent(Timer.indent - 1)))
         Timer.indent -= 1
 
     @property

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/UnitTests.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/UnitTests.py
index cfddce5c31103..0c4999150abb5 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/UnitTests.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/UnitTests.py
@@ -24,14 +24,14 @@ def _match_path(self, path, full_path, pattern):
         """
         d = os.path.basename(os.path.dirname(full_path))
         if is_native_windows():
-            if d == 'posix':
+            if d == "posix":
                 return False
-            if d == 'windows':
+            if d == "windows":
                 return has_pywin32()
         else:
-            if d == 'windows':
+            if d == "windows":
                 return False
-            elif d == 'dbgeng':
+            elif d == "dbgeng":
                 return False
         return fnmatch(path, pattern)
 
@@ -39,20 +39,18 @@ def _match_path(self, path, full_path, pattern):
 def unit_tests_ok(context):
 unittest.TestCase.maxDiff = None  # remove size limit from diff output.
 
-    with Timer('unit tests'):
-        suite = DexTestLoader().discover(
-            context.root_directory, pattern='*.py')
+    with Timer("unit tests"):
+        suite = DexTestLoader().discover(context.root_directory, pattern="*.py")
         stream = StringIO()
         result = unittest.TextTestRunner(verbosity=2, stream=stream).run(suite)
 
         ok = result.wasSuccessful()
-        if not ok or context.options.unittest == 'show-all':
+        if not ok or context.options.unittest == "show-all":
             with PreserveAutoColors(context.o):
-                context.o.auto_reds.extend(
-                    [r'FAIL(ED|\:)', r'\.\.\.\s(FAIL|ERROR)$'])
-                context.o.auto_greens.extend([r'^OK$', r'\.\.\.\sok$'])
-                context.o.auto_blues.extend([r'^Ran \d+ test'])
-                context.o.default('\n')
+                context.o.auto_reds.extend([r"FAIL(ED|\:)", r"\.\.\.\s(FAIL|ERROR)$"])
+                context.o.auto_greens.extend([r"^OK$", r"\.\.\.\sok$"])
+                context.o.auto_blues.extend([r"^Ran \d+ test"])
+                context.o.default("\n")
                 for line in stream.getvalue().splitlines(True):
                     context.o.auto(line, stream=PrettyOutput.stderr)
 

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Version.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Version.py
index 1a257fa7107e5..505aebaebc4f4 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/Version.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/Version.py
@@ -16,17 +16,26 @@
 def _git_version():
     dir_ = os.path.dirname(__file__)
     try:
-        branch = (check_output(
-            ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
-            stderr=STDOUT,
-            cwd=dir_).rstrip().decode('utf-8'))
-        hash_ = check_output(
-            ['git', 'rev-parse', 'HEAD'], stderr=STDOUT,
-            cwd=dir_).rstrip().decode('utf-8')
-        repo = check_output(
-            ['git', 'remote', 'get-url', 'origin'], stderr=STDOUT,
-            cwd=dir_).rstrip().decode('utf-8')
-        return '[{} {}] ({})'.format(branch, hash_, repo)
+        branch = (
+            check_output(
+                ["git", "rev-parse", "--abbrev-ref", "HEAD"], stderr=STDOUT, cwd=dir_
+            )
+            .rstrip()
+            .decode("utf-8")
+        )
+        hash_ = (
+            check_output(["git", "rev-parse", "HEAD"], stderr=STDOUT, cwd=dir_)
+            .rstrip()
+            .decode("utf-8")
+        )
+        repo = (
+            check_output(
+                ["git", "remote", "get-url", "origin"], stderr=STDOUT, cwd=dir_
+            )
+            .rstrip()
+            .decode("utf-8")
+        )
+        return "[{} {}] ({})".format(branch, hash_, repo)
     except (OSError, CalledProcessError):
         pass
     return None
@@ -34,7 +43,6 @@ def _git_version():
 
 def version(name):
     lines = []
-    lines.append(' '.join(
-        [s for s in [name, __version__, _git_version()] if s]))
-    lines.append('  using Python {}'.format(sys.version))
-    return '\n'.join(lines)
+    lines.append(" ".join([s for s in [name, __version__, _git_version()] if s]))
+    lines.append("  using Python {}".format(sys.version))
+    return "\n".join(lines)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/WorkingDirectory.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/WorkingDirectory.py
index 06d776f0501a9..28bcff798f182 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/WorkingDirectory.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/WorkingDirectory.py
@@ -13,12 +13,13 @@
 
 from dex.utils.Exceptions import Error
 
+
 class WorkingDirectory(object):
     def __init__(self, context, *args, **kwargs):
         self.context = context
         self.orig_cwd = os.getcwd()
 
-        dir_ = kwargs.get('dir', None)
+        dir_ = kwargs.get("dir", None)
         if dir_ and not os.path.isdir(dir_):
             os.makedirs(dir_, exist_ok=True)
         self.path = tempfile.mkdtemp(*args, **kwargs)
@@ -30,8 +31,7 @@ def __enter__(self):
     def __exit__(self, *args):
         os.chdir(self.orig_cwd)
         if self.context.options.save_temps:
-            self.context.o.blue('"{}" left in place [--save-temps]\n'.format(
-                self.path))
+            self.context.o.blue('"{}" left in place [--save-temps]\n'.format(self.path))
             return
 
         for _ in range(100):
@@ -41,5 +41,7 @@ def __exit__(self, *args):
             except OSError:
                 time.sleep(0.1)
 
-        self.context.logger.warning(f'"{self.path}" left in place (couldn\'t delete)', enable_prefix=True)
+        self.context.logger.warning(
+            f'"{self.path}" left in place (couldn\'t delete)', enable_prefix=True
+        )
         return

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/posix/PrettyOutput.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/posix/PrettyOutput.py
index 82cfed5dfd627..1597e1a413c6f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/posix/PrettyOutput.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/posix/PrettyOutput.py
@@ -15,7 +15,7 @@ def _color(self, text, color, stream, lock=_lock):
         stream = self._set_valid_stream(stream)
         with lock:
             if stream.color_enabled:
-                text = '\033[{}m{}\033[0m'.format(color, text)
+                text = "\033[{}m{}\033[0m".format(color, text)
             self._write(text, stream)
 
     def red_impl(self, text, stream=None, **kwargs):

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dex/utils/windows/PrettyOutput.py b/cross-project-tests/debuginfo-tests/dexter/dex/utils/windows/PrettyOutput.py
index 657406a59acbd..bb82e24155785 100644
--- a/cross-project-tests/debuginfo-tests/dexter/dex/utils/windows/PrettyOutput.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dex/utils/windows/PrettyOutput.py
@@ -16,11 +16,13 @@
 
 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
     # pylint: disable=protected-access
-    _fields_ = [('dwSize', ctypes.wintypes._COORD), ('dwCursorPosition',
-                                                     ctypes.wintypes._COORD),
-                ('wAttributes',
-                 ctypes.c_ushort), ('srWindow', ctypes.wintypes._SMALL_RECT),
-                ('dwMaximumWindowSize', ctypes.wintypes._COORD)]
+    _fields_ = [
+        ("dwSize", ctypes.wintypes._COORD),
+        ("dwCursorPosition", ctypes.wintypes._COORD),
+        ("wAttributes", ctypes.c_ushort),
+        ("srWindow", ctypes.wintypes._SMALL_RECT),
+        ("dwMaximumWindowSize", ctypes.wintypes._COORD),
+    ]
     # pylint: enable=protected-access
 
 
@@ -33,8 +35,7 @@ def __enter__(self):
         info = _CONSOLE_SCREEN_BUFFER_INFO()
 
         for s in (PrettyOutput.stdout, PrettyOutput.stderr):
-            ctypes.windll.kernel32.GetConsoleScreenBufferInfo(
-                s.os, ctypes.byref(info))
+            ctypes.windll.kernel32.GetConsoleScreenBufferInfo(s.os, ctypes.byref(info))
             s.orig_color = info.wAttributes
 
         return self
@@ -52,15 +53,15 @@ def _restore_orig_color(self, stream, lock=_lock):
             self.flush(stream)
             if stream.orig_color:
                 ctypes.windll.kernel32.SetConsoleTextAttribute(
-                    stream.os, stream.orig_color)
+                    stream.os, stream.orig_color
+                )
 
     def _color(self, text, color, stream, lock=_lock):
         stream = self._set_valid_stream(stream)
         with lock:
             try:
                 if stream.color_enabled:
-                    ctypes.windll.kernel32.SetConsoleTextAttribute(
-                        stream.os, color)
+                    ctypes.windll.kernel32.SetConsoleTextAttribute(stream.os, color)
                 self._write(text, stream)
             finally:
                 if stream.color_enabled:

diff  --git a/cross-project-tests/debuginfo-tests/dexter/dexter.py b/cross-project-tests/debuginfo-tests/dexter/dexter.py
index 49ba85db43d9c..8473cff3c7135 100755
--- a/cross-project-tests/debuginfo-tests/dexter/dexter.py
+++ b/cross-project-tests/debuginfo-tests/dexter/dexter.py
@@ -16,6 +16,6 @@
 
 from dex.tools import main
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     return_code = main()
     sys.exit(return_code.value)

diff  --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/dex_and_source/lit.local.cfg.py b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/dex_and_source/lit.local.cfg.py
index 159c376beedbd..70e844d02e7fb 100644
--- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/dex_and_source/lit.local.cfg.py
+++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/dex_and_source/lit.local.cfg.py
@@ -1 +1 @@
-config.suffixes = ['.cpp']
+config.suffixes = [".cpp"]

diff  --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary/lit.local.cfg.py b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary/lit.local.cfg.py
index e65498f23dde4..3dfcaa7e8301f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary/lit.local.cfg.py
+++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary/lit.local.cfg.py
@@ -1 +1 @@
-config.suffixes = ['.dex']
+config.suffixes = [".dex"]

diff  --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary_different_dir/lit.local.cfg.py b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary_different_dir/lit.local.cfg.py
index e65498f23dde4..3dfcaa7e8301f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary_different_dir/lit.local.cfg.py
+++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary_different_dir/lit.local.cfg.py
@@ -1 +1 @@
-config.suffixes = ['.dex']
+config.suffixes = [".dex"]

diff  --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/windows_noncanonical_path/lit.local.cfg.py b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/windows_noncanonical_path/lit.local.cfg.py
index e65498f23dde4..3dfcaa7e8301f 100644
--- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/windows_noncanonical_path/lit.local.cfg.py
+++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/windows_noncanonical_path/lit.local.cfg.py
@@ -1 +1 @@
-config.suffixes = ['.dex']
+config.suffixes = [".dex"]

diff  --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/limit_steps/lit.local.cfg b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/limit_steps/lit.local.cfg
index 4f3bbe2165a4a..b6cc2f73f8baf 100644
--- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/limit_steps/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/limit_steps/lit.local.cfg
@@ -1,4 +1,3 @@
 # The dbgeng driver doesn't support DexLimitSteps yet.
 if config.is_msvc:
     config.unsupported = True
-

diff  --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg b/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg
index 6eb9f0ada02ad..16b9690546702 100644
--- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg
@@ -1,5 +1,5 @@
-if 'dexter' not in config.available_features:
+if "dexter" not in config.available_features:
     config.unsupported = True
 
 config.name = "DExTer feature tests"
-config.suffixes = ['.cpp', '.c', '.test']
+config.suffixes = [".cpp", ".c", ".test"]

diff  --git a/cross-project-tests/debuginfo-tests/lit.local.cfg b/cross-project-tests/debuginfo-tests/lit.local.cfg
index 62f90a181d630..530f4c01646ff 100644
--- a/cross-project-tests/debuginfo-tests/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/lit.local.cfg
@@ -1,2 +1,2 @@
-if 'clang' not in config.available_features:
+if "clang" not in config.available_features:
     config.unsupported = True

diff  --git a/cross-project-tests/debuginfo-tests/llgdb-tests/lit.local.cfg b/cross-project-tests/debuginfo-tests/llgdb-tests/lit.local.cfg
index 725aa59ab5934..2b79104f73d35 100644
--- a/cross-project-tests/debuginfo-tests/llgdb-tests/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/llgdb-tests/lit.local.cfg
@@ -1,5 +1,3 @@
 # debuginfo-tests are not expected to pass in a cross-compilation setup.
-if 'native' not in config.available_features or config.is_msvc:
+if "native" not in config.available_features or config.is_msvc:
     config.unsupported = True
-
-

diff  --git a/cross-project-tests/debuginfo-tests/llgdb-tests/llgdb.py b/cross-project-tests/debuginfo-tests/llgdb-tests/llgdb.py
index a97a039e76baa..6795d3b989a30 100755
--- a/cross-project-tests/debuginfo-tests/llgdb-tests/llgdb.py
+++ b/cross-project-tests/debuginfo-tests/llgdb-tests/llgdb.py
@@ -6,14 +6,17 @@
 
 # ----------------------------------------------------------------------
 # Auto-detect lldb python module.
-import subprocess, platform, os,  sys
+import subprocess, platform, os, sys
+
 try:
     # Just try for LLDB in case PYTHONPATH is already correctly setup.
     import lldb
 except ImportError:
     # Ask the command line driver for the path to the lldb module. Copy over
     # the environment so that SDKROOT is propagated to xcrun.
-    command =  ['xcrun', 'lldb', '-P'] if platform.system() == 'Darwin' else ['lldb', '-P']
+    command = (
+        ["xcrun", "lldb", "-P"] if platform.system() == "Darwin" else ["lldb", "-P"]
+    )
     # Extend the PYTHONPATH if the path exists and isn't already there.
     lldb_python_path = subprocess.check_output(command).decode("utf-8").strip()
     if os.path.exists(lldb_python_path) and not sys.path.__contains__(lldb_python_path):
@@ -21,21 +24,27 @@
     # Try importing LLDB again.
     try:
         import lldb
-        print('imported lldb from: "%s"'%lldb_python_path)
+
+        print('imported lldb from: "%s"' % lldb_python_path)
     except ImportError:
-        print("error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly")
+        print(
+            "error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
+        )
         sys.exit(1)
 # ----------------------------------------------------------------------
 
 # Command line option handling.
 import argparse
+
 parser = argparse.ArgumentParser(description=__doc__)
-parser.add_argument('--quiet', '-q', action="store_true", help='ignored')
-parser.add_argument('-batch', action="store_true",
-                    help='exit after processing comand line')
-parser.add_argument('-n', action="store_true", help='ignore .lldb file')
-parser.add_argument('-x', dest='script', type=argparse.FileType('r'),
-                    help='execute commands from file')
+parser.add_argument("--quiet", "-q", action="store_true", help="ignored")
+parser.add_argument(
+    "-batch", action="store_true", help="exit after processing comand line"
+)
+parser.add_argument("-n", action="store_true", help="ignore .lldb file")
+parser.add_argument(
+    "-x", dest="script", type=argparse.FileType("r"), help="execute commands from file"
+)
 parser.add_argument("target", help="the program to debug")
 args = parser.parse_args()
 
@@ -46,19 +55,23 @@
 
 # Make sure to clean up the debugger on exit.
 import atexit
+
+
 def on_exit():
     debugger.Terminate()
+
+
 atexit.register(on_exit)
 
 # Don't return from lldb function calls until the process stops.
 debugger.SetAsync(False)
 
 # Create a target from a file and arch.
-arch = os.popen("file "+args.target).read().split()[-1]
+arch = os.popen("file " + args.target).read().split()[-1]
 target = debugger.CreateTargetWithFileAndArch(args.target, arch)
 
 if not target:
-    print("Could not create target %s"% args.target)
+    print("Could not create target %s" % args.target)
     sys.exit(1)
 
 if not args.script:
@@ -66,16 +79,17 @@ def on_exit():
     sys.exit(1)
 
 import re
+
 for command in args.script:
     # Strip newline and whitespaces and split into words.
     cmd = command[:-1].strip().split()
     if not cmd:
         continue
 
-    print('> %s'% command[:-1])
+    print("> %s" % command[:-1])
 
     try:
-        if re.match('^r|(run)$', cmd[0]):
+        if re.match("^r|(run)$", cmd[0]):
             error = lldb.SBError()
             launchinfo = lldb.SBLaunchInfo([])
             launchinfo.SetWorkingDirectory(os.getcwd())
@@ -84,25 +98,27 @@ def on_exit():
             if not process or error.fail:
                 state = process.GetState()
                 print("State = %d" % state)
-                print("""
+                print(
+                    """
 ERROR: Could not launch process.
 NOTE: There are several reasons why this may happen:
   * Root needs to run "DevToolsSecurity --enable".
   * Older versions of lldb cannot launch more than one process simultaneously.
-""")
+"""
+                )
                 sys.exit(1)
 
-        elif re.match('^b|(break)$', cmd[0]) and len(cmd) == 2:
-            if re.match('[0-9]+', cmd[1]):
+        elif re.match("^b|(break)$", cmd[0]) and len(cmd) == 2:
+            if re.match("[0-9]+", cmd[1]):
                 # b line
-                mainfile = target.FindFunctions('main')[0].compile_unit.file
+                mainfile = target.FindFunctions("main")[0].compile_unit.file
                 print(target.BreakpointCreateByLocation(mainfile, int(cmd[1])))
             else:
                 # b file:line
-                file, line = cmd[1].split(':')
+                file, line = cmd[1].split(":")
                 print(target.BreakpointCreateByLocation(file, int(line)))
 
-        elif re.match('^ptype$', cmd[0]) and len(cmd) == 2:
+        elif re.match("^ptype$", cmd[0]) and len(cmd) == 2:
             # GDB's ptype has multiple incarnations depending on its
             # argument (global variable, function, type).  The definition
             # here is for looking up the signature of a function and only
@@ -113,36 +129,35 @@ def on_exit():
                 continue
             print(target.FindFirstType(cmd[1]))
 
-        elif re.match('^po$', cmd[0]) and len(cmd) > 1:
+        elif re.match("^po$", cmd[0]) and len(cmd) > 1:
             try:
                 opts = lldb.SBExpressionOptions()
                 opts.SetFetchDynamicValue(True)
                 opts.SetCoerceResultToId(True)
-                print(target.EvaluateExpression(' '.join(cmd[1:]), opts))
+                print(target.EvaluateExpression(" ".join(cmd[1:]), opts))
             except:
                 # FIXME: This is a fallback path for the lab.llvm.org
                 # buildbot running OS X 10.7; it should be removed.
                 thread = process.GetThreadAtIndex(0)
                 frame = thread.GetFrameAtIndex(0)
-                print(frame.EvaluateExpression(' '.join(cmd[1:])))
+                print(frame.EvaluateExpression(" ".join(cmd[1:])))
 
-        elif re.match('^p|(print)$', cmd[0]) and len(cmd) > 1:
+        elif re.match("^p|(print)$", cmd[0]) and len(cmd) > 1:
             thread = process.GetThreadAtIndex(0)
             frame = thread.GetFrameAtIndex(0)
-            print(frame.EvaluateExpression(' '.join(cmd[1:])))
+            print(frame.EvaluateExpression(" ".join(cmd[1:])))
 
-        elif re.match('^n|(next)$', cmd[0]):
+        elif re.match("^n|(next)$", cmd[0]):
             thread = process.GetThreadAtIndex(0)
             thread.StepOver()
 
-        elif re.match('^q|(quit)$', cmd[0]):
+        elif re.match("^q|(quit)$", cmd[0]):
             sys.exit(0)
 
         else:
-            print(debugger.HandleCommand(' '.join(cmd)))
+            print(debugger.HandleCommand(" ".join(cmd)))
 
     except SystemExit:
         raise
     except:
-        print('Could not handle the command "%s"' % ' '.join(cmd))
-
+        print('Could not handle the command "%s"' % " ".join(cmd))

diff  --git a/cross-project-tests/debuginfo-tests/llvm-prettyprinters/gdb/lit.local.cfg b/cross-project-tests/debuginfo-tests/llvm-prettyprinters/gdb/lit.local.cfg
index a4200fb726c2f..222448f169e02 100644
--- a/cross-project-tests/debuginfo-tests/llvm-prettyprinters/gdb/lit.local.cfg
+++ b/cross-project-tests/debuginfo-tests/llvm-prettyprinters/gdb/lit.local.cfg
@@ -1,13 +1,11 @@
 import lit.util
 
 # debuginfo-tests are not expected to pass in a cross-compilation setup.
-if 'native' not in config.available_features or lit.util.which('gdb') is None:
+if "native" not in config.available_features or lit.util.which("gdb") is None:
     config.unsupported = True
 
 if config.mlir_src_root:
-  config.substitutions.append(("%mlir_src_root", config.mlir_src_root))
-  config.available_features.add('mlir')
-
-config.suffixes = ['.gdb']
-
+    config.substitutions.append(("%mlir_src_root", config.mlir_src_root))
+    config.available_features.add("mlir")
 
+config.suffixes = [".gdb"]

diff  --git a/cross-project-tests/intrinsic-header-tests/lit.local.cfg b/cross-project-tests/intrinsic-header-tests/lit.local.cfg
index 62f90a181d630..530f4c01646ff 100644
--- a/cross-project-tests/intrinsic-header-tests/lit.local.cfg
+++ b/cross-project-tests/intrinsic-header-tests/lit.local.cfg
@@ -1,2 +1,2 @@
-if 'clang' not in config.available_features:
+if "clang" not in config.available_features:
     config.unsupported = True

diff  --git a/cross-project-tests/lit.cfg.py b/cross-project-tests/lit.cfg.py
index 4bdd2d0f1e0c9..350dd45dfd9f8 100644
--- a/cross-project-tests/lit.cfg.py
+++ b/cross-project-tests/lit.cfg.py
@@ -16,18 +16,18 @@
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'cross-project-tests'
+config.name = "cross-project-tests"
 
 # testFormat: The test format to use to interpret tests.
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cl', '.cpp', '.m']
+config.suffixes = [".c", ".cl", ".cpp", ".m"]
 
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = config.cross_project_tests_src_root
@@ -38,154 +38,191 @@
 llvm_config.use_default_substitutions()
 
 tools = [
-    ToolSubst('%test_debuginfo', command=os.path.join(
-        config.cross_project_tests_src_root, 'debuginfo-tests',
-        'llgdb-tests', 'test_debuginfo.pl')),
+    ToolSubst(
+        "%test_debuginfo",
+        command=os.path.join(
+            config.cross_project_tests_src_root,
+            "debuginfo-tests",
+            "llgdb-tests",
+            "test_debuginfo.pl",
+        ),
+    ),
     ToolSubst("%llvm_src_root", config.llvm_src_root),
     ToolSubst("%llvm_tools_dir", config.llvm_tools_dir),
 ]
 
+
 def get_required_attr(config, attr_name):
-  attr_value = getattr(config, attr_name, None)
-  if attr_value == None:
-    lit_config.fatal(
-      "No attribute %r in test configuration! You may need to run "
-      "tests from your build directory or add this attribute "
-      "to lit.site.cfg " % attr_name)
-  return attr_value
+    attr_value = getattr(config, attr_name, None)
+    if attr_value == None:
+        lit_config.fatal(
+            "No attribute %r in test configuration! You may need to run "
+            "tests from your build directory or add this attribute "
+            "to lit.site.cfg " % attr_name
+        )
+    return attr_value
+
 
 # If this is an MSVC environment, the tests at the root of the tree are
 # unsupported. The local win_cdb test suite, however, is supported.
 is_msvc = get_required_attr(config, "is_msvc")
 if is_msvc:
-    config.available_features.add('msvc')
+    config.available_features.add("msvc")
     # FIXME: We should add some llvm lit utility code to find the Windows SDK
     # and set up the environment appopriately.
-    win_sdk = 'C:/Program Files (x86)/Windows Kits/10/'
-    arch = 'x64'
-    llvm_config.with_system_environment(['LIB', 'LIBPATH', 'INCLUDE'])
+    win_sdk = "C:/Program Files (x86)/Windows Kits/10/"
+    arch = "x64"
+    llvm_config.with_system_environment(["LIB", "LIBPATH", "INCLUDE"])
     # Clear _NT_SYMBOL_PATH to prevent cdb from attempting to load symbols from
     # the network.
-    llvm_config.with_environment('_NT_SYMBOL_PATH', '')
-    tools.append(ToolSubst('%cdb', '"%s"' % os.path.join(win_sdk, 'Debuggers',
-                                                         arch, 'cdb.exe')))
+    llvm_config.with_environment("_NT_SYMBOL_PATH", "")
+    tools.append(
+        ToolSubst("%cdb", '"%s"' % os.path.join(win_sdk, "Debuggers", arch, "cdb.exe"))
+    )
 
 # clang_src_dir and lld_src_dir are not used by these tests, but are required by
 # use_clang() and use_lld() respectively, so set them to "", if needed.
-if not hasattr(config, 'clang_src_dir'):
+if not hasattr(config, "clang_src_dir"):
     config.clang_src_dir = ""
-llvm_config.use_clang(required=('clang' in config.llvm_enabled_projects))
+llvm_config.use_clang(required=("clang" in config.llvm_enabled_projects))
 
-if not hasattr(config, 'lld_src_dir'):
+if not hasattr(config, "lld_src_dir"):
     config.lld_src_dir = ""
-llvm_config.use_lld(required=('lld' in config.llvm_enabled_projects))
+llvm_config.use_lld(required=("lld" in config.llvm_enabled_projects))
 
-if 'compiler-rt' in config.llvm_enabled_projects:
-  config.available_features.add('compiler-rt')
+if "compiler-rt" in config.llvm_enabled_projects:
+    config.available_features.add("compiler-rt")
 
 # Check which debuggers are available:
-lldb_path = llvm_config.use_llvm_tool('lldb', search_env='LLDB')
+lldb_path = llvm_config.use_llvm_tool("lldb", search_env="LLDB")
 
 if lldb_path is not None:
-    config.available_features.add('lldb')
+    config.available_features.add("lldb")
+
 
 def configure_dexter_substitutions():
-  """Configure substitutions for host platform and return list of dependencies
-  """
-  # Produce dexter path, lldb path, and combine into the %dexter substitution
-  # for running a test.
-  dexter_path = os.path.join(config.cross_project_tests_src_root,
-                             'debuginfo-tests', 'dexter', 'dexter.py')
-  dexter_test_cmd = '"{}" "{}" test'.format(sys.executable, dexter_path)
-  if lldb_path is not None:
-    dexter_test_cmd += ' --lldb-executable "{}"'.format(lldb_path)
-  tools.append(ToolSubst('%dexter', dexter_test_cmd))
-
-  # For testing other bits of dexter that aren't under the "test" subcommand,
-  # have a %dexter_base substitution.
-  dexter_base_cmd = '"{}" "{}"'.format(sys.executable, dexter_path)
-  tools.append(ToolSubst('%dexter_base', dexter_base_cmd))
-
-  # Set up commands for DexTer regression tests.
-  # Builder, debugger, optimisation level and several other flags 
diff er
-  # depending on whether we're running a unix like or windows os.
-  if platform.system() == 'Windows':
-    # The Windows builder script uses lld.
-    dependencies = ['clang', 'lld-link']
-    dexter_regression_test_builder = 'clang-cl_vs2015'
-    dexter_regression_test_debugger = 'dbgeng'
-    dexter_regression_test_cflags = '/Zi /Od'
-    dexter_regression_test_ldflags = '/Zi'
-  else:
-    # Use lldb as the debugger on non-Windows platforms.
-    dependencies = ['clang', 'lldb']
-    dexter_regression_test_builder = 'clang'
-    dexter_regression_test_debugger = 'lldb'
-    dexter_regression_test_cflags = '-O0 -glldb'
-    dexter_regression_test_ldflags = ''
-
-  tools.append(ToolSubst('%dexter_regression_test_builder', dexter_regression_test_builder))
-  tools.append(ToolSubst('%dexter_regression_test_debugger', dexter_regression_test_debugger))
-  tools.append(ToolSubst('%dexter_regression_test_cflags', dexter_regression_test_cflags))
-  tools.append(ToolSubst('%dexter_regression_test_ldflags', dexter_regression_test_cflags))
-
-  # Typical command would take the form:
-  # ./path_to_py/python.exe ./path_to_dex/dexter.py test --fail-lt 1.0 -w --builder clang --debugger lldb --cflags '-O0 -g'
-  # Exclude build flags for %dexter_regression_base.
-  dexter_regression_test_base = ' '.join(
-    # "python", "dexter.py", test, fail_mode, builder, debugger, cflags, ldflags
-    ['"{}"'.format(sys.executable),
-    '"{}"'.format(dexter_path),
-    'test',
-    '--fail-lt 1.0 -w',
-    '--debugger', dexter_regression_test_debugger])
-  tools.append(ToolSubst('%dexter_regression_base', dexter_regression_test_base))
-
-  # Include build flags for %dexter_regression_test.
-  dexter_regression_test_build = ' '.join([
-    dexter_regression_test_base,
-    '--builder', dexter_regression_test_builder,
-    '--cflags "',  dexter_regression_test_cflags + '"',
-    '--ldflags "', dexter_regression_test_ldflags + '"'])
-  tools.append(ToolSubst('%dexter_regression_test', dexter_regression_test_build))
-  return dependencies
+    """Configure substitutions for host platform and return list of dependencies"""
+    # Produce dexter path, lldb path, and combine into the %dexter substitution
+    # for running a test.
+    dexter_path = os.path.join(
+        config.cross_project_tests_src_root, "debuginfo-tests", "dexter", "dexter.py"
+    )
+    dexter_test_cmd = '"{}" "{}" test'.format(sys.executable, dexter_path)
+    if lldb_path is not None:
+        dexter_test_cmd += ' --lldb-executable "{}"'.format(lldb_path)
+    tools.append(ToolSubst("%dexter", dexter_test_cmd))
+
+    # For testing other bits of dexter that aren't under the "test" subcommand,
+    # have a %dexter_base substitution.
+    dexter_base_cmd = '"{}" "{}"'.format(sys.executable, dexter_path)
+    tools.append(ToolSubst("%dexter_base", dexter_base_cmd))
+
+    # Set up commands for DexTer regression tests.
+    # Builder, debugger, optimisation level and several other flags 
diff er
+    # depending on whether we're running a unix like or windows os.
+    if platform.system() == "Windows":
+        # The Windows builder script uses lld.
+        dependencies = ["clang", "lld-link"]
+        dexter_regression_test_builder = "clang-cl_vs2015"
+        dexter_regression_test_debugger = "dbgeng"
+        dexter_regression_test_cflags = "/Zi /Od"
+        dexter_regression_test_ldflags = "/Zi"
+    else:
+        # Use lldb as the debugger on non-Windows platforms.
+        dependencies = ["clang", "lldb"]
+        dexter_regression_test_builder = "clang"
+        dexter_regression_test_debugger = "lldb"
+        dexter_regression_test_cflags = "-O0 -glldb"
+        dexter_regression_test_ldflags = ""
+
+    tools.append(
+        ToolSubst("%dexter_regression_test_builder", dexter_regression_test_builder)
+    )
+    tools.append(
+        ToolSubst("%dexter_regression_test_debugger", dexter_regression_test_debugger)
+    )
+    tools.append(
+        ToolSubst("%dexter_regression_test_cflags", dexter_regression_test_cflags)
+    )
+    tools.append(
+        ToolSubst("%dexter_regression_test_ldflags", dexter_regression_test_cflags)
+    )
+
+    # Typical command would take the form:
+    # ./path_to_py/python.exe ./path_to_dex/dexter.py test --fail-lt 1.0 -w --builder clang --debugger lldb --cflags '-O0 -g'
+    # Exclude build flags for %dexter_regression_base.
+    dexter_regression_test_base = " ".join(
+        # "python", "dexter.py", test, fail_mode, builder, debugger, cflags, ldflags
+        [
+            '"{}"'.format(sys.executable),
+            '"{}"'.format(dexter_path),
+            "test",
+            "--fail-lt 1.0 -w",
+            "--debugger",
+            dexter_regression_test_debugger,
+        ]
+    )
+    tools.append(ToolSubst("%dexter_regression_base", dexter_regression_test_base))
+
+    # Include build flags for %dexter_regression_test.
+    dexter_regression_test_build = " ".join(
+        [
+            dexter_regression_test_base,
+            "--builder",
+            dexter_regression_test_builder,
+            '--cflags "',
+            dexter_regression_test_cflags + '"',
+            '--ldflags "',
+            dexter_regression_test_ldflags + '"',
+        ]
+    )
+    tools.append(ToolSubst("%dexter_regression_test", dexter_regression_test_build))
+    return dependencies
+
 
 def add_host_triple(clang):
-  return '{} --target={}'.format(clang, config.host_triple)
+    return "{} --target={}".format(clang, config.host_triple)
+
 
 # The set of arches we can build.
 targets = set(config.targets_to_build)
 # Add aliases to the target set.
-if 'AArch64' in targets:
-  targets.add('arm64')
-if 'ARM' in config.targets_to_build:
-  targets.add('thumbv7')
+if "AArch64" in targets:
+    targets.add("arm64")
+if "ARM" in config.targets_to_build:
+    targets.add("thumbv7")
+
 
 def can_target_host():
-  # Check if the targets set contains anything that looks like our host arch.
-  # The arch name in the triple and targets set may be spelled 
diff erently
-  # (e.g. x86 vs X86).
-  return any(config.host_triple.lower().startswith(x.lower())
-             for x in targets)
+    # Check if the targets set contains anything that looks like our host arch.
+    # The arch name in the triple and targets set may be spelled 
diff erently
+    # (e.g. x86 vs X86).
+    return any(config.host_triple.lower().startswith(x.lower()) for x in targets)
+
 
 # Dexter tests run on the host machine. If the host arch is supported add
 # 'dexter' as an available feature and force the dexter tests to use the host
 # triple.
 if can_target_host():
-  if config.host_triple != config.target_triple:
-    print('Forcing dexter tests to use host triple {}.'.format(config.host_triple))
-  dependencies = configure_dexter_substitutions()
-  if all(d in config.available_features for d in dependencies):
-    config.available_features.add('dexter')
-    llvm_config.with_environment('PATHTOCLANG',
-                                 add_host_triple(llvm_config.config.clang))
-    llvm_config.with_environment('PATHTOCLANGPP',
-                                 add_host_triple(llvm_config.use_llvm_tool('clang++')))
-    llvm_config.with_environment('PATHTOCLANGCL',
-                                 add_host_triple(llvm_config.use_llvm_tool('clang-cl')))
+    if config.host_triple != config.target_triple:
+        print("Forcing dexter tests to use host triple {}.".format(config.host_triple))
+    dependencies = configure_dexter_substitutions()
+    if all(d in config.available_features for d in dependencies):
+        config.available_features.add("dexter")
+        llvm_config.with_environment(
+            "PATHTOCLANG", add_host_triple(llvm_config.config.clang)
+        )
+        llvm_config.with_environment(
+            "PATHTOCLANGPP", add_host_triple(llvm_config.use_llvm_tool("clang++"))
+        )
+        llvm_config.with_environment(
+            "PATHTOCLANGCL", add_host_triple(llvm_config.use_llvm_tool("clang-cl"))
+        )
 else:
-  print('Host triple {} not supported. Skipping dexter tests in the '
-        'debuginfo-tests project.'.format(config.host_triple))
+    print(
+        "Host triple {} not supported. Skipping dexter tests in the "
+        "debuginfo-tests project.".format(config.host_triple)
+    )
 
 tool_dirs = [config.llvm_tools_dir]
 
@@ -193,47 +230,54 @@ def can_target_host():
 
 lit.util.usePlatformSdkOnDarwin(config, lit_config)
 
-if platform.system() == 'Darwin':
-    xcode_lldb_vers = subprocess.check_output(['xcrun', 'lldb', '--version']).decode("utf-8")
-    match = re.search('lldb-(\d+)', xcode_lldb_vers)
+if platform.system() == "Darwin":
+    xcode_lldb_vers = subprocess.check_output(["xcrun", "lldb", "--version"]).decode(
+        "utf-8"
+    )
+    match = re.search("lldb-(\d+)", xcode_lldb_vers)
     if match:
         apple_lldb_vers = int(match.group(1))
         if apple_lldb_vers < 1000:
-            config.available_features.add('apple-lldb-pre-1000')
+            config.available_features.add("apple-lldb-pre-1000")
+
 
 def get_gdb_version_string():
-  """Return gdb's version string, or None if gdb cannot be found or the
-  --version output is formatted unexpectedly.
-  """
-  # See if we can get a gdb version, e.g.
-  #   $ gdb --version
-  #   GNU gdb (GDB) 10.2
-  #   ...More stuff...
-  try:
-    gdb_vers_lines = subprocess.check_output(['gdb', '--version']).decode().splitlines()
-  except:
-    return None # We coudln't find gdb or something went wrong running it.
-  if len(gdb_vers_lines) < 1:
-    print("Unkown GDB version format (too few lines)", file=sys.stderr)
-    return None
-  match = re.search('GNU gdb \(.*?\) ((\d|\.)+)', gdb_vers_lines[0].strip())
-  if match is None:
-    print(f"Unkown GDB version format: {gdb_vers_lines[0]}", file=sys.stderr)
-    return None
-  return match.group(1)
+    """Return gdb's version string, or None if gdb cannot be found or the
+    --version output is formatted unexpectedly.
+    """
+    # See if we can get a gdb version, e.g.
+    #   $ gdb --version
+    #   GNU gdb (GDB) 10.2
+    #   ...More stuff...
+    try:
+        gdb_vers_lines = (
+            subprocess.check_output(["gdb", "--version"]).decode().splitlines()
+        )
+    except:
+        return None  # We coudln't find gdb or something went wrong running it.
+    if len(gdb_vers_lines) < 1:
+        print("Unkown GDB version format (too few lines)", file=sys.stderr)
+        return None
+    match = re.search("GNU gdb \(.*?\) ((\d|\.)+)", gdb_vers_lines[0].strip())
+    if match is None:
+        print(f"Unkown GDB version format: {gdb_vers_lines[0]}", file=sys.stderr)
+        return None
+    return match.group(1)
+
 
 def get_clang_default_dwarf_version_string(triple):
-  """Return the default dwarf version string for clang on this (host) platform
-  or None if we can't work it out.
-  """
-  # Get the flags passed by the driver and look for -dwarf-version.
-  cmd = f'{llvm_config.use_llvm_tool("clang")} -g -xc  -c - -v -### --target={triple}'
-  stderr = subprocess.run(cmd.split(), stderr=subprocess.PIPE).stderr.decode()
-  match = re.search('-dwarf-version=(\d+)', stderr)
-  if match is None:
-    print("Cannot determine default dwarf version", file=sys.stderr)
-    return None
-  return match.group(1)
+    """Return the default dwarf version string for clang on this (host) platform
+    or None if we can't work it out.
+    """
+    # Get the flags passed by the driver and look for -dwarf-version.
+    cmd = f'{llvm_config.use_llvm_tool("clang")} -g -xc  -c - -v -### --target={triple}'
+    stderr = subprocess.run(cmd.split(), stderr=subprocess.PIPE).stderr.decode()
+    match = re.search("-dwarf-version=(\d+)", stderr)
+    if match is None:
+        print("Cannot determine default dwarf version", file=sys.stderr)
+        return None
+    return match.group(1)
+
 
 # Some cross-project-tests use gdb, but not all versions of gdb are compatible
 # with clang's dwarf. Add feature `gdb-clang-incompatibility` to signal that
@@ -242,17 +286,18 @@ def get_clang_default_dwarf_version_string(triple):
 dwarf_version_string = get_clang_default_dwarf_version_string(config.host_triple)
 gdb_version_string = get_gdb_version_string()
 if dwarf_version_string and gdb_version_string:
-  if int(dwarf_version_string) >= 5:
-    if LooseVersion(gdb_version_string) < LooseVersion('10.1'):
-      # Example for llgdb-tests, which use lldb on darwin but gdb elsewhere:
-      # XFAIL: !system-darwin && gdb-clang-incompatibility
-      config.available_features.add('gdb-clang-incompatibility')
-      print("XFAIL some tests: use gdb version >= 10.1 to restore test coverage", file=sys.stderr)
-
-llvm_config.feature_config(
-    [('--build-mode', {'Debug|RelWithDebInfo': 'debug-info'})]
-)
+    if int(dwarf_version_string) >= 5:
+        if LooseVersion(gdb_version_string) < LooseVersion("10.1"):
+            # Example for llgdb-tests, which use lldb on darwin but gdb elsewhere:
+            # XFAIL: !system-darwin && gdb-clang-incompatibility
+            config.available_features.add("gdb-clang-incompatibility")
+            print(
+                "XFAIL some tests: use gdb version >= 10.1 to restore test coverage",
+                file=sys.stderr,
+            )
+
+llvm_config.feature_config([("--build-mode", {"Debug|RelWithDebInfo": "debug-info"})])
 
 # Allow 'REQUIRES: XXX-registered-target' in tests.
 for arch in config.targets_to_build:
-    config.available_features.add(arch.lower() + '-registered-target')
+    config.available_features.add(arch.lower() + "-registered-target")

diff  --git a/flang/docs/FIR/CreateFIRLangRef.py b/flang/docs/FIR/CreateFIRLangRef.py
index d61671796fe8a..b6077364cdee6 100644
--- a/flang/docs/FIR/CreateFIRLangRef.py
+++ b/flang/docs/FIR/CreateFIRLangRef.py
@@ -4,16 +4,16 @@
 import os
 
 # These paths are relative to flang/docs in the build directory, not source, as that's where this tool is executed.
-HEADER_PATH = os.path.join('Source', 'FIR', 'FIRLangRef_Header.md')
-DOCS_PATH   = os.path.join('Dialect', 'FIRLangRef.md')
-OUTPUT_PATH = os.path.join('Source', 'FIRLangRef.md')
+HEADER_PATH = os.path.join("Source", "FIR", "FIRLangRef_Header.md")
+DOCS_PATH = os.path.join("Dialect", "FIRLangRef.md")
+OUTPUT_PATH = os.path.join("Source", "FIRLangRef.md")
 
 # 1. Writes line 1 from docs to output, (comment line that the file is autogenerated)
 # 2. Adds a new line
 # 3. Writes the entire header to the output file
 # 4. Writes the remainder of docs to the output file
-with open(OUTPUT_PATH, 'w') as output:
-    with open(HEADER_PATH, 'r') as header, open(DOCS_PATH, 'r') as docs:
+with open(OUTPUT_PATH, "w") as output:
+    with open(HEADER_PATH, "r") as header, open(DOCS_PATH, "r") as docs:
         output.write(docs.readline())
         output.write("\n")
         output.write(header.read())

diff  --git a/flang/docs/conf.py b/flang/docs/conf.py
index 7ad291526b697..117cd1f1c97ae 100644
--- a/flang/docs/conf.py
+++ b/flang/docs/conf.py
@@ -16,76 +16,79 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # https://github.com/readthedocs/recommonmark/issues/177
-#Method used to remove the warning message.
+# Method used to remove the warning message.
 class CustomCommonMarkParser(CommonMarkParser):
     def visit_document(self, node):
         pass
 
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx']
+extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax", "sphinx.ext.intersphinx"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
 source_suffix = {
-    '.rst': 'restructuredtext',
+    ".rst": "restructuredtext",
 }
 try:
-  import recommonmark
+    import recommonmark
 except ImportError:
-  # manpages do not use any .md sources
-  if not tags.has('builder-man'):
-    raise
+    # manpages do not use any .md sources
+    if not tags.has("builder-man"):
+        raise
 else:
-  import sphinx
-  if sphinx.version_info >= (3, 0):
-    # This requires 0.5 or later.
-    extensions.append('recommonmark')
-  else:
-    source_parsers = {'.md': CustomCommonMarkParser}
-  source_suffix['.md'] = 'markdown'
-  extensions.append('sphinx_markdown_tables')
-
-  # Setup AutoStructify for inline .rst toctrees in index.md
-  from recommonmark.transform import AutoStructify
-
-  # Stolen from https://github.com/readthedocs/recommonmark/issues/93
-  # Monkey patch to fix recommonmark 0.4 doc reference issues.
-  from recommonmark.states import DummyStateMachine
-  orig_run_role = DummyStateMachine.run_role
-  def run_role(self, name, options=None, content=None):
-    if name == 'doc':
-      name = 'any'
-      return orig_run_role(self, name, options, content)
-  DummyStateMachine.run_role = run_role
-
-  def setup(app):
-    # Disable inline math to avoid
-    # https://github.com/readthedocs/recommonmark/issues/120 in Extensions.md
-    app.add_config_value('recommonmark_config', {
-      'enable_inline_math': False
-    }, True)
-    app.add_transform(AutoStructify)
+    import sphinx
+
+    if sphinx.version_info >= (3, 0):
+        # This requires 0.5 or later.
+        extensions.append("recommonmark")
+    else:
+        source_parsers = {".md": CustomCommonMarkParser}
+    source_suffix[".md"] = "markdown"
+    extensions.append("sphinx_markdown_tables")
+
+    # Setup AutoStructify for inline .rst toctrees in index.md
+    from recommonmark.transform import AutoStructify
+
+    # Stolen from https://github.com/readthedocs/recommonmark/issues/93
+    # Monkey patch to fix recommonmark 0.4 doc reference issues.
+    from recommonmark.states import DummyStateMachine
+
+    orig_run_role = DummyStateMachine.run_role
+
+    def run_role(self, name, options=None, content=None):
+        if name == "doc":
+            name = "any"
+            return orig_run_role(self, name, options, content)
+
+    DummyStateMachine.run_role = run_role
+
+    def setup(app):
+        # Disable inline math to avoid
+        # https://github.com/readthedocs/recommonmark/issues/120 in Extensions.md
+        app.add_config_value("recommonmark_config", {"enable_inline_math": False}, True)
+        app.add_transform(AutoStructify)
+
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Flang'
-copyright = u'2017-%d, The Flang Team' % date.today().year
+project = "Flang"
+copyright = "2017-%d, The Flang Team" % date.today().year
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -94,138 +97,136 @@ def setup(app):
 # everytime a new release comes out.
 #
 # The short version.
-#version = '0'
+# version = '0'
 # The full version, including alpha/beta/rc tags.
-#release = '0'
+# release = '0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'analyzer']
+exclude_patterns = ["_build", "analyzer"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'llvm-theme'
+html_theme = "llvm-theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-html_theme_options = { "nosidebar": False }
+html_theme_options = {"nosidebar": False}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["_themes"]
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-html_title = 'The Flang Compiler'
+html_title = "The Flang Compiler"
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 html_context = {
-    'css_files': [
-        '_static/llvm.css'
-        ],
-    }
+    "css_files": ["_static/llvm.css"],
+}
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%b %d, %Y'
+html_last_updated_fmt = "%b %d, %Y"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
 html_sidebars = {
-    '**': [
-        'indexsidebar.html',
-        'searchbox.html',
+    "**": [
+        "indexsidebar.html",
+        "searchbox.html",
     ]
 }
 
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'Flangdoc'
+htmlhelp_basename = "Flangdoc"
 
 # If true, the reST sources are included in the HTML build as
 # _sources/name. The default is True.
@@ -234,42 +235,39 @@ def setup(app):
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('Overview', 'Flang.tex', u'Flang Documentation',
-   u'The Flang Team', 'manual'),
+    ("Overview", "Flang.tex", "Flang Documentation", "The Flang Team", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -279,7 +277,7 @@ def setup(app):
 man_pages = []
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -288,16 +286,22 @@ def setup(app):
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('Overview', 'Flang', u'Flang Documentation',
-   u'The Flang Team', 'Flang', 'A Fortran front end for LLVM.',
-   'Miscellaneous'),
+    (
+        "Overview",
+        "Flang",
+        "Flang Documentation",
+        "The Flang Team",
+        "Flang",
+        "A Fortran front end for LLVM.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

diff  --git a/flang/examples/FlangOmpReport/yaml_summarizer.py b/flang/examples/FlangOmpReport/yaml_summarizer.py
index d1d73cde07681..5726ff8da77f5 100644
--- a/flang/examples/FlangOmpReport/yaml_summarizer.py
+++ b/flang/examples/FlangOmpReport/yaml_summarizer.py
@@ -77,6 +77,7 @@
 
 from ruamel.yaml import YAML
 
+
 def find_yaml_files(search_directory: Path, search_pattern: str):
     """
     Find all '.yaml' files and returns an iglob iterator to them.
@@ -90,10 +91,13 @@ def find_yaml_files(search_directory: Path, search_pattern: str):
     # been generated with  'flang-omp-report' or not. This might result in the script
     # reading files that it should ignore.
     if search_directory:
-        return glob.iglob(str(search_directory.joinpath(search_pattern)), recursive=True)
+        return glob.iglob(
+            str(search_directory.joinpath(search_pattern)), recursive=True
+        )
 
     return glob.iglob(str("/" + search_pattern), recursive=True)
 
+
 def process_log(data, result: list):
     """
     Process the data input as a 'log' to the result array. This esssentially just
@@ -104,11 +108,16 @@ def process_log(data, result: list):
     result -- Array to add the processed data to.
     """
     for datum in data:
-        items = result.get(datum['file'], [])
-        items.append({"construct" : datum['construct'],
-                        "line" : datum['line'],
-                        "clauses" : datum['clauses']})
-        result[datum['file']] = items
+        items = result.get(datum["file"], [])
+        items.append(
+            {
+                "construct": datum["construct"],
+                "line": datum["line"],
+                "clauses": datum["clauses"],
+            }
+        )
+        result[datum["file"]] = items
+
 
 def add_clause(datum, construct):
     """
@@ -119,8 +128,8 @@ def add_clause(datum, construct):
     datum -- Data construct containing clauses to check.
     construct -- Construct to add or increment clause count.
     """
-    to_check = [i['clause'] for i in construct['clauses']]
-    to_add = [i['clause'] for i in datum['clauses']]
+    to_check = [i["clause"] for i in construct["clauses"]]
+    to_add = [i["clause"] for i in datum["clauses"]]
     clauses = construct["clauses"]
     for item in to_add:
         if item in to_check:
@@ -128,8 +137,8 @@ def add_clause(datum, construct):
                 if clause["clause"] == item:
                     clause["count"] += 1
         else:
-            clauses.append({"clause" : item,
-                            "count" : 1})
+            clauses.append({"clause": item, "count": 1})
+
 
 def process_summary(data, result: dict):
     """
@@ -140,37 +149,40 @@ def process_summary(data, result: dict):
     result -- Dictionary to add the processed data to.
     """
     for datum in data:
-        construct = next((item for item in result
-                            if item["construct"] == datum["construct"]), None)
+        construct = next(
+            (item for item in result if item["construct"] == datum["construct"]), None
+        )
         clauses = []
         # Add the construct and clauses to the summary if
         # they haven't been seen before
         if not construct:
-            for i in datum['clauses']:
-                clauses.append({"clause" : i['clause'],
-                                "count"    : 1})
-            result.append({"construct" : datum['construct'],
-                            "count" : 1,
-                            "clauses" : clauses})
+            for i in datum["clauses"]:
+                clauses.append({"clause": i["clause"], "count": 1})
+            result.append(
+                {"construct": datum["construct"], "count": 1, "clauses": clauses}
+            )
         else:
             construct["count"] += 1
 
             add_clause(datum, construct)
 
+
 def clean_summary(result):
-    """ Cleans the result after processing the yaml files with summary format."""
+    """Cleans the result after processing the yaml files with summary format."""
     # Remove all "clauses" that are empty to keep things compact
     for construct in result:
         if construct["clauses"] == []:
             construct.pop("clauses")
 
+
 def clean_log(result):
-    """ Cleans the result after processing the yaml files with log format."""
+    """Cleans the result after processing the yaml files with log format."""
     for constructs in result.values():
         for construct in constructs:
             if construct["clauses"] == []:
                 construct.pop("clauses")
 
+
 def output_result(yaml: YAML, result, output_file: Path):
     """
     Outputs result to either 'stdout' or to a output file.
@@ -181,7 +193,7 @@ def output_result(yaml: YAML, result, output_file: Path):
                    outputted to 'stdout'.
     """
     if output_file:
-        with open(output_file, 'w+', encoding='utf-8') as file:
+        with open(output_file, "w+", encoding="utf-8") as file:
             if output_file.suffix == ".yaml":
                 yaml.dump(result, file)
             else:
@@ -189,8 +201,10 @@ def output_result(yaml: YAML, result, output_file: Path):
     else:
         yaml.dump(result, sys.stdout)
 
-def process_yaml(search_directories: list, search_pattern: str,
-                 result_format: str, output_file: Path):
+
+def process_yaml(
+    search_directories: list, search_pattern: str, result_format: str, output_file: Path
+):
     """
     Reads each yaml file, calls the appropiate format function for
     the file and then ouputs the result to either 'stdout' or to an output file.
@@ -215,7 +229,7 @@ def process_yaml(search_directories: list, search_pattern: str,
 
     for search_directory in search_directories:
         for file in find_yaml_files(search_directory, search_pattern):
-            with open(file, "r", encoding='utf-8') as yaml_file:
+            with open(file, "r", encoding="utf-8") as yaml_file:
                 data = yaml.load(yaml_file)
                 action(data, result)
 
@@ -224,24 +238,45 @@ def process_yaml(search_directories: list, search_pattern: str,
 
     output_result(yaml, result, output_file)
 
+
 def create_arg_parser():
-    """ Create and return a argparse.ArgumentParser modified for script. """
+    """Create and return a argparse.ArgumentParser modified for script."""
     parser = argparse.ArgumentParser()
-    parser.add_argument("-d", "--directory", help="Specify a directory to scan",
-                        dest="dir", type=str)
-    parser.add_argument("-o", "--output", help="Writes to a file instead of\
-                                                stdout", dest="output", type=str)
-    parser.add_argument("-r", "--recursive", help="Recursive search for .yaml files",
-                        dest="recursive", type=bool, nargs='?', const=True, default=False)
+    parser.add_argument(
+        "-d", "--directory", help="Specify a directory to scan", dest="dir", type=str
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        help="Writes to a file instead of\
+                                                stdout",
+        dest="output",
+        type=str,
+    )
+    parser.add_argument(
+        "-r",
+        "--recursive",
+        help="Recursive search for .yaml files",
+        dest="recursive",
+        type=bool,
+        nargs="?",
+        const=True,
+        default=False,
+    )
 
     exclusive_parser = parser.add_mutually_exclusive_group()
-    exclusive_parser.add_argument("-l", "--log", help="Modifies report format: "
-                                  "Combines the log '.yaml' files into one file.",
-                                  action='store_true', dest='log')
+    exclusive_parser.add_argument(
+        "-l",
+        "--log",
+        help="Modifies report format: " "Combines the log '.yaml' files into one file.",
+        action="store_true",
+        dest="log",
+    )
     return parser
 
+
 def parse_arguments():
-    """ Parses arguments given to script and returns a tuple of processed arguments. """
+    """Parses arguments given to script and returns a tuple of processed arguments."""
     parser = create_arg_parser()
     args = parser.parse_args()
 
@@ -270,13 +305,15 @@ def parse_arguments():
 
     return (search_directory, search_pattern, result_format, output_file)
 
+
 def main():
-    """ Main function of script. """
+    """Main function of script."""
     (search_directory, search_pattern, result_format, output_file) = parse_arguments()
 
     process_yaml(search_directory, search_pattern, result_format, output_file)
 
     return 0
 
+
 if __name__ == "__main__":
     sys.exit(main())

diff  --git a/flang/test/Evaluate/test_folding.py b/flang/test/Evaluate/test_folding.py
index ac1c8e553864e..dcd1541997c2a 100755
--- a/flang/test/Evaluate/test_folding.py
+++ b/flang/test/Evaluate/test_folding.py
@@ -40,12 +40,14 @@
 from difflib import unified_diff
 from pathlib import Path
 
+
 def check_args(args):
     """Verifies that the number is arguments passed is correct."""
     if len(args) < 3:
         print(f"Usage: {args[0]} <fortran-source> <flang-command>")
         sys.exit(1)
 
+
 def set_source(source):
     """Sets the path to the source files."""
     if not Path(source).is_file():
@@ -53,6 +55,7 @@ def set_source(source):
         sys.exit(1)
     return Path(source)
 
+
 def set_executable(exe):
     """Sets the path to the Flang frontend driver."""
     if not Path(exe).is_file():
@@ -60,10 +63,11 @@ def set_executable(exe):
         sys.exit(1)
     return str(Path(exe))
 
+
 check_args(sys.argv)
 cwd = os.getcwd()
 srcdir = set_source(sys.argv[1]).resolve()
-with open(srcdir, 'r', encoding="utf-8") as f:
+with open(srcdir, "r", encoding="utf-8") as f:
     src = f.readlines()
 src1 = ""
 src2 = ""
@@ -77,7 +81,7 @@ def set_executable(exe):
 flang_fc1 = set_executable(sys.argv[2])
 flang_fc1_args = sys.argv[3:]
 flang_fc1_options = ""
-LIBPGMATH = os.getenv('LIBPGMATH')
+LIBPGMATH = os.getenv("LIBPGMATH")
 if LIBPGMATH:
     flang_fc1_options = ["-fdebug-dump-symbols", "-DTEST_LIBPGMATH"]
     print("Assuming libpgmath support")
@@ -87,8 +91,14 @@ def set_executable(exe):
 
 cmd = [flang_fc1, *flang_fc1_args, *flang_fc1_options, str(srcdir)]
 with tempfile.TemporaryDirectory() as tmpdir:
-    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                          check=True, universal_newlines=True, cwd=tmpdir)
+    proc = subprocess.run(
+        cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        check=True,
+        universal_newlines=True,
+        cwd=tmpdir,
+    )
     src1 = proc.stdout
     messages = proc.stderr
 
@@ -125,8 +135,9 @@ def set_executable(exe):
             expected_warnings += f"{i}:{x}\n"
         warnings = []
 
-for line in unified_diff(actual_warnings.split("\n"),
-                         expected_warnings.split("\n"), n=0):
+for line in unified_diff(
+    actual_warnings.split("\n"), expected_warnings.split("\n"), n=0
+):
     line = re.sub(r"(^\-)(\d+:)", r"\nactual at \g<2>", line)
     line = re.sub(r"(^\+)(\d+:)", r"\nexpect at \g<2>", line)
     warning_diffs += line
@@ -152,4 +163,3 @@ def set_executable(exe):
     print()
     print(f"All {passed_results+passed_warnings} tests passed")
     print("PASS")
-

diff  --git a/flang/test/NonGtestUnit/lit.cfg.py b/flang/test/NonGtestUnit/lit.cfg.py
index 7f53f861bc65c..39ae19fc164d6 100644
--- a/flang/test/NonGtestUnit/lit.cfg.py
+++ b/flang/test/NonGtestUnit/lit.cfg.py
@@ -2,15 +2,20 @@
 
 import lit.Test
 
-config.name = 'flang-OldUnit'
+config.name = "flang-OldUnit"
 
 config.suffixes = [".test"]
 
-config.test_source_root = os.path.join(config.flang_obj_root, 'unittests')
+config.test_source_root = os.path.join(config.flang_obj_root, "unittests")
 config.test_exec_root = config.test_source_root
 
 config.test_format = lit.formats.ExecutableTest()
 
-path = os.path.pathsep.join((config.flang_libs_dir, config.llvm_libs_dir,
-                              config.environment.get('LD_LIBRARY_PATH','')))
-config.environment['LD_LIBRARY_PATH'] = path
+path = os.path.pathsep.join(
+    (
+        config.flang_libs_dir,
+        config.llvm_libs_dir,
+        config.environment.get("LD_LIBRARY_PATH", ""),
+    )
+)
+config.environment["LD_LIBRARY_PATH"] = path

diff  --git a/flang/test/Semantics/common.py b/flang/test/Semantics/common.py
index 14323f4a70d87..ee7a820965b0c 100755
--- a/flang/test/Semantics/common.py
+++ b/flang/test/Semantics/common.py
@@ -4,12 +4,14 @@
 import sys
 from pathlib import Path
 
+
 def set_source(source):
     """Checks whether the source file exists and returns its path."""
     if not Path(source).is_file():
         die(source)
     return Path(source)
 
+
 def set_executable(executable):
     """Checks whether a Flang executable has been set and returns its path."""
     flang_fc1 = Path(executable)
@@ -17,11 +19,13 @@ def set_executable(executable):
         die(flang_fc1)
     return str(flang_fc1)
 
+
 def set_temp(tmp):
     """Sets a temporary directory or creates one if it doesn't exist."""
     os.makedirs(Path(tmp), exist_ok=True)
     return Path(tmp)
 
+
 def die(file=None):
     """Used in other functions."""
     if file is None:
@@ -30,15 +34,16 @@ def die(file=None):
         print(f"{sys.argv[0]}: File not found: {file}")
     sys.exit(1)
 
+
 def check_args(args):
     """Verifies that 2 arguments have been passed."""
     if len(args) < 3:
         print(f"Usage: {args[0]} <fortran-source> <flang-command>")
         sys.exit(1)
 
+
 def check_args_long(args):
     """Verifies that 3 arguments have been passed."""
     if len(args) < 4:
         print(f"Usage: {args[0]} <fortran-source> <temp-test-dir> <flang-command>")
         sys.exit(1)
-

diff  --git a/flang/test/Semantics/test_errors.py b/flang/test/Semantics/test_errors.py
index 5a4d8b56a2357..63ff3367edefd 100755
--- a/flang/test/Semantics/test_errors.py
+++ b/flang/test/Semantics/test_errors.py
@@ -17,7 +17,7 @@
 
 cm.check_args(sys.argv)
 srcdir = cm.set_source(sys.argv[1])
-with open(srcdir, 'r') as f:
+with open(srcdir, "r") as f:
     src = f.readlines()
 actual = ""
 expect = ""
@@ -32,8 +32,14 @@
 cmd = [flang_fc1, *flang_fc1_args, flang_fc1_options, str(srcdir)]
 with tempfile.TemporaryDirectory() as tmpdir:
     try:
-        proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                              check=True, universal_newlines=True, cwd=tmpdir)
+        proc = subprocess.run(
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            check=True,
+            universal_newlines=True,
+            cwd=tmpdir,
+        )
     except subprocess.CalledProcessError as e:
         log = e.stderr
         if e.returncode >= 128:
@@ -41,11 +47,11 @@
             sys.exit(1)
 
 # Cleans up the output from the compilation process to be easier to process
-for line in log.split('\n'):
+for line in log.split("\n"):
     m = re.search(r"[^:]*:(\d+:).*(?:error|warning|portability|because):(.*)", line)
     if m:
         if re.search(r"warning: .*fold.*host", line):
-            continue # ignore host-dependent folding warnings
+            continue  # ignore host-dependent folding warnings
         actual += m.expand(r"\1\2\n")
 
 # Gets the expected errors and their line numbers

diff  --git a/flang/test/Semantics/test_modfile.py b/flang/test/Semantics/test_modfile.py
index 4126f9adf8cea..87bd7dd0b55b8 100755
--- a/flang/test/Semantics/test_modfile.py
+++ b/flang/test/Semantics/test_modfile.py
@@ -41,12 +41,13 @@
 
         prev_files = set(os.listdir(tmpdir))
         cmd = [flang_fc1, *flang_fc_args, flang_fc1_options, str(src)]
-        proc = subprocess.check_output(cmd, stderr=subprocess.PIPE,
-                                       universal_newlines=True, cwd=tmpdir)
+        proc = subprocess.check_output(
+            cmd, stderr=subprocess.PIPE, universal_newlines=True, cwd=tmpdir
+        )
         actual_files = set(os.listdir(tmpdir)).difference(prev_files)
 
         # The first 3 bytes of the files are an UTF-8 BOM
-        with open(src, 'r', encoding="utf-8", errors="strict") as f:
+        with open(src, "r", encoding="utf-8", errors="strict") as f:
             for line in f:
                 m = re.search(r"^!Expect: (.*)", line)
                 if m:
@@ -62,13 +63,13 @@
             if not mod.is_file():
                 print(f"Compilation did not produce expected mod file: {mod}")
                 sys.exit(1)
-            with open(mod, 'r', encoding="utf-8", errors="strict") as f:
+            with open(mod, "r", encoding="utf-8", errors="strict") as f:
                 for line in f:
                     if "!mod$" in line:
                         continue
                     actual += line
 
-            with open(src, 'r', encoding="utf-8", errors="strict") as f:
+            with open(src, "r", encoding="utf-8", errors="strict") as f:
                 for line in f:
                     if f"!Expect: {mod.name}" in line:
                         for line in f:
@@ -77,9 +78,15 @@
                             m = re.sub(r"^!", "", line.lstrip())
                             expect += m
 
-            diffs = "\n".join(unified_diff(actual.replace(" ", "").split("\n"),
-                                           expect.replace(" ", "").split("\n"),
-                                           fromfile=mod.name, tofile="Expect", n=999999))
+            diffs = "\n".join(
+                unified_diff(
+                    actual.replace(" ", "").split("\n"),
+                    expect.replace(" ", "").split("\n"),
+                    fromfile=mod.name,
+                    tofile="Expect",
+                    n=999999,
+                )
+            )
 
             if diffs != "":
                 print(diffs)
@@ -89,4 +96,3 @@
 
 print()
 print("PASS")
-

diff  --git a/flang/test/Semantics/test_symbols.py b/flang/test/Semantics/test_symbols.py
index b82903fe44f2b..24dc5004a4229 100755
--- a/flang/test/Semantics/test_symbols.py
+++ b/flang/test/Semantics/test_symbols.py
@@ -27,7 +27,7 @@
 flang_fc1_options = "-fdebug-unparse-with-symbols"
 
 # Strips out blank lines and all comments except for "!DEF:", "!REF:", "!$acc" and "!$omp"
-with open(src, 'r') as text_in:
+with open(src, "r") as text_in:
     for line in text_in:
         text = re.sub(r"!(?![DR]EF:|\$omp|\$acc).*", "", line)
         text = re.sub(r"^\s*$", "", text)
@@ -41,7 +41,9 @@
 # Compiles, inserting comments for symbols:
 cmd = [flang_fc1, *flang_fc1_args, flang_fc1_options]
 with tempfile.TemporaryDirectory() as tmpdir:
-    diff3 = subprocess.check_output(cmd, input=diff2, universal_newlines=True, cwd=tmpdir)
+    diff3 = subprocess.check_output(
+        cmd, input=diff2, universal_newlines=True, cwd=tmpdir
+    )
 
 # Removes all whitespace to compare differences in files
 diff1 = diff1.replace(" ", "")
@@ -49,8 +51,15 @@
 diff_check = ""
 
 # Compares the input with the output
-diff_check = "\n".join(unified_diff(diff1.split("\n"), diff3.split("\n"), n=999999,
-                       fromfile="Expected_output", tofile="Actual_output"))
+diff_check = "\n".join(
+    unified_diff(
+        diff1.split("\n"),
+        diff3.split("\n"),
+        n=999999,
+        fromfile="Expected_output",
+        tofile="Actual_output",
+    )
+)
 
 if diff_check != "":
     print(diff_check.replace(" ", ""))
@@ -60,4 +69,3 @@
 else:
     print()
     print("PASS")
-

diff --git a/flang/test/Unit/lit.cfg.py b/flang/test/Unit/lit.cfg.py
index 406018c3f89c2..c82ac1c5aed45 100644
--- a/flang/test/Unit/lit.cfg.py
+++ b/flang/test/Unit/lit.cfg.py
@@ -7,29 +7,36 @@
 import lit.formats
 
 # name: The name of this test suite.
-config.name = 'flang-Unit'
+config.name = "flang-Unit"
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes =  []
+config.suffixes = []
 
 # test_source_root: The root path where unit test binaries are located.
 # test_exec_root: The root path where tests should be run.
-config.test_source_root = os.path.join(config.flang_obj_root, 'unittests')
+config.test_source_root = os.path.join(config.flang_obj_root, "unittests")
 config.test_exec_root = config.test_source_root
 
 # testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, 'Tests')
+config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, "Tests")
 
 # Tweak the PATH to include the tools dir.
-path = os.path.pathsep.join((config.flang_tools_dir, config.llvm_tools_dir, config.environment['PATH']))
-config.environment['PATH'] = path
-
-path = os.path.pathsep.join((config.flang_libs_dir, config.llvm_libs_dir,
-                              config.environment.get('LD_LIBRARY_PATH','')))
-config.environment['LD_LIBRARY_PATH'] = path
+path = os.path.pathsep.join(
+    (config.flang_tools_dir, config.llvm_tools_dir, config.environment["PATH"])
+)
+config.environment["PATH"] = path
+
+path = os.path.pathsep.join(
+    (
+        config.flang_libs_dir,
+        config.llvm_libs_dir,
+        config.environment.get("LD_LIBRARY_PATH", ""),
+    )
+)
+config.environment["LD_LIBRARY_PATH"] = path
 
 # Propagate PYTHON_EXECUTABLE into the environment
-#config.environment['PYTHON_EXECUTABLE'] = sys.executable
+# config.environment['PYTHON_EXECUTABLE'] = sys.executable
 
 # To modify the default target triple for flang tests.
 if config.flang_test_triple:

diff --git a/flang/test/lib/lit.local.cfg b/flang/test/lib/lit.local.cfg
index 9832f42447387..f2c77f45c1bbe 100644
--- a/flang/test/lib/lit.local.cfg
+++ b/flang/test/lib/lit.local.cfg
@@ -1,7 +1,28 @@
-
-# Excluding .cpp file from the extensions since from this level down they are used for the development 
-config.suffixes = ['.c', '.f', '.F', '.ff', '.FOR', '.for', '.f77', '.f90', '.F90',
-                   '.ff90', '.f95', '.F95', '.ff95', '.fpp', '.FPP', '.cuf'
-                   '.CUF', '.f18', '.F18', '.f03', '.F03', '.f08', '.F08',
-                   '.ll', '.fir', '.mlir']
-
+# Excluding .cpp file from the extensions since from this level down they are used for the development
+config.suffixes = [
+    ".c",
+    ".f",
+    ".F",
+    ".ff",
+    ".FOR",
+    ".for",
+    ".f77",
+    ".f90",
+    ".F90",
+    ".ff90",
+    ".f95",
+    ".F95",
+    ".ff95",
+    ".fpp",
+    ".FPP",
+    ".cuf" ".CUF",
+    ".f18",
+    ".F18",
+    ".f03",
+    ".F03",
+    ".f08",
+    ".F08",
+    ".ll",
+    ".fir",
+    ".mlir",
+]

diff --git a/flang/test/lit.cfg.py b/flang/test/lit.cfg.py
index 73ba872b20e40..ba25cb6c78dc2 100644
--- a/flang/test/lit.cfg.py
+++ b/flang/test/lit.cfg.py
@@ -16,7 +16,7 @@
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'Flang'
+config.name = "Flang"
 
 # testFormat: The test format to use to interpret tests.
 #
@@ -25,25 +25,48 @@
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.f', '.F', '.ff', '.FOR', '.for', '.f77', '.f90', '.F90',
-                   '.ff90', '.f95', '.F95', '.ff95', '.fpp', '.FPP', '.cuf'
-                   '.CUF', '.f18', '.F18', '.f03', '.F03', '.f08', '.F08',
-                   '.ll', '.fir', '.mlir']
-
-config.substitutions.append(('%PATH%', config.environment['PATH']))
-config.substitutions.append(('%llvmshlibdir', config.llvm_shlib_dir))
-config.substitutions.append(('%pluginext', config.llvm_plugin_ext))
+config.suffixes = [
+    ".c",
+    ".cpp",
+    ".f",
+    ".F",
+    ".ff",
+    ".FOR",
+    ".for",
+    ".f77",
+    ".f90",
+    ".F90",
+    ".ff90",
+    ".f95",
+    ".F95",
+    ".ff95",
+    ".fpp",
+    ".FPP",
+    ".cuf" ".CUF",
+    ".f18",
+    ".F18",
+    ".f03",
+    ".F03",
+    ".f08",
+    ".F08",
+    ".ll",
+    ".fir",
+    ".mlir",
+]
+
+config.substitutions.append(("%PATH%", config.environment["PATH"]))
+config.substitutions.append(("%llvmshlibdir", config.llvm_shlib_dir))
+config.substitutions.append(("%pluginext", config.llvm_plugin_ext))
 
 llvm_config.use_default_substitutions()
 
 # ask llvm-config about asserts
-llvm_config.feature_config(
-    [('--assertion-mode', {'ON': 'asserts'})])
+llvm_config.feature_config([("--assertion-mode", {"ON": "asserts"})])
 
 # Targets
 config.targets = frozenset(config.targets_to_build.split())
 for arch in config.targets_to_build.split():
-    config.available_features.add(arch.lower() + '-registered-target')
+    config.available_features.add(arch.lower() + "-registered-target")
 
 # To modify the default target triple for flang tests.
 if config.flang_test_triple:
@@ -53,81 +76,103 @@
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt']
+config.excludes = ["Inputs", "CMakeLists.txt", "README.txt", "LICENSE.txt"]
 
 # If the flang examples are built, add examples to the config
 if config.flang_examples:
-    config.available_features.add('examples')
+    config.available_features.add("examples")
 
 # Plugins (loadable modules)
 if config.has_plugins:
-    config.available_features.add('plugins')
+    config.available_features.add("plugins")
 
 if config.linked_bye_extension:
-    config.substitutions.append(('%loadbye', ''))
+    config.substitutions.append(("%loadbye", ""))
 else:
-    config.substitutions.append(('%loadbye',
-                                 '-fpass-plugin={}/Bye{}'.format(config.llvm_shlib_dir,
-                                                                 config.llvm_plugin_ext)))
+    config.substitutions.append(
+        (
+            "%loadbye",
+            "-fpass-plugin={}/Bye{}".format(
+                config.llvm_shlib_dir, config.llvm_plugin_ext
+            ),
+        )
+    )
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.flang_obj_root, 'test')
+config.test_exec_root = os.path.join(config.flang_obj_root, "test")
 
 # Tweak the PATH to include the tools dir.
-llvm_config.with_environment('PATH', config.flang_tools_dir, append_path=True)
-llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
+llvm_config.with_environment("PATH", config.flang_tools_dir, append_path=True)
+llvm_config.with_environment("PATH", config.llvm_tools_dir, append_path=True)
 
 if config.flang_standalone_build:
     # For builds with FIR, set path for tco and enable related tests
     if config.flang_llvm_tools_dir != "":
-        config.available_features.add('fir')
+        config.available_features.add("fir")
         if config.llvm_tools_dir != config.flang_llvm_tools_dir:
-            llvm_config.with_environment('PATH', config.flang_llvm_tools_dir, append_path=True)
+            llvm_config.with_environment(
+                "PATH", config.flang_llvm_tools_dir, append_path=True
+            )
 
 # For each occurrence of a flang tool name, replace it with the full path to
 # the build directory holding that tool.
 tools = [
-        ToolSubst('%flang', command=FindTool('flang-new'), unresolved='fatal'),
-    ToolSubst('%flang_fc1', command=FindTool('flang-new'), extra_args=['-fc1'],
-        unresolved='fatal')]
+    ToolSubst("%flang", command=FindTool("flang-new"), unresolved="fatal"),
+    ToolSubst(
+        "%flang_fc1",
+        command=FindTool("flang-new"),
+        extra_args=["-fc1"],
+        unresolved="fatal",
+    ),
+]
 
 # Flang has several unimplemented features. TODO messages are used to mark
 # and fail if these features are exercised. Some TODOs exit with a non-zero
 # exit code, but others abort the execution in assert builds.
 # To catch aborts, the `--crash` option for the `not` command has to be used.
-tools.append(ToolSubst('%not_todo_cmd', command=FindTool('not'), unresolved='fatal'))
-if 'asserts' in config.available_features:
-    tools.append(ToolSubst('%not_todo_abort_cmd', command=FindTool('not'),
-        extra_args=['--crash'], unresolved='fatal'))
+tools.append(ToolSubst("%not_todo_cmd", command=FindTool("not"), unresolved="fatal"))
+if "asserts" in config.available_features:
+    tools.append(
+        ToolSubst(
+            "%not_todo_abort_cmd",
+            command=FindTool("not"),
+            extra_args=["--crash"],
+            unresolved="fatal",
+        )
+    )
 else:
-    tools.append(ToolSubst('%not_todo_abort_cmd', command=FindTool('not'),
-        unresolved='fatal'))
+    tools.append(
+        ToolSubst("%not_todo_abort_cmd", command=FindTool("not"), unresolved="fatal")
+    )
 
 # Define some variables to help us test that the flang runtime doesn't depend on
 # the C++ runtime libraries. For this we need a C compiler. If for some reason
 # we don't have one, we can just disable the test.
 if config.cc:
-    libruntime = os.path.join(config.flang_lib_dir, 'libFortranRuntime.a')
-    libdecimal = os.path.join(config.flang_lib_dir, 'libFortranDecimal.a')
-    include = os.path.join(config.flang_src_dir, 'include')
-
-    if os.path.isfile(libruntime) and os.path.isfile(libdecimal) and os.path.isdir(include):
-        config.available_features.add('c-compiler')
-        tools.append(ToolSubst('%cc', command=config.cc, unresolved='fatal'))
-        tools.append(ToolSubst('%libruntime', command=libruntime,
-            unresolved='fatal'))
-        tools.append(ToolSubst('%libdecimal', command=libdecimal,
-            unresolved='fatal'))
-        tools.append(ToolSubst('%include', command=include,
-            unresolved='fatal'))
+    libruntime = os.path.join(config.flang_lib_dir, "libFortranRuntime.a")
+    libdecimal = os.path.join(config.flang_lib_dir, "libFortranDecimal.a")
+    include = os.path.join(config.flang_src_dir, "include")
+
+    if (
+        os.path.isfile(libruntime)
+        and os.path.isfile(libdecimal)
+        and os.path.isdir(include)
+    ):
+        config.available_features.add("c-compiler")
+        tools.append(ToolSubst("%cc", command=config.cc, unresolved="fatal"))
+        tools.append(ToolSubst("%libruntime", command=libruntime, unresolved="fatal"))
+        tools.append(ToolSubst("%libdecimal", command=libdecimal, unresolved="fatal"))
+        tools.append(ToolSubst("%include", command=include, unresolved="fatal"))
 
 # Add all the tools and their substitutions (if applicable). Use the search paths provided for
 # finding the tools.
 if config.flang_standalone_build:
-    llvm_config.add_tool_substitutions(tools, [config.flang_llvm_tools_dir, config.llvm_tools_dir])
+    llvm_config.add_tool_substitutions(
+        tools, [config.flang_llvm_tools_dir, config.llvm_tools_dir]
+    )
 else:
     llvm_config.add_tool_substitutions(tools, config.llvm_tools_dir)
 

diff --git a/libc/AOR_v20.02/math/tools/plot.py b/libc/AOR_v20.02/math/tools/plot.py
index 8c7da5a8ffd72..9ce6b0308dd57 100755
--- a/libc/AOR_v20.02/math/tools/plot.py
+++ b/libc/AOR_v20.02/math/tools/plot.py
@@ -14,49 +14,53 @@
 # example usage:
 # build/bin/ulp -e .0001 log 0.5 2.0 2345678 | math/tools/plot.py
 
+
 def fhex(s):
-	return float.fromhex(s)
+    return float.fromhex(s)
+
 
 def parse(f):
-	xs = []
-	gs = []
-	ys = []
-	es = []
-	# Has to match the format used in ulp.c
-	r = re.compile(r'[^ (]+\(([^ )]*)\) got ([^ ]+) want ([^ ]+) [^ ]+ ulp err ([^ ]+)')
-	for line in f:
-		m = r.match(line)
-		if m:
-			x = fhex(m.group(1))
-			g = fhex(m.group(2))
-			y = fhex(m.group(3))
-			e = float(m.group(4))
-			xs.append(x)
-			gs.append(g)
-			ys.append(y)
-			es.append(e)
-		elif line.startswith('PASS') or line.startswith('FAIL'):
-			# Print the summary line
-			print(line)
-	return xs, gs, ys, es
+    xs = []
+    gs = []
+    ys = []
+    es = []
+    # Has to match the format used in ulp.c
+    r = re.compile(r"[^ (]+\(([^ )]*)\) got ([^ ]+) want ([^ ]+) [^ ]+ ulp err ([^ ]+)")
+    for line in f:
+        m = r.match(line)
+        if m:
+            x = fhex(m.group(1))
+            g = fhex(m.group(2))
+            y = fhex(m.group(3))
+            e = float(m.group(4))
+            xs.append(x)
+            gs.append(g)
+            ys.append(y)
+            es.append(e)
+        elif line.startswith("PASS") or line.startswith("FAIL"):
+            # Print the summary line
+            print(line)
+    return xs, gs, ys, es
+
 
 def plot(xs, gs, ys, es):
-	if len(xs) < 2:
-		print('not enough samples')
-		return
-	a = min(xs)
-	b = max(xs)
-	fig, (ax0,ax1) = plt.subplots(nrows=2)
-	es = np.abs(es) # ignore the sign
-	emax = max(es)
-	ax0.text(a+(b-a)*0.7, emax*0.8, '%s\n%g'%(emax.hex(),emax))
-	ax0.plot(xs,es,'r.')
-	ax0.grid()
-	ax1.plot(xs,ys,'r.',label='want')
-	ax1.plot(xs,gs,'b.',label='got')
-	ax1.grid()
-	ax1.legend()
-	plt.show()
+    if len(xs) < 2:
+        print("not enough samples")
+        return
+    a = min(xs)
+    b = max(xs)
+    fig, (ax0, ax1) = plt.subplots(nrows=2)
+    es = np.abs(es)  # ignore the sign
+    emax = max(es)
+    ax0.text(a + (b - a) * 0.7, emax * 0.8, "%s\n%g" % (emax.hex(), emax))
+    ax0.plot(xs, es, "r.")
+    ax0.grid()
+    ax1.plot(xs, ys, "r.", label="want")
+    ax1.plot(xs, gs, "b.", label="got")
+    ax1.grid()
+    ax1.legend()
+    plt.show()
+
 
 xs, gs, ys, es = parse(sys.stdin)
 plot(xs, gs, ys, es)

diff --git a/libc/docs/conf.py b/libc/docs/conf.py
index 941c9e5224adc..502a479b3eb2c 100644
--- a/libc/docs/conf.py
+++ b/libc/docs/conf.py
@@ -16,32 +16,32 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
+extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'libc'
-copyright = u'2011-%d, LLVM Project' % date.today().year
+project = "libc"
+copyright = "2011-%d, LLVM Project" % date.today().year
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -54,80 +54,80 @@
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%Y-%m-%d'
+today_fmt = "%Y-%m-%d"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'Helpers']
+exclude_patterns = ["_build", "Helpers"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-#html_theme = 'haiku'
-html_theme = 'alabaster'
+# html_theme = 'haiku'
+html_theme = "alabaster"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
 html_theme_options = {
-     'font_size': '11pt',
+    "font_size": "11pt",
     # Don't generate any links to GitHub.
-    'github_button' : 'false',
+    "github_button": "false",
 }
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-html_title = 'The LLVM C Library'
+html_title = "The LLVM C Library"
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 #
-#html_context = {
+# html_context = {
 #    'css_files': [
 #        '_static/libc.css'
 #        ],
@@ -135,101 +135,95 @@
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'libcxxdoc'
+htmlhelp_basename = "libcxxdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('contents', 'libcxx.tex', u'libcxx Documentation',
-   u'LLVM project', 'manual'),
+    ("contents", "libcxx.tex", "libcxx Documentation", "LLVM project", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    ('contents', 'libc', u'libc Documentation',
-     [u'LLVM project'], 1)
-]
+man_pages = [("contents", "libc", "libc Documentation", ["LLVM project"], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -238,19 +232,25 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('contents', 'libc', u'libc Documentation',
-   u'LLVM project', 'libc', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "contents",
+        "libc",
+        "libc Documentation",
+        "LLVM project",
+        "libc",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 
 # FIXME: Define intersphinx configuration.

diff --git a/libc/utils/mathtools/GenerateHPDConstants.py b/libc/utils/mathtools/GenerateHPDConstants.py
index 4c87d45a07d5f..c37b35e000c53 100644
--- a/libc/utils/mathtools/GenerateHPDConstants.py
+++ b/libc/utils/mathtools/GenerateHPDConstants.py
@@ -1,6 +1,6 @@
 from math import *
 
-'''
+"""
 This script is used to generate a table used by 
 libc/src/__support/high_precision_decimal.h.
 
@@ -46,20 +46,20 @@
 being shifted are greater than or equal to the digits of 5^i (the second value
 of each entry) then it is just the first value in the entry, else it is one
 fewer.
-'''
+"""
 
 
 # Generate Left Shift Table
 outStr = ""
 for i in range(61):
-  tenToTheI = 10**i
-  fiveToTheI = 5**i
-  outStr += "{"
-  # The number of new digits that would be created by multiplying 5**i by 2**i
-  outStr += str(ceil(log10(tenToTheI) - log10(fiveToTheI)))
-  outStr += ', "'
-  if not i == 0:
-    outStr += str(fiveToTheI)
-  outStr += '"},\n'
+    tenToTheI = 10**i
+    fiveToTheI = 5**i
+    outStr += "{"
+    # The number of new digits that would be created by multiplying 5**i by 2**i
+    outStr += str(ceil(log10(tenToTheI) - log10(fiveToTheI)))
+    outStr += ', "'
+    if not i == 0:
+        outStr += str(fiveToTheI)
+    outStr += '"},\n'
 
 print(outStr)

diff --git a/libclc/generic/lib/gen_convert.py b/libclc/generic/lib/gen_convert.py
index 469244047de96..612a9184f4b27 100644
--- a/libclc/generic/lib/gen_convert.py
+++ b/libclc/generic/lib/gen_convert.py
@@ -26,86 +26,114 @@
 #
 # convert_<destTypen><_sat><_roundingMode>(<sourceTypen>)
 
-types = ['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong', 'float', 'double']
-int_types = ['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong']
-unsigned_types = ['uchar', 'ushort', 'uint', 'ulong']
-float_types = ['float', 'double']
-int64_types = ['long', 'ulong']
-float64_types = ['double']
-vector_sizes = ['', '2', '3', '4', '8', '16']
-half_sizes = [('2',''), ('4','2'), ('8','4'), ('16','8')]
-
-saturation = ['','_sat']
-rounding_modes = ['_rtz','_rte','_rtp','_rtn']
-float_prefix = {'float':'FLT_', 'double':'DBL_'}
-float_suffix = {'float':'f', 'double':''}
-
-bool_type = {'char'  : 'char',
-             'uchar' : 'char',
-             'short' : 'short',
-             'ushort': 'short',
-             'int'   : 'int',
-             'uint'  : 'int',
-             'long'  : 'long',
-             'ulong' : 'long',
-             'float'  : 'int',
-             'double' : 'long'}
-
-unsigned_type = {'char'  : 'uchar',
-                 'uchar' : 'uchar',
-                 'short' : 'ushort',
-                 'ushort': 'ushort',
-                 'int'   : 'uint',
-                 'uint'  : 'uint',
-                 'long'  : 'ulong',
-                 'ulong' : 'ulong'}
-
-sizeof_type = {'char'  : 1, 'uchar'  : 1,
-               'short' : 2, 'ushort' : 2,
-               'int'   : 4, 'uint'   : 4,
-               'long'  : 8, 'ulong'  : 8,
-               'float' : 4, 'double' : 8}
-
-limit_max = {'char'  : 'CHAR_MAX',
-             'uchar' : 'UCHAR_MAX',
-             'short' : 'SHRT_MAX',
-             'ushort': 'USHRT_MAX',
-             'int'   : 'INT_MAX',
-             'uint'  : 'UINT_MAX',
-             'long'  : 'LONG_MAX',
-             'ulong' : 'ULONG_MAX'}
-
-limit_min = {'char'  : 'CHAR_MIN',
-             'uchar' : '0',
-             'short' : 'SHRT_MIN',
-             'ushort': '0',
-             'int'   : 'INT_MIN',
-             'uint'  : '0',
-             'long'  : 'LONG_MIN',
-             'ulong' : '0'}
+types = [
+    "char",
+    "uchar",
+    "short",
+    "ushort",
+    "int",
+    "uint",
+    "long",
+    "ulong",
+    "float",
+    "double",
+]
+int_types = ["char", "uchar", "short", "ushort", "int", "uint", "long", "ulong"]
+unsigned_types = ["uchar", "ushort", "uint", "ulong"]
+float_types = ["float", "double"]
+int64_types = ["long", "ulong"]
+float64_types = ["double"]
+vector_sizes = ["", "2", "3", "4", "8", "16"]
+half_sizes = [("2", ""), ("4", "2"), ("8", "4"), ("16", "8")]
+
+saturation = ["", "_sat"]
+rounding_modes = ["_rtz", "_rte", "_rtp", "_rtn"]
+float_prefix = {"float": "FLT_", "double": "DBL_"}
+float_suffix = {"float": "f", "double": ""}
+
+bool_type = {
+    "char": "char",
+    "uchar": "char",
+    "short": "short",
+    "ushort": "short",
+    "int": "int",
+    "uint": "int",
+    "long": "long",
+    "ulong": "long",
+    "float": "int",
+    "double": "long",
+}
+
+unsigned_type = {
+    "char": "uchar",
+    "uchar": "uchar",
+    "short": "ushort",
+    "ushort": "ushort",
+    "int": "uint",
+    "uint": "uint",
+    "long": "ulong",
+    "ulong": "ulong",
+}
+
+sizeof_type = {
+    "char": 1,
+    "uchar": 1,
+    "short": 2,
+    "ushort": 2,
+    "int": 4,
+    "uint": 4,
+    "long": 8,
+    "ulong": 8,
+    "float": 4,
+    "double": 8,
+}
+
+limit_max = {
+    "char": "CHAR_MAX",
+    "uchar": "UCHAR_MAX",
+    "short": "SHRT_MAX",
+    "ushort": "USHRT_MAX",
+    "int": "INT_MAX",
+    "uint": "UINT_MAX",
+    "long": "LONG_MAX",
+    "ulong": "ULONG_MAX",
+}
+
+limit_min = {
+    "char": "CHAR_MIN",
+    "uchar": "0",
+    "short": "SHRT_MIN",
+    "ushort": "0",
+    "int": "INT_MIN",
+    "uint": "0",
+    "long": "LONG_MIN",
+    "ulong": "0",
+}
+
 
 def conditional_guard(src, dst):
-  int64_count = 0
-  float64_count = 0
-  if src in int64_types:
-    int64_count = int64_count +1
-  elif src in float64_types:
-    float64_count = float64_count + 1
-  if dst in int64_types:
-    int64_count = int64_count +1
-  elif dst in float64_types:
-    float64_count = float64_count + 1
-  if float64_count > 0:
-    #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be
-    print("#ifdef cl_khr_fp64")
-    return True
-  elif int64_count > 0:
-    print("#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)")
-    return True
-  return False
-
-
-print("""/* !!!! AUTOGENERATED FILE generated by convert_type.py !!!!!
+    int64_count = 0
+    float64_count = 0
+    if src in int64_types:
+        int64_count = int64_count + 1
+    elif src in float64_types:
+        float64_count = float64_count + 1
+    if dst in int64_types:
+        int64_count = int64_count + 1
+    elif dst in float64_types:
+        float64_count = float64_count + 1
+    if float64_count > 0:
+        # In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be
+        print("#ifdef cl_khr_fp64")
+        return True
+    elif int64_count > 0:
+        print("#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)")
+        return True
+    return False
+
+
+print(
+    """/* !!!! AUTOGENERATED FILE generated by convert_type.py !!!!!
 
    DON'T CHANGE THIS FILE. MAKE YOUR CHANGES TO convert_type.py AND RUN:
    $ ./generate-conversion-type-cl.sh
@@ -149,7 +177,8 @@ def conditional_guard(src, dst):
 #pragma OPENCL EXTENSION cles_khr_int64 : enable
 #endif
 
-""")
+"""
+)
 
 #
 # Default Conversions
@@ -178,45 +207,58 @@ def conditional_guard(src, dst):
 # is used, the rounding mode is ignored.
 #
 
+
 def generate_default_conversion(src, dst, mode):
-  close_conditional = conditional_guard(src, dst)
+    close_conditional = conditional_guard(src, dst)
 
-  # scalar conversions
-  print("""_CLC_DEF _CLC_OVERLOAD
+    # scalar conversions
+    print(
+        """_CLC_DEF _CLC_OVERLOAD
 {DST} convert_{DST}{M}({SRC} x)
 {{
   return ({DST})x;
 }}
-""".format(SRC=src, DST=dst, M=mode))
-
-  # vector conversions, done through decomposition to components
-  for size, half_size in half_sizes:
-    print("""_CLC_DEF _CLC_OVERLOAD
+""".format(
+            SRC=src, DST=dst, M=mode
+        )
+    )
+
+    # vector conversions, done through decomposition to components
+    for size, half_size in half_sizes:
+        print(
+            """_CLC_DEF _CLC_OVERLOAD
 {DST}{N} convert_{DST}{N}{M}({SRC}{N} x)
 {{
   return ({DST}{N})(convert_{DST}{H}(x.lo), convert_{DST}{H}(x.hi));
 }}
-""".format(SRC=src, DST=dst, N=size, H=half_size, M=mode))
-
-  # 3-component vector conversions
-  print("""_CLC_DEF _CLC_OVERLOAD
+""".format(
+                SRC=src, DST=dst, N=size, H=half_size, M=mode
+            )
+        )
+
+    # 3-component vector conversions
+    print(
+        """_CLC_DEF _CLC_OVERLOAD
 {DST}3 convert_{DST}3{M}({SRC}3 x)
 {{
   return ({DST}3)(convert_{DST}2(x.s01), convert_{DST}(x.s2));
-}}""".format(SRC=src, DST=dst, M=mode))
+}}""".format(
+            SRC=src, DST=dst, M=mode
+        )
+    )
 
-  if close_conditional:
-    print("#endif")
+    if close_conditional:
+        print("#endif")
 
 
 for src in types:
-  for dst in types:
-    generate_default_conversion(src, dst, '')
+    for dst in types:
+        generate_default_conversion(src, dst, "")
 
 for src in int_types:
-  for dst in int_types:
-    for mode in rounding_modes:
-      generate_default_conversion(src, dst, mode)
+    for dst in int_types:
+        for mode in rounding_modes:
+            generate_default_conversion(src, dst, mode)
 
 #
 # Saturated Conversions To Integers
@@ -229,97 +271,127 @@ def generate_default_conversion(src, dst, mode):
 # conversions with saturation.
 #
 
+
 def generate_saturated_conversion(src, dst, size):
-  # Header
-  close_conditional = conditional_guard(src, dst)
-  print("""_CLC_DEF _CLC_OVERLOAD
+    # Header
+    close_conditional = conditional_guard(src, dst)
+    print(
+        """_CLC_DEF _CLC_OVERLOAD
 {DST}{N} convert_{DST}{N}_sat({SRC}{N} x)
-{{""".format(DST=dst, SRC=src, N=size))
-
-  # FIXME: This is a work around for lack of select function with
-  # signed third argument when the first two arguments are unsigned types.
-  # We cast to the signed type for sign-extension, then do a bitcast to
-  # the unsigned type.
-  if dst in unsigned_types:
-    bool_prefix = "as_{DST}{N}(convert_{BOOL}{N}".format(DST=dst, BOOL=bool_type[dst], N=size);
-    bool_suffix = ")"
-  else:
-    bool_prefix = "convert_{BOOL}{N}".format(BOOL=bool_type[dst], N=size);
-    bool_suffix = ""
-
-  # Body
-  if src == dst:
-
-    # Conversion between same types
-    print("  return x;")
-
-  elif src in float_types:
-
-    # Conversion from float to int
-    print("""  {DST}{N} y = convert_{DST}{N}(x);
+{{""".format(
+            DST=dst, SRC=src, N=size
+        )
+    )
+
+    # FIXME: This is a work around for lack of select function with
+    # signed third argument when the first two arguments are unsigned types.
+    # We cast to the signed type for sign-extension, then do a bitcast to
+    # the unsigned type.
+    if dst in unsigned_types:
+        bool_prefix = "as_{DST}{N}(convert_{BOOL}{N}".format(
+            DST=dst, BOOL=bool_type[dst], N=size
+        )
+        bool_suffix = ")"
+    else:
+        bool_prefix = "convert_{BOOL}{N}".format(BOOL=bool_type[dst], N=size)
+        bool_suffix = ""
+
+    # Body
+    if src == dst:
+
+        # Conversion between same types
+        print("  return x;")
+
+    elif src in float_types:
+
+        # Conversion from float to int
+        print(
+            """  {DST}{N} y = convert_{DST}{N}(x);
   y = select(y, ({DST}{N}){DST_MIN}, {BP}(x < ({SRC}{N}){DST_MIN}){BS});
   y = select(y, ({DST}{N}){DST_MAX}, {BP}(x > ({SRC}{N}){DST_MAX}){BS});
-  return y;""".format(SRC=src, DST=dst, N=size,
-      DST_MIN=limit_min[dst], DST_MAX=limit_max[dst],
-      BP=bool_prefix, BS=bool_suffix))
-
-  else:
-
-    # Integer to integer convesion with sizeof(src) == sizeof(dst)
-    if sizeof_type[src] == sizeof_type[dst]:
-      if src in unsigned_types:
-        print("  x = min(x, ({SRC}){DST_MAX});".format(SRC=src, DST_MAX=limit_max[dst]))
-      else:
-        print("  x = max(x, ({SRC})0);".format(SRC=src))
-
-    # Integer to integer conversion where sizeof(src) > sizeof(dst)
-    elif sizeof_type[src] > sizeof_type[dst]:
-      if src in unsigned_types:
-        print("  x = min(x, ({SRC}){DST_MAX});".format(SRC=src, DST_MAX=limit_max[dst]))
-      else:
-        print("  x = clamp(x, ({SRC}){DST_MIN}, ({SRC}){DST_MAX});"
-          .format(SRC=src, DST_MIN=limit_min[dst], DST_MAX=limit_max[dst]))
-
-    # Integer to integer conversion where sizeof(src) < sizeof(dst)
-    elif src not in unsigned_types and dst in unsigned_types:
-        print("  x = max(x, ({SRC})0);".format(SRC=src))
-
-    print("  return convert_{DST}{N}(x);".format(DST=dst, N=size))
-
-  # Footer
-  print("}")
-  if close_conditional:
-    print("#endif")
+  return y;""".format(
+                SRC=src,
+                DST=dst,
+                N=size,
+                DST_MIN=limit_min[dst],
+                DST_MAX=limit_max[dst],
+                BP=bool_prefix,
+                BS=bool_suffix,
+            )
+        )
+
+    else:
+
+        # Integer to integer conversion with sizeof(src) == sizeof(dst)
+        if sizeof_type[src] == sizeof_type[dst]:
+            if src in unsigned_types:
+                print(
+                    "  x = min(x, ({SRC}){DST_MAX});".format(
+                        SRC=src, DST_MAX=limit_max[dst]
+                    )
+                )
+            else:
+                print("  x = max(x, ({SRC})0);".format(SRC=src))
+
+        # Integer to integer conversion where sizeof(src) > sizeof(dst)
+        elif sizeof_type[src] > sizeof_type[dst]:
+            if src in unsigned_types:
+                print(
+                    "  x = min(x, ({SRC}){DST_MAX});".format(
+                        SRC=src, DST_MAX=limit_max[dst]
+                    )
+                )
+            else:
+                print(
+                    "  x = clamp(x, ({SRC}){DST_MIN}, ({SRC}){DST_MAX});".format(
+                        SRC=src, DST_MIN=limit_min[dst], DST_MAX=limit_max[dst]
+                    )
+                )
+
+        # Integer to integer conversion where sizeof(src) < sizeof(dst)
+        elif src not in unsigned_types and dst in unsigned_types:
+            print("  x = max(x, ({SRC})0);".format(SRC=src))
+
+        print("  return convert_{DST}{N}(x);".format(DST=dst, N=size))
+
+    # Footer
+    print("}")
+    if close_conditional:
+        print("#endif")
 
 
 for src in types:
-  for dst in int_types:
-    for size in vector_sizes:
-      generate_saturated_conversion(src, dst, size)
+    for dst in int_types:
+        for size in vector_sizes:
+            generate_saturated_conversion(src, dst, size)
 
 
 def generate_saturated_conversion_with_rounding(src, dst, size, mode):
-  # Header
-  close_conditional = conditional_guard(src, dst)
+    # Header
+    close_conditional = conditional_guard(src, dst)
 
-  # Body
-  print("""_CLC_DEF _CLC_OVERLOAD
+    # Body
+    print(
+        """_CLC_DEF _CLC_OVERLOAD
 {DST}{N} convert_{DST}{N}_sat{M}({SRC}{N} x)
 {{
   return convert_{DST}{N}_sat(x);
 }}
-""".format(DST=dst, SRC=src, N=size, M=mode))
+""".format(
+            DST=dst, SRC=src, N=size, M=mode
+        )
+    )
 
-  # Footer
-  if close_conditional:
-    print("#endif")
+    # Footer
+    if close_conditional:
+        print("#endif")
 
 
 for src in int_types:
-  for dst in int_types:
-    for size in vector_sizes:
-      for mode in rounding_modes:
-        generate_saturated_conversion_with_rounding(src, dst, size, mode)
+    for dst in int_types:
+        for size in vector_sizes:
+            for mode in rounding_modes:
+                generate_saturated_conversion_with_rounding(src, dst, size, mode)
 
 #
 # Conversions To/From Floating-Point With Rounding
@@ -335,59 +407,81 @@ def generate_saturated_conversion_with_rounding(src, dst, size, mode):
 # Only conversions to integers can have saturation.
 #
 
+
 def generate_float_conversion(src, dst, size, mode, sat):
-  # Header
-  close_conditional = conditional_guard(src, dst)
-  print("""_CLC_DEF _CLC_OVERLOAD
+    # Header
+    close_conditional = conditional_guard(src, dst)
+    print(
+        """_CLC_DEF _CLC_OVERLOAD
 {DST}{N} convert_{DST}{N}{S}{M}({SRC}{N} x)
-{{""".format(SRC=src, DST=dst, N=size, M=mode, S=sat))
-
-  # Perform conversion
-  if dst in int_types:
-    if mode == '_rte':
-      print("  x = rint(x);");
-    elif mode == '_rtp':
-      print("  x = ceil(x);");
-    elif mode == '_rtn':
-      print("  x = floor(x);");
-    print("  return convert_{DST}{N}{S}(x);".format(DST=dst, N=size, S=sat))
-  elif mode == '_rte':
-    print("  return convert_{DST}{N}(x);".format(DST=dst, N=size))
-  else:
-    print("  {DST}{N} r = convert_{DST}{N}(x);".format(DST=dst, N=size))
-    print("  {SRC}{N} y = convert_{SRC}{N}(r);".format(SRC=src, N=size))
-    if mode == '_rtz':
-      if src in int_types:
-        print("  {USRC}{N} abs_x = abs(x);".format(USRC=unsigned_type[src], N=size))
-        print("  {USRC}{N} abs_y = abs(y);".format(USRC=unsigned_type[src], N=size))
-      else:
-        print("  {SRC}{N} abs_x = fabs(x);".format(SRC=src, N=size))
-        print("  {SRC}{N} abs_y = fabs(y);".format(SRC=src, N=size))
-      print("  return select(r, nextafter(r, sign(r) * ({DST}{N})-INFINITY), convert_{BOOL}{N}(abs_y > abs_x));"
-        .format(DST=dst, N=size, BOOL=bool_type[dst]))
-    if mode == '_rtp':
-      print("  return select(r, nextafter(r, ({DST}{N})INFINITY), convert_{BOOL}{N}(y < x));"
-        .format(DST=dst, N=size, BOOL=bool_type[dst]))
-    if mode == '_rtn':
-      print("  return select(r, nextafter(r, ({DST}{N})-INFINITY), convert_{BOOL}{N}(y > x));"
-        .format(DST=dst, N=size, BOOL=bool_type[dst]))
-
-  # Footer
-  print("}")
-  if close_conditional:
-    print("#endif")
+{{""".format(
+            SRC=src, DST=dst, N=size, M=mode, S=sat
+        )
+    )
+
+    # Perform conversion
+    if dst in int_types:
+        if mode == "_rte":
+            print("  x = rint(x);")
+        elif mode == "_rtp":
+            print("  x = ceil(x);")
+        elif mode == "_rtn":
+            print("  x = floor(x);")
+        print("  return convert_{DST}{N}{S}(x);".format(DST=dst, N=size, S=sat))
+    elif mode == "_rte":
+        print("  return convert_{DST}{N}(x);".format(DST=dst, N=size))
+    else:
+        print("  {DST}{N} r = convert_{DST}{N}(x);".format(DST=dst, N=size))
+        print("  {SRC}{N} y = convert_{SRC}{N}(r);".format(SRC=src, N=size))
+        if mode == "_rtz":
+            if src in int_types:
+                print(
+                    "  {USRC}{N} abs_x = abs(x);".format(
+                        USRC=unsigned_type[src], N=size
+                    )
+                )
+                print(
+                    "  {USRC}{N} abs_y = abs(y);".format(
+                        USRC=unsigned_type[src], N=size
+                    )
+                )
+            else:
+                print("  {SRC}{N} abs_x = fabs(x);".format(SRC=src, N=size))
+                print("  {SRC}{N} abs_y = fabs(y);".format(SRC=src, N=size))
+            print(
+                "  return select(r, nextafter(r, sign(r) * ({DST}{N})-INFINITY), convert_{BOOL}{N}(abs_y > abs_x));".format(
+                    DST=dst, N=size, BOOL=bool_type[dst]
+                )
+            )
+        if mode == "_rtp":
+            print(
+                "  return select(r, nextafter(r, ({DST}{N})INFINITY), convert_{BOOL}{N}(y < x));".format(
+                    DST=dst, N=size, BOOL=bool_type[dst]
+                )
+            )
+        if mode == "_rtn":
+            print(
+                "  return select(r, nextafter(r, ({DST}{N})-INFINITY), convert_{BOOL}{N}(y > x));".format(
+                    DST=dst, N=size, BOOL=bool_type[dst]
+                )
+            )
+
+    # Footer
+    print("}")
+    if close_conditional:
+        print("#endif")
 
 
 for src in float_types:
-  for dst in int_types:
-    for size in vector_sizes:
-      for mode in rounding_modes:
-        for sat in saturation:
-          generate_float_conversion(src, dst, size, mode, sat)
+    for dst in int_types:
+        for size in vector_sizes:
+            for mode in rounding_modes:
+                for sat in saturation:
+                    generate_float_conversion(src, dst, size, mode, sat)
 
 
 for src in types:
-  for dst in float_types:
-    for size in vector_sizes:
-      for mode in rounding_modes:
-        generate_float_conversion(src, dst, size, mode, '')
+    for dst in float_types:
+        for size in vector_sizes:
+            for mode in rounding_modes:
+                generate_float_conversion(src, dst, size, mode, "")

diff  --git a/lld/docs/conf.py b/lld/docs/conf.py
index 95befddf80ea3..6f411ed3863d4 100644
--- a/lld/docs/conf.py
+++ b/lld/docs/conf.py
@@ -16,92 +16,92 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
+extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'lld'
-copyright = u'2011-%d, LLVM Project' % date.today().year
+project = "lld"
+copyright = "2011-%d, LLVM Project" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%Y-%m-%d'
+today_fmt = "%Y-%m-%d"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'llvm-theme'
+html_theme = "llvm-theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["."]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # If given, this must be the name of an image file (path relative to the
 # configuration directory) that is the favicon of the docs. Modern browsers use
@@ -109,110 +109,104 @@
 # icon file (.ico), which is 16x16 or 32x32 pixels large. Default: None.  The
 # image file will be copied to the _static directory of the output HTML, but
 # only if the file does not already exist there.
-html_favicon = '_static/favicon.ico'
+html_favicon = "_static/favicon.ico"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d'
+html_last_updated_fmt = "%Y-%m-%d"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html']}
+html_sidebars = {"index": ["indexsidebar.html"]}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
 # html_additional_pages = {'index': 'index.html'}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'llddoc'
+htmlhelp_basename = "llddoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('contents', 'lld.tex', u'lld Documentation',
-   u'LLVM project', 'manual'),
+    ("contents", "lld.tex", "lld Documentation", "LLVM project", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    ('contents', 'lld', u'lld Documentation',
-     [u'LLVM project'], 1)
-]
+man_pages = [("contents", "lld", "lld Documentation", ["LLVM project"], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -221,19 +215,25 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('contents', 'lld', u'lld Documentation',
-   u'LLVM project', 'lld', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "contents",
+        "lld",
+        "lld Documentation",
+        "LLVM project",
+        "lld",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 
 # FIXME: Define intersphinx configuration.

diff  --git a/lld/test/COFF/lit.local.cfg b/lld/test/COFF/lit.local.cfg
index 5d0c856d2a108..2ba713fb65778 100644
--- a/lld/test/COFF/lit.local.cfg
+++ b/lld/test/COFF/lit.local.cfg
@@ -1 +1 @@
-config.environment['LLD_IN_TEST'] = '2'
+config.environment["LLD_IN_TEST"] = "2"

diff  --git a/lld/test/ELF/lit.local.cfg b/lld/test/ELF/lit.local.cfg
index ad2cf48e40f0a..4e125419e8b60 100644
--- a/lld/test/ELF/lit.local.cfg
+++ b/lld/test/ELF/lit.local.cfg
@@ -1,8 +1,8 @@
-config.suffixes = ['.test', '.s', '.ll', '.bat']
+config.suffixes = [".test", ".s", ".ll", ".bat"]
 
 # The environment variable DFLTCC=0 disables use of the hardware compression
 # facility on SystemZ.  When this facility is enabled, slightly different
 # compression results can be seen, which can cause spurious failures in the
 # compressed-debug-level.test test case.
-if 's390x' in config.host_triple:
-    config.environment['DFLTCC'] = '0'
+if "s390x" in config.host_triple:
+    config.environment["DFLTCC"] = "0"

diff  --git a/lld/test/MachO/Inputs/DependencyDump.py b/lld/test/MachO/Inputs/DependencyDump.py
index b1c1151d33fab..bbd14d3ae701f 100644
--- a/lld/test/MachO/Inputs/DependencyDump.py
+++ b/lld/test/MachO/Inputs/DependencyDump.py
@@ -7,17 +7,17 @@
 
 f = open(sys.argv[1], "rb")
 byte = f.read(1)
-while byte != b'':
-    if byte == b'\x00':
+while byte != b"":
+    if byte == b"\x00":
         sys.stdout.write("lld-version: ")
-    elif byte == b'\x10':
+    elif byte == b"\x10":
         sys.stdout.write("input-file: ")
-    elif byte == b'\x11':
+    elif byte == b"\x11":
         sys.stdout.write("not-found: ")
-    elif byte == b'\x40':
+    elif byte == b"\x40":
         sys.stdout.write("output-file: ")
     byte = f.read(1)
-    while byte != b'\x00':
+    while byte != b"\x00":
         sys.stdout.write(byte.decode("ascii"))
         byte = f.read(1)
     sys.stdout.write("\n")

diff  --git a/lld/test/MachO/Inputs/code-signature-check.py b/lld/test/MachO/Inputs/code-signature-check.py
index 75c01a2174c7e..2efcf80bdd41d 100644
--- a/lld/test/MachO/Inputs/code-signature-check.py
+++ b/lld/test/MachO/Inputs/code-signature-check.py
@@ -24,15 +24,25 @@
 import sys
 import typing
 
+
 class CodeDirectoryVersion:
     SUPPORTSSCATTER = 0x20100
     SUPPORTSTEAMID = 0x20200
     SUPPORTSCODELIMIT64 = 0x20300
     SUPPORTSEXECSEG = 0x20400
 
+
 class CodeDirectory:
     @staticmethod
-    def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20100', 'CodeDirectoryV20200', 'CodeDirectoryV20300', 'CodeDirectoryV20400']:
+    def make(
+        buf: memoryview,
+    ) -> typing.Union[
+        "CodeDirectoryBase",
+        "CodeDirectoryV20100",
+        "CodeDirectoryV20200",
+        "CodeDirectoryV20300",
+        "CodeDirectoryV20400",
+    ]:
         _magic, _length, version = struct.unpack_from(">III", buf, 0)
         subtype = {
             CodeDirectoryVersion.SUPPORTSSCATTER: CodeDirectoryV20100,
@@ -43,6 +53,7 @@ def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20
 
         return subtype._make(struct.unpack_from(subtype._format(), buf, 0))
 
+
 class CodeDirectoryBase(typing.NamedTuple):
     magic: int
     length: int
@@ -63,6 +74,7 @@ class CodeDirectoryBase(typing.NamedTuple):
     def _format() -> str:
         return ">IIIIIIIIIBBBBI"
 
+
 class CodeDirectoryV20100(typing.NamedTuple):
     magic: int
     length: int
@@ -85,6 +97,7 @@ class CodeDirectoryV20100(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryBase._format() + "I"
 
+
 class CodeDirectoryV20200(typing.NamedTuple):
     magic: int
     length: int
@@ -109,6 +122,7 @@ class CodeDirectoryV20200(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20100._format() + "I"
 
+
 class CodeDirectoryV20300(typing.NamedTuple):
     magic: int
     length: int
@@ -136,6 +150,7 @@ class CodeDirectoryV20300(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20200._format() + "IQ"
 
+
 class CodeDirectoryV20400(typing.NamedTuple):
     magic: int
     length: int
@@ -167,13 +182,16 @@ class CodeDirectoryV20400(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20300._format() + "QQQ"
 
+
 class CodeDirectoryBlobIndex(typing.NamedTuple):
     type_: int
     offset: int
 
     @staticmethod
-    def make(buf: memoryview) -> 'CodeDirectoryBlobIndex':
-        return CodeDirectoryBlobIndex._make(struct.unpack_from(CodeDirectoryBlobIndex.__format(), buf, 0))
+    def make(buf: memoryview) -> "CodeDirectoryBlobIndex":
+        return CodeDirectoryBlobIndex._make(
+            struct.unpack_from(CodeDirectoryBlobIndex.__format(), buf, 0)
+        )
 
     @staticmethod
     def bytesize() -> int:
@@ -183,6 +201,7 @@ def bytesize() -> int:
     def __format() -> str:
         return ">II"
 
+
 class CodeDirectorySuperBlob(typing.NamedTuple):
     magic: int
     length: int
@@ -190,7 +209,7 @@ class CodeDirectorySuperBlob(typing.NamedTuple):
     blob_indices: typing.List[CodeDirectoryBlobIndex]
 
     @staticmethod
-    def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
+    def make(buf: memoryview) -> "CodeDirectorySuperBlob":
         super_blob_layout = ">III"
         super_blob = struct.unpack_from(super_blob_layout, buf, 0)
 
@@ -202,17 +221,25 @@ def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
 
         return CodeDirectorySuperBlob(*super_blob, blob_indices)
 
+
 def unpack_null_terminated_string(buf: memoryview) -> str:
     b = bytes(itertools.takewhile(lambda b: b != 0, buf))
     return b.decode()
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('binary', type=argparse.FileType('rb'), help='The file to analyze')
-    parser.add_argument('offset', type=int, help='Offset to start of Code Directory data')
-    parser.add_argument('size', type=int, help='Size of Code Directory data')
-    parser.add_argument('code_offset', type=int, help='Offset to start of code pages to hash')
-    parser.add_argument('code_size', type=int, help='Size of the code pages to hash')
+    parser.add_argument(
+        "binary", type=argparse.FileType("rb"), help="The file to analyze"
+    )
+    parser.add_argument(
+        "offset", type=int, help="Offset to start of Code Directory data"
+    )
+    parser.add_argument("size", type=int, help="Size of Code Directory data")
+    parser.add_argument(
+        "code_offset", type=int, help="Offset to start of code pages to hash"
+    )
+    parser.add_argument("code_size", type=int, help="Size of the code pages to hash")
 
     args = parser.parse_args()
 
@@ -229,7 +256,10 @@ def main():
         print(code_directory)
 
         ident_offset = code_directory_offset + code_directory.identOffset
-        print("Code Directory ID: " + unpack_null_terminated_string(super_blob_mem[ident_offset:]))
+        print(
+            "Code Directory ID: "
+            + unpack_null_terminated_string(super_blob_mem[ident_offset:])
+        )
 
         code_offset = args.code_offset
         code_end = code_offset + args.code_size
@@ -238,7 +268,9 @@ def main():
 
         hashes_offset = code_directory_offset + code_directory.hashOffset
         for idx in range(code_directory.nCodeSlots):
-            hash_bytes = bytes(super_blob_mem[hashes_offset:hashes_offset+code_directory.hashSize])
+            hash_bytes = bytes(
+                super_blob_mem[hashes_offset : hashes_offset + code_directory.hashSize]
+            )
             hashes_offset += code_directory.hashSize
 
             hasher = hashlib.sha256()
@@ -253,5 +285,5 @@ def main():
                 sys.exit(-1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/lld/test/MachO/lit.local.cfg b/lld/test/MachO/lit.local.cfg
index 08bec0a1aff19..54ee0c1d440a3 100644
--- a/lld/test/MachO/lit.local.cfg
+++ b/lld/test/MachO/lit.local.cfg
@@ -5,7 +5,7 @@ import os
 # FIXME: The MachO back-end currently does not respect endianness when
 # accessing binary data structures, and therefore only works correctly
 # on little-endian host systems.  Skip all tests on big-endian hosts.
-if sys.byteorder == 'big':
+if sys.byteorder == "big":
     config.unsupported = True
 
 # We specify the most commonly-used archs and platform versions in our tests
@@ -16,17 +16,21 @@ if sys.byteorder == 'big':
 # flag will append to the set of library roots. As such, we define a separate
 # alias for each platform.
 
-lld_watchos = ('ld64.lld -lSystem -arch arm64_32 -platform_version watchos 7.0 8.0 -syslibroot ' +
-    os.path.join(config.test_source_root, "MachO", "Inputs", "WatchOS.sdk"))
-config.substitutions.append(('%lld-watchos', lld_watchos + ' -fatal_warnings'))
-config.substitutions.append(('%no-fatal-warnings-lld-watchos', lld_watchos))
+lld_watchos = (
+    "ld64.lld -lSystem -arch arm64_32 -platform_version watchos 7.0 8.0 -syslibroot "
+    + os.path.join(config.test_source_root, "MachO", "Inputs", "WatchOS.sdk")
+)
+config.substitutions.append(("%lld-watchos", lld_watchos + " -fatal_warnings"))
+config.substitutions.append(("%no-fatal-warnings-lld-watchos", lld_watchos))
 
-config.substitutions.append(('%no-arg-lld', 'ld64.lld'))
+config.substitutions.append(("%no-arg-lld", "ld64.lld"))
 
 # Since most of our tests are written around x86_64, we give this platform the
 # shortest substitution of "%lld".
-lld = ('ld64.lld -arch x86_64 -platform_version macos 11.0 11.0 -syslibroot ' +
-    os.path.join(config.test_source_root, "MachO", "Inputs", "MacOSX.sdk"))
-config.substitutions.append(('%lld', lld + ' -lSystem -fatal_warnings'))
-config.substitutions.append(('%no-lsystem-lld', lld + ' -fatal_warnings'))
-config.substitutions.append(('%no-fatal-warnings-lld', lld + ' -lSystem'))
+lld = (
+    "ld64.lld -arch x86_64 -platform_version macos 11.0 11.0 -syslibroot "
+    + os.path.join(config.test_source_root, "MachO", "Inputs", "MacOSX.sdk")
+)
+config.substitutions.append(("%lld", lld + " -lSystem -fatal_warnings"))
+config.substitutions.append(("%no-lsystem-lld", lld + " -fatal_warnings"))
+config.substitutions.append(("%no-fatal-warnings-lld", lld + " -lSystem"))

diff --git a/lld/test/MachO/tools/generate-cfi-funcs.py b/lld/test/MachO/tools/generate-cfi-funcs.py
index e2b75d0cd9a5a..4617bd578e591 100755
--- a/lld/test/MachO/tools/generate-cfi-funcs.py
+++ b/lld/test/MachO/tools/generate-cfi-funcs.py
@@ -16,64 +16,80 @@
 func_size_low = 0x10
 func_size_high = 0x100
 saved_regs = ["%r15", "%r14", "%r13", "%r12", "%rbx"]
-saved_regs_combined = list(list(permutations(saved_regs, i))
-                           for i in range(0,6))
+saved_regs_combined = list(list(permutations(saved_regs, i)) for i in range(0, 6))
+
 
 def print_function(name):
-  global lsda_odds
-  have_lsda = (random.random() < lsda_odds)
-  frame_size = random.randint(4, 64) * 16
-  frame_offset = -random.randint(0, int(frame_size/16 - 4)) * 16
-  global func_size_low, func_size_high
-  func_size = random.randint(func_size_low, func_size_high) * 0x10
-  func_size_high += 1
-  if func_size_high % 0x10 == 0:
-    func_size_low += 1
-
-  print("""\
+    global lsda_odds
+    have_lsda = random.random() < lsda_odds
+    frame_size = random.randint(4, 64) * 16
+    frame_offset = -random.randint(0, int(frame_size / 16 - 4)) * 16
+    global func_size_low, func_size_high
+    func_size = random.randint(func_size_low, func_size_high) * 0x10
+    func_size_high += 1
+    if func_size_high % 0x10 == 0:
+        func_size_low += 1
+
+    print(
+        """\
 ### %s frame=%d lsda=%s size=%d
     .section __TEXT,__text,regular,pure_instructions
     .p2align 4, 0x90
     .globl %s
 %s:
-    .cfi_startproc""" % (
-        name, frame_size, have_lsda, func_size, name, name))
-  if have_lsda:
-    global lsda_n
-    lsda_n += 1
-    print("""\
+    .cfi_startproc"""
+        % (name, frame_size, have_lsda, func_size, name, name)
+    )
+    if have_lsda:
+        global lsda_n
+        lsda_n += 1
+        print(
+            """\
     .cfi_personality 155, ___gxx_personality_v0
-    .cfi_lsda 16, Lexception%d""" % lsda_n)
-  print("""\
+    .cfi_lsda 16, Lexception%d"""
+            % lsda_n
+        )
+    print(
+        """\
     pushq %%rbp
     .cfi_def_cfa_offset %d
     .cfi_offset %%rbp, %d
     movq %%rsp, %%rbp
-    .cfi_def_cfa_register %%rbp""" % (frame_size, frame_offset + 6*8))
-  print("""\
+    .cfi_def_cfa_register %%rbp"""
+        % (frame_size, frame_offset + 6 * 8)
+    )
+    print(
+        """\
     .fill %d
     popq %%rbp
     retq
     .cfi_endproc
-""" % (func_size - 6))
+"""
+        % (func_size - 6)
+    )
 
-  if have_lsda:
-    print("""\
+    if have_lsda:
+        print(
+            """\
     .section __TEXT,__gcc_except_tab
     .p2align 2
 Lexception%d:
     .space 0x10
-""" % lsda_n)
-  return func_size
+"""
+            % lsda_n
+        )
+    return func_size
+
 
 def random_seed():
-  """Generate a seed that can easily be passed back in via --seed=STRING"""
-  return ''.join(random.choice(string.ascii_lowercase) for i in range(10))
+    """Generate a seed that can easily be passed back in via --seed=STRING"""
+    return "".join(random.choice(string.ascii_lowercase) for i in range(10))
+
 
 def main():
-  parser = argparse.ArgumentParser(
-    description=__doc__,
-    epilog="""\
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        epilog="""\
 Function sizes begin small then monotonically increase.  The goal is
 to produce early pages that are full and later pages that are less
 than full, in order to test handling for both cases.  Full pages
@@ -83,39 +99,58 @@ def main():
 Use --pages=N or --functions=N to control the size of the output.
 Default is --pages=2, meaning produce at least two full pages of
 compact unwind entries, plus some more. The calculation is sloppy.
-""")
-  parser.add_argument('--seed', type=str, default=random_seed(),
-                      help='Seed the random number generator')
-  parser.add_argument('--pages', type=int, default=2,
-                      help='Number of compact-unwind pages')
-  parser.add_argument('--functions', type=int, default=None,
-                      help='Number of functions to generate')
-  parser.add_argument('--encodings', type=int, default=127,
-                      help='Maximum number of unique unwind encodings (default = 127)')
-  parser.add_argument('--lsda', type=int, default=0,
-                      help='Percentage of functions with personality & LSDA (default = 10')
-  args = parser.parse_args()
-  random.seed(args.seed)
-  p2align = 14
-  global lsda_odds
-  lsda_odds = args.lsda / 100.0
-
-  print("""\
+""",
+    )
+    parser.add_argument(
+        "--seed",
+        type=str,
+        default=random_seed(),
+        help="Seed the random number generator",
+    )
+    parser.add_argument(
+        "--pages", type=int, default=2, help="Number of compact-unwind pages"
+    )
+    parser.add_argument(
+        "--functions", type=int, default=None, help="Number of functions to generate"
+    )
+    parser.add_argument(
+        "--encodings",
+        type=int,
+        default=127,
+        help="Maximum number of unique unwind encodings (default = 127)",
+    )
+    parser.add_argument(
+        "--lsda",
+        type=int,
+        default=0,
+        help="Percentage of functions with personality & LSDA (default = 10",
+    )
+    args = parser.parse_args()
+    random.seed(args.seed)
+    p2align = 14
+    global lsda_odds
+    lsda_odds = args.lsda / 100.0
+
+    print(
+        """\
 ### seed=%s lsda=%f p2align=%d
     .section __TEXT,__text,regular,pure_instructions
     .p2align %d, 0x90
-""" % (args.seed, lsda_odds, p2align, p2align))
-
-  size = 0
-  base = (1 << p2align)
-  if args.functions:
-    for n in range(args.functions):
-      size += print_function("x%08x" % (size+base))
-  else:
-    while size < (args.pages << 24):
-      size += print_function("x%08x" % (size+base))
-
-  print("""\
+"""
+        % (args.seed, lsda_odds, p2align, p2align)
+    )
+
+    size = 0
+    base = 1 << p2align
+    if args.functions:
+        for n in range(args.functions):
+            size += print_function("x%08x" % (size + base))
+    else:
+        while size < (args.pages << 24):
+            size += print_function("x%08x" % (size + base))
+
+    print(
+        """\
     .section __TEXT,__text,regular,pure_instructions
     .globl _main
     .p2align 4, 0x90
@@ -125,8 +160,9 @@ def main():
     .p2align 4, 0x90
 ___gxx_personality_v0:
     retq
-""")
+"""
+    )
 
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()

diff --git a/lld/test/MachO/tools/generate-thunkable-program.py b/lld/test/MachO/tools/generate-thunkable-program.py
index 57e32a837457a..76d24d400574c 100755
--- a/lld/test/MachO/tools/generate-thunkable-program.py
+++ b/lld/test/MachO/tools/generate-thunkable-program.py
@@ -15,316 +15,1305 @@
 # This list comes from libSystem.tbd and contains a sizeable subset
 # of dylib calls available for all MacOS target archs.
 libSystem_calls = (
-  "__CurrentRuneLocale", "__DefaultRuneLocale", "__Exit", "__NSGetArgc",
-  "__NSGetArgv", "__NSGetEnviron", "__NSGetMachExecuteHeader",
-  "__NSGetProgname", "__PathLocale", "__Read_RuneMagi", "___Balloc_D2A",
-  "___Bfree_D2A", "___ULtod_D2A", "____mb_cur_max", "____mb_cur_max_l",
-  "____runetype", "____runetype_l", "____tolower", "____tolower_l",
-  "____toupper", "____toupper_l", "___add_ovflpage", "___addel",
-  "___any_on_D2A", "___assert_rtn", "___b2d_D2A", "___big_delete",
-  "___big_insert", "___big_keydata", "___big_return", "___big_split",
-  "___bigtens_D2A", "___bt_close", "___bt_cmp", "___bt_defcmp",
-  "___bt_defpfx", "___bt_delete", "___bt_dleaf", "___bt_fd",
-  "___bt_free", "___bt_get", "___bt_new", "___bt_open", "___bt_pgin",
-  "___bt_pgout", "___bt_put", "___bt_ret", "___bt_search", "___bt_seq",
-  "___bt_setcur", "___bt_split", "___bt_sync", "___buf_free",
-  "___call_hash", "___cleanup", "___cmp_D2A", "___collate_equiv_match",
-  "___collate_load_error", "___collate_lookup", "___collate_lookup_l",
-  "___copybits_D2A", "___cxa_atexit", "___cxa_finalize",
-  "___cxa_finalize_ranges", "___cxa_thread_atexit", "___d2b_D2A",
-  "___dbpanic", "___decrement_D2A", "___default_hash", "___default_utx",
-  "___delpair", "___diff_D2A", "___dtoa", "___expand_table",
-  "___fflush", "___fgetwc", "___find_bigpair", "___find_last_page",
-  "___fix_locale_grouping_str", "___fread", "___free_ovflpage",
-  "___freedtoa", "___gdtoa", "___gdtoa_locks", "___get_buf",
-  "___get_page", "___gethex_D2A", "___getonlyClocaleconv",
-  "___hash_open", "___hdtoa", "___hexdig_D2A", "___hexdig_init_D2A",
-  "___hexnan_D2A", "___hi0bits_D2A", "___hldtoa", "___i2b_D2A",
-  "___ibitmap", "___increment_D2A", "___isctype", "___istype",
-  "___istype_l", "___ldtoa", "___libc_init", "___lo0bits_D2A",
-  "___log2", "___lshift_D2A", "___maskrune", "___maskrune_l",
-  "___match_D2A", "___mb_cur_max", "___mb_sb_limit", "___memccpy_chk",
-  "___memcpy_chk", "___memmove_chk", "___memset_chk", "___mult_D2A",
-  "___multadd_D2A", "___nrv_alloc_D2A", "___opendir2", "___ovfl_delete",
-  "___ovfl_get", "___ovfl_put", "___pow5mult_D2A", "___put_page",
-  "___quorem_D2A", "___ratio_D2A", "___rec_close", "___rec_delete",
-  "___rec_dleaf", "___rec_fd", "___rec_fmap", "___rec_fpipe",
-  "___rec_get", "___rec_iput", "___rec_open", "___rec_put",
-  "___rec_ret", "___rec_search", "___rec_seq", "___rec_sync",
-  "___rec_vmap", "___rec_vpipe", "___reclaim_buf", "___rshift_D2A",
-  "___rv_alloc_D2A", "___s2b_D2A", "___sF", "___sclose", "___sdidinit",
-  "___set_ones_D2A", "___setonlyClocaleconv", "___sflags", "___sflush",
-  "___sfp", "___sfvwrite", "___sglue", "___sinit", "___slbexpand",
-  "___smakebuf", "___snprintf_chk", "___snprintf_object_size_chk",
-  "___split_page", "___sprintf_chk", "___sprintf_object_size_chk",
-  "___sread", "___srefill", "___srget", "___sseek", "___stack_chk_fail",
-  "___stack_chk_guard", "___stderrp", "___stdinp", "___stdoutp",
-  "___stpcpy_chk", "___stpncpy_chk", "___strcat_chk", "___strcp_D2A",
-  "___strcpy_chk", "___strlcat_chk", "___strlcpy_chk", "___strncat_chk",
-  "___strncpy_chk", "___strtodg", "___strtopdd", "___sum_D2A",
-  "___svfscanf", "___swbuf", "___swhatbuf", "___swrite", "___swsetup",
-  "___tens_D2A", "___tinytens_D2A", "___tolower", "___tolower_l",
-  "___toupper", "___toupper_l", "___trailz_D2A", "___ulp_D2A",
-  "___ungetc", "___ungetwc", "___vsnprintf_chk", "___vsprintf_chk",
-  "___wcwidth", "___wcwidth_l", "__allocenvstate", "__atexit_receipt",
-  "__c_locale", "__cleanup", "__closeutx", "__copyenv",
-  "__cthread_init_routine", "__deallocenvstate", "__endutxent",
-  "__flockfile_debug_stub", "__fseeko", "__ftello", "__fwalk",
-  "__getenvp", "__getutxent", "__getutxid", "__getutxline",
-  "__inet_aton_check", "__init_clock_port", "__int_to_time",
-  "__libc_fork_child", "__libc_initializer", "__long_to_time",
-  "__mkpath_np", "__mktemp", "__openutx", "__os_assert_log",
-  "__os_assert_log_ctx", "__os_assumes_log", "__os_assumes_log_ctx",
-  "__os_avoid_tail_call", "__os_crash", "__os_crash_callback",
-  "__os_crash_fmt", "__os_debug_log", "__os_debug_log_error_str",
-  "__putenvp", "__pututxline", "__rand48_add", "__rand48_mult",
-  "__rand48_seed", "__readdir_unlocked", "__reclaim_telldir",
-  "__seekdir", "__setenvp", "__setutxent", "__sigaction_nobind",
-  "__sigintr", "__signal_nobind", "__sigvec_nobind", "__sread",
-  "__sseek", "__subsystem_init", "__swrite", "__time32_to_time",
-  "__time64_to_time", "__time_to_int", "__time_to_long",
-  "__time_to_time32", "__time_to_time64", "__unsetenvp", "__utmpxname",
-  "_a64l", "_abort", "_abort_report_np", "_abs", "_acl_add_flag_np",
-  "_acl_add_perm", "_acl_calc_mask", "_acl_clear_flags_np",
-  "_acl_clear_perms", "_acl_copy_entry", "_acl_copy_ext",
-  "_acl_copy_ext_native", "_acl_copy_int", "_acl_copy_int_native",
-  "_acl_create_entry", "_acl_create_entry_np", "_acl_delete_def_file",
-  "_acl_delete_entry", "_acl_delete_fd_np", "_acl_delete_file_np",
-  "_acl_delete_flag_np", "_acl_delete_link_np", "_acl_delete_perm",
-  "_acl_dup", "_acl_free", "_acl_from_text", "_acl_get_entry",
-  "_acl_get_fd", "_acl_get_fd_np", "_acl_get_file", "_acl_get_flag_np",
-  "_acl_get_flagset_np", "_acl_get_link_np", "_acl_get_perm_np",
-  "_acl_get_permset", "_acl_get_permset_mask_np", "_acl_get_qualifier",
-  "_acl_get_tag_type", "_acl_init", "_acl_maximal_permset_mask_np",
-  "_acl_set_fd", "_acl_set_fd_np", "_acl_set_file", "_acl_set_flagset_np",
-  "_acl_set_link_np", "_acl_set_permset", "_acl_set_permset_mask_np",
-  "_acl_set_qualifier", "_acl_set_tag_type", "_acl_size", "_acl_to_text",
-  "_acl_valid", "_acl_valid_fd_np", "_acl_valid_file_np",
-  "_acl_valid_link", "_addr2ascii", "_alarm", "_alphasort",
-  "_arc4random", "_arc4random_addrandom", "_arc4random_buf",
-  "_arc4random_stir", "_arc4random_uniform", "_ascii2addr", "_asctime",
-  "_asctime_r", "_asprintf", "_asprintf_l", "_asxprintf",
-  "_asxprintf_exec", "_atexit", "_atexit_b", "_atof", "_atof_l",
-  "_atoi", "_atoi_l", "_atol", "_atol_l", "_atoll", "_atoll_l",
-  "_backtrace", "_backtrace_from_fp", "_backtrace_image_offsets",
-  "_backtrace_symbols", "_backtrace_symbols_fd", "_basename",
-  "_basename_r", "_bcopy", "_brk", "_bsd_signal", "_bsearch",
-  "_bsearch_b", "_btowc", "_btowc_l", "_catclose", "_catgets",
-  "_catopen", "_cfgetispeed", "_cfgetospeed", "_cfmakeraw",
-  "_cfsetispeed", "_cfsetospeed", "_cfsetspeed", "_cgetcap",
-  "_cgetclose", "_cgetent", "_cgetfirst", "_cgetmatch", "_cgetnext",
-  "_cgetnum", "_cgetset", "_cgetstr", "_cgetustr", "_chmodx_np",
-  "_clearerr", "_clearerr_unlocked", "_clock", "_clock_getres",
-  "_clock_gettime", "_clock_gettime_nsec_np", "_clock_port",
-  "_clock_sem", "_clock_settime", "_closedir", "_compat_mode",
-  "_confstr", "_copy_printf_domain", "_creat", "_crypt", "_ctermid",
-  "_ctermid_r", "_ctime", "_ctime_r", "_daemon", "_daylight",
-  "_dbm_clearerr", "_dbm_close", "_dbm_delete", "_dbm_dirfno",
-  "_dbm_error", "_dbm_fetch", "_dbm_firstkey", "_dbm_nextkey",
-  "_dbm_open", "_dbm_store", "_dbopen", "_devname", "_devname_r",
-  "_difftime", "_digittoint", "_digittoint_l", "_dirfd", "_dirname",
-  "_dirname_r", "_div", "_dprintf", "_dprintf_l", "_drand48",
-  "_duplocale", "_dxprintf", "_dxprintf_exec", "_ecvt", "_encrypt",
-  "_endttyent", "_endusershell", "_endutxent", "_endutxent_wtmp",
-  "_erand48", "_err", "_err_set_exit", "_err_set_exit_b",
-  "_err_set_file", "_errc", "_errx", "_execl", "_execle", "_execlp",
-  "_execv", "_execvP", "_execvp", "_exit", "_f_prealloc", "_fchmodx_np",
-  "_fclose", "_fcvt", "_fdopen", "_fdopendir", "_feof", "_feof_unlocked",
-  "_ferror", "_ferror_unlocked", "_fflagstostr", "_fflush", "_fgetc",
-  "_fgetln", "_fgetpos", "_fgetrune", "_fgets", "_fgetwc", "_fgetwc_l",
-  "_fgetwln", "_fgetwln_l", "_fgetws", "_fgetws_l", "_fileno",
-  "_fileno_unlocked", "_filesec_dup", "_filesec_free",
-  "_filesec_get_property", "_filesec_init", "_filesec_query_property",
-  "_filesec_set_property", "_filesec_unset_property", "_flockfile",
-  "_fmemopen", "_fmtcheck", "_fmtmsg", "_fnmatch", "_fopen", "_fork",
-  "_forkpty", "_fparseln", "_fprintf", "_fprintf_l", "_fpurge",
-  "_fputc", "_fputrune", "_fputs", "_fputwc", "_fputwc_l", "_fputws",
-  "_fputws_l", "_fread", "_free_printf_comp", "_free_printf_domain",
-  "_freelocale", "_freopen", "_fscanf", "_fscanf_l", "_fseek",
-  "_fseeko", "_fsetpos", "_fstatvfs", "_fstatx_np", "_fsync_volume_np",
-  "_ftell", "_ftello", "_ftime", "_ftok", "_ftrylockfile",
-  "_fts_children", "_fts_close", "_fts_open", "_fts_open_b",
-  "_fts_read", "_fts_set", "_ftw", "_fungetrune", "_funlockfile",
-  "_funopen", "_fwide", "_fwprintf", "_fwprintf_l", "_fwrite",
-  "_fwscanf", "_fwscanf_l", "_fxprintf", "_fxprintf_exec", "_gcvt",
-  "_getbsize", "_getc", "_getc_unlocked", "_getchar", "_getchar_unlocked",
-  "_getcwd", "_getdate", "_getdate_err", "_getdelim", "_getdiskbyname",
-  "_getenv", "_gethostid", "_gethostname", "_getipv4sourcefilter",
-  "_getlastlogx", "_getlastlogxbyname", "_getline", "_getloadavg",
-  "_getlogin", "_getlogin_r", "_getmntinfo", "_getmntinfo_r_np",
-  "_getmode", "_getopt", "_getopt_long", "_getopt_long_only",
-  "_getpagesize", "_getpass", "_getpeereid", "_getprogname", "_gets",
-  "_getsourcefilter", "_getsubopt", "_gettimeofday", "_getttyent",
-  "_getttynam", "_getusershell", "_getutmp", "_getutmpx", "_getutxent",
-  "_getutxent_wtmp", "_getutxid", "_getutxline", "_getvfsbyname",
-  "_getw", "_getwc", "_getwc_l", "_getwchar", "_getwchar_l", "_getwd",
-  "_glob", "_glob_b", "_globfree", "_gmtime", "_gmtime_r", "_grantpt",
-  "_hash_create", "_hash_destroy", "_hash_purge", "_hash_search",
-  "_hash_stats", "_hash_traverse", "_hcreate", "_hdestroy",
-  "_heapsort", "_heapsort_b", "_hsearch", "_imaxabs", "_imaxdiv",
-  "_inet_addr", "_inet_aton", "_inet_lnaof", "_inet_makeaddr",
-  "_inet_net_ntop", "_inet_net_pton", "_inet_neta", "_inet_netof",
-  "_inet_network", "_inet_nsap_addr", "_inet_nsap_ntoa", "_inet_ntoa",
-  "_inet_ntop", "_inet_ntop4", "_inet_ntop6", "_inet_pton",
-  "_initstate", "_insque", "_isalnum", "_isalnum_l", "_isalpha",
-  "_isalpha_l", "_isascii", "_isatty", "_isblank", "_isblank_l",
-  "_iscntrl", "_iscntrl_l", "_isdigit", "_isdigit_l", "_isgraph",
-  "_isgraph_l", "_ishexnumber", "_ishexnumber_l", "_isideogram",
-  "_isideogram_l", "_islower", "_islower_l", "_isnumber", "_isnumber_l",
-  "_isphonogram", "_isphonogram_l", "_isprint", "_isprint_l",
-  "_ispunct", "_ispunct_l", "_isrune", "_isrune_l", "_isspace",
-  "_isspace_l", "_isspecial", "_isspecial_l", "_isupper", "_isupper_l",
-  "_iswalnum", "_iswalnum_l", "_iswalpha", "_iswalpha_l", "_iswascii",
-  "_iswblank", "_iswblank_l", "_iswcntrl", "_iswcntrl_l", "_iswctype",
-  "_iswctype_l", "_iswdigit", "_iswdigit_l", "_iswgraph", "_iswgraph_l",
-  "_iswhexnumber", "_iswhexnumber_l", "_iswideogram", "_iswideogram_l",
-  "_iswlower", "_iswlower_l", "_iswnumber", "_iswnumber_l",
-  "_iswphonogram", "_iswphonogram_l", "_iswprint", "_iswprint_l",
-  "_iswpunct", "_iswpunct_l", "_iswrune", "_iswrune_l", "_iswspace",
-  "_iswspace_l", "_iswspecial", "_iswspecial_l", "_iswupper",
-  "_iswupper_l", "_iswxdigit", "_iswxdigit_l", "_isxdigit",
-  "_isxdigit_l", "_jrand48", "_kOSThermalNotificationPressureLevelName",
-  "_killpg", "_l64a", "_labs", "_lchflags", "_lchmod", "_lcong48",
-  "_ldiv", "_lfind", "_link_addr", "_link_ntoa", "_llabs", "_lldiv",
-  "_localeconv", "_localeconv_l", "_localtime", "_localtime_r",
-  "_lockf", "_login", "_login_tty", "_logout", "_logwtmp", "_lrand48",
-  "_lsearch", "_lstatx_np", "_lutimes", "_mblen", "_mblen_l",
-  "_mbmb", "_mbrlen", "_mbrlen_l", "_mbrrune", "_mbrtowc", "_mbrtowc_l",
-  "_mbrune", "_mbsinit", "_mbsinit_l", "_mbsnrtowcs", "_mbsnrtowcs_l",
-  "_mbsrtowcs", "_mbsrtowcs_l", "_mbstowcs", "_mbstowcs_l", "_mbtowc",
-  "_mbtowc_l", "_memmem", "_memset_s", "_mergesort", "_mergesort_b",
-  "_mkdirx_np", "_mkdtemp", "_mkdtempat_np", "_mkfifox_np",
-  "_mkostemp", "_mkostemps", "_mkostempsat_np", "_mkpath_np",
-  "_mkpathat_np", "_mkstemp", "_mkstemp_dprotected_np", "_mkstemps",
-  "_mkstempsat_np", "_mktemp", "_mktime", "_monaddition", "_moncontrol",
-  "_moncount", "_moninit", "_monitor", "_monoutput", "_monreset",
-  "_monstartup", "_mpool_close", "_mpool_filter", "_mpool_get",
-  "_mpool_new", "_mpool_open", "_mpool_put", "_mpool_sync", "_mrand48",
-  "_nanosleep", "_new_printf_comp", "_new_printf_domain", "_newlocale",
-  "_nextwctype", "_nextwctype_l", "_nftw", "_nice", "_nl_langinfo",
-  "_nl_langinfo_l", "_nrand48", "_nvis", "_off32", "_off64",
-  "_offtime", "_open_memstream", "_open_with_subsystem",
-  "_open_wmemstream", "_opendev", "_opendir", "_openpty", "_openx_np",
-  "_optarg", "_opterr", "_optind", "_optopt", "_optreset", "_pause",
-  "_pclose", "_perror", "_popen", "_posix2time", "_posix_openpt",
-  "_posix_spawnp", "_printf", "_printf_l", "_psignal", "_psort",
-  "_psort_b", "_psort_r", "_ptsname", "_ptsname_r", "_putc",
-  "_putc_unlocked", "_putchar", "_putchar_unlocked", "_putenv",
-  "_puts", "_pututxline", "_putw", "_putwc", "_putwc_l", "_putwchar",
-  "_putwchar_l", "_qsort", "_qsort_b", "_qsort_r", "_querylocale",
-  "_radixsort", "_raise", "_rand", "_rand_r", "_random", "_rb_tree_count",
-  "_rb_tree_find_node", "_rb_tree_find_node_geq", "_rb_tree_find_node_leq",
-  "_rb_tree_init", "_rb_tree_insert_node", "_rb_tree_iterate",
-  "_rb_tree_remove_node", "_readdir", "_readdir_r", "_readpassphrase",
-  "_reallocf", "_realpath", "_recv", "_regcomp", "_regcomp_l",
-  "_regerror", "_regexec", "_regfree", "_register_printf_domain_function",
-  "_register_printf_domain_render_std", "_regncomp", "_regncomp_l",
-  "_regnexec", "_regwcomp", "_regwcomp_l", "_regwexec", "_regwncomp",
-  "_regwncomp_l", "_regwnexec", "_remove", "_remque", "_rewind",
-  "_rewinddir", "_rindex", "_rpmatch", "_sbrk", "_scandir",
-  "_scandir_b", "_scanf", "_scanf_l", "_seed48", "_seekdir", "_send",
-  "_setbuf", "_setbuffer", "_setenv", "_sethostid", "_sethostname",
-  "_setinvalidrune", "_setipv4sourcefilter", "_setkey", "_setlinebuf",
-  "_setlocale", "_setlogin", "_setmode", "_setpgrp", "_setprogname",
-  "_setrgid", "_setruid", "_setrunelocale", "_setsourcefilter",
-  "_setstate", "_settimeofday", "_setttyent", "_setusershell",
-  "_setutxent", "_setutxent_wtmp", "_setvbuf", "_sigaction",
-  "_sigaddset", "_sigaltstack", "_sigblock", "_sigdelset",
-  "_sigemptyset", "_sigfillset", "_sighold", "_sigignore",
-  "_siginterrupt", "_sigismember", "_signal", "_sigpause", "_sigrelse",
-  "_sigset", "_sigsetmask", "_sigvec", "_skip", "_sl_add", "_sl_find",
-  "_sl_free", "_sl_init", "_sleep", "_snprintf", "_snprintf_l",
-  "_snvis", "_sockatmark", "_sprintf", "_sprintf_l", "_sradixsort",
-  "_srand", "_srand48", "_sranddev", "_srandom", "_srandomdev",
-  "_sscanf", "_sscanf_l", "_stat_with_subsystem", "_statvfs",
-  "_statx_np", "_stpcpy", "_stpncpy", "_strcasecmp", "_strcasecmp_l",
-  "_strcasestr", "_strcasestr_l", "_strcat", "_strcoll", "_strcoll_l",
-  "_strcspn", "_strdup", "_strenvisx", "_strerror", "_strerror_r",
-  "_strfmon", "_strfmon_l", "_strftime", "_strftime_l", "_strmode",
-  "_strncasecmp", "_strncasecmp_l", "_strncat", "_strndup", "_strnstr",
-  "_strnunvis", "_strnunvisx", "_strnvis", "_strnvisx", "_strpbrk",
-  "_strptime", "_strptime_l", "_strrchr", "_strsenvisx", "_strsep",
-  "_strsignal", "_strsignal_r", "_strsnvis", "_strsnvisx", "_strspn",
-  "_strsvis", "_strsvisx", "_strtod", "_strtod_l", "_strtof",
-  "_strtof_l", "_strtofflags", "_strtoimax", "_strtoimax_l",
-  "_strtok", "_strtok_r", "_strtol", "_strtol_l", "_strtold",
-  "_strtold_l", "_strtoll", "_strtoll_l", "_strtonum", "_strtoq",
-  "_strtoq_l", "_strtoul", "_strtoul_l", "_strtoull", "_strtoull_l",
-  "_strtoumax", "_strtoumax_l", "_strtouq", "_strtouq_l", "_strunvis",
-  "_strunvisx", "_strvis", "_strvisx", "_strxfrm", "_strxfrm_l",
-  "_suboptarg", "_svis", "_swab", "_swprintf", "_swprintf_l",
-  "_swscanf", "_swscanf_l", "_sxprintf", "_sxprintf_exec",
-  "_sync_volume_np", "_sys_errlist", "_sys_nerr", "_sys_siglist",
-  "_sys_signame", "_sysconf", "_sysctl", "_sysctlbyname",
-  "_sysctlnametomib", "_system", "_tcdrain", "_tcflow", "_tcflush",
-  "_tcgetattr", "_tcgetpgrp", "_tcgetsid", "_tcsendbreak", "_tcsetattr",
-  "_tcsetpgrp", "_tdelete", "_telldir", "_tempnam", "_tfind",
-  "_thread_stack_pcs", "_time", "_time2posix", "_timegm", "_timelocal",
-  "_timeoff", "_times", "_timespec_get", "_timezone", "_timingsafe_bcmp",
-  "_tmpfile", "_tmpnam", "_toascii", "_tolower", "_tolower_l",
-  "_toupper", "_toupper_l", "_towctrans", "_towctrans_l", "_towlower",
-  "_towlower_l", "_towupper", "_towupper_l", "_tre_ast_new_catenation",
-  "_tre_ast_new_iter", "_tre_ast_new_literal", "_tre_ast_new_node",
-  "_tre_ast_new_union", "_tre_compile", "_tre_fill_pmatch",
-  "_tre_free", "_tre_mem_alloc_impl", "_tre_mem_destroy",
-  "_tre_mem_new_impl", "_tre_parse", "_tre_stack_destroy",
-  "_tre_stack_new", "_tre_stack_num_objects", "_tre_tnfa_run_backtrack",
-  "_tre_tnfa_run_parallel", "_tsearch", "_ttyname", "_ttyname_r",
-  "_ttyslot", "_twalk", "_tzname", "_tzset", "_tzsetwall", "_ualarm",
-  "_ulimit", "_umaskx_np", "_uname", "_ungetc", "_ungetwc",
-  "_ungetwc_l", "_unlockpt", "_unsetenv", "_unvis", "_uselocale",
-  "_usleep", "_utime", "_utmpxname", "_uuid_clear", "_uuid_compare",
-  "_uuid_copy", "_uuid_generate", "_uuid_generate_random",
-  "_uuid_generate_time", "_uuid_is_null", "_uuid_pack", "_uuid_parse",
-  "_uuid_unpack", "_uuid_unparse", "_uuid_unparse_lower",
-  "_uuid_unparse_upper", "_vasprintf", "_vasprintf_l", "_vasxprintf",
-  "_vasxprintf_exec", "_vdprintf", "_vdprintf_l", "_vdxprintf",
-  "_vdxprintf_exec", "_verr", "_verrc", "_verrx", "_vfprintf",
-  "_vfprintf_l", "_vfscanf", "_vfscanf_l", "_vfwprintf", "_vfwprintf_l",
-  "_vfwscanf", "_vfwscanf_l", "_vfxprintf", "_vfxprintf_exec",
-  "_vis", "_vprintf", "_vprintf_l", "_vscanf", "_vscanf_l",
-  "_vsnprintf", "_vsnprintf_l", "_vsprintf", "_vsprintf_l", "_vsscanf",
-  "_vsscanf_l", "_vswprintf", "_vswprintf_l", "_vswscanf",
-  "_vswscanf_l", "_vsxprintf", "_vsxprintf_exec", "_vwarn", "_vwarnc",
-  "_vwarnx", "_vwprintf", "_vwprintf_l", "_vwscanf", "_vwscanf_l",
-  "_vxprintf", "_vxprintf_exec", "_wait", "_wait3", "_waitpid",
-  "_warn", "_warnc", "_warnx", "_wcpcpy", "_wcpncpy", "_wcrtomb",
-  "_wcrtomb_l", "_wcscasecmp", "_wcscasecmp_l", "_wcscat", "_wcschr",
-  "_wcscmp", "_wcscoll", "_wcscoll_l", "_wcscpy", "_wcscspn",
-  "_wcsdup", "_wcsftime", "_wcsftime_l", "_wcslcat", "_wcslcpy",
-  "_wcslen", "_wcsncasecmp", "_wcsncasecmp_l", "_wcsncat", "_wcsncmp",
-  "_wcsncpy", "_wcsnlen", "_wcsnrtombs", "_wcsnrtombs_l", "_wcspbrk",
-  "_wcsrchr", "_wcsrtombs", "_wcsrtombs_l", "_wcsspn", "_wcsstr",
-  "_wcstod", "_wcstod_l", "_wcstof", "_wcstof_l", "_wcstoimax",
-  "_wcstoimax_l", "_wcstok", "_wcstol", "_wcstol_l", "_wcstold",
-  "_wcstold_l", "_wcstoll", "_wcstoll_l", "_wcstombs", "_wcstombs_l",
-  "_wcstoul", "_wcstoul_l", "_wcstoull", "_wcstoull_l", "_wcstoumax",
-  "_wcstoumax_l", "_wcswidth", "_wcswidth_l", "_wcsxfrm", "_wcsxfrm_l",
-  "_wctob", "_wctob_l", "_wctomb", "_wctomb_l", "_wctrans",
-  "_wctrans_l", "_wctype", "_wctype_l", "_wcwidth", "_wcwidth_l",
-  "_wmemchr", "_wmemcmp", "_wmemcpy", "_wmemmove", "_wmemset",
-  "_wordexp", "_wordfree", "_wprintf", "_wprintf_l", "_wscanf",
-  "_wscanf_l", "_wtmpxname", "_xprintf", "_xprintf_exec"
+    "__CurrentRuneLocale",
+    "__DefaultRuneLocale",
+    "__Exit",
+    "__NSGetArgc",
+    "__NSGetArgv",
+    "__NSGetEnviron",
+    "__NSGetMachExecuteHeader",
+    "__NSGetProgname",
+    "__PathLocale",
+    "__Read_RuneMagi",
+    "___Balloc_D2A",
+    "___Bfree_D2A",
+    "___ULtod_D2A",
+    "____mb_cur_max",
+    "____mb_cur_max_l",
+    "____runetype",
+    "____runetype_l",
+    "____tolower",
+    "____tolower_l",
+    "____toupper",
+    "____toupper_l",
+    "___add_ovflpage",
+    "___addel",
+    "___any_on_D2A",
+    "___assert_rtn",
+    "___b2d_D2A",
+    "___big_delete",
+    "___big_insert",
+    "___big_keydata",
+    "___big_return",
+    "___big_split",
+    "___bigtens_D2A",
+    "___bt_close",
+    "___bt_cmp",
+    "___bt_defcmp",
+    "___bt_defpfx",
+    "___bt_delete",
+    "___bt_dleaf",
+    "___bt_fd",
+    "___bt_free",
+    "___bt_get",
+    "___bt_new",
+    "___bt_open",
+    "___bt_pgin",
+    "___bt_pgout",
+    "___bt_put",
+    "___bt_ret",
+    "___bt_search",
+    "___bt_seq",
+    "___bt_setcur",
+    "___bt_split",
+    "___bt_sync",
+    "___buf_free",
+    "___call_hash",
+    "___cleanup",
+    "___cmp_D2A",
+    "___collate_equiv_match",
+    "___collate_load_error",
+    "___collate_lookup",
+    "___collate_lookup_l",
+    "___copybits_D2A",
+    "___cxa_atexit",
+    "___cxa_finalize",
+    "___cxa_finalize_ranges",
+    "___cxa_thread_atexit",
+    "___d2b_D2A",
+    "___dbpanic",
+    "___decrement_D2A",
+    "___default_hash",
+    "___default_utx",
+    "___delpair",
+    "___diff_D2A",
+    "___dtoa",
+    "___expand_table",
+    "___fflush",
+    "___fgetwc",
+    "___find_bigpair",
+    "___find_last_page",
+    "___fix_locale_grouping_str",
+    "___fread",
+    "___free_ovflpage",
+    "___freedtoa",
+    "___gdtoa",
+    "___gdtoa_locks",
+    "___get_buf",
+    "___get_page",
+    "___gethex_D2A",
+    "___getonlyClocaleconv",
+    "___hash_open",
+    "___hdtoa",
+    "___hexdig_D2A",
+    "___hexdig_init_D2A",
+    "___hexnan_D2A",
+    "___hi0bits_D2A",
+    "___hldtoa",
+    "___i2b_D2A",
+    "___ibitmap",
+    "___increment_D2A",
+    "___isctype",
+    "___istype",
+    "___istype_l",
+    "___ldtoa",
+    "___libc_init",
+    "___lo0bits_D2A",
+    "___log2",
+    "___lshift_D2A",
+    "___maskrune",
+    "___maskrune_l",
+    "___match_D2A",
+    "___mb_cur_max",
+    "___mb_sb_limit",
+    "___memccpy_chk",
+    "___memcpy_chk",
+    "___memmove_chk",
+    "___memset_chk",
+    "___mult_D2A",
+    "___multadd_D2A",
+    "___nrv_alloc_D2A",
+    "___opendir2",
+    "___ovfl_delete",
+    "___ovfl_get",
+    "___ovfl_put",
+    "___pow5mult_D2A",
+    "___put_page",
+    "___quorem_D2A",
+    "___ratio_D2A",
+    "___rec_close",
+    "___rec_delete",
+    "___rec_dleaf",
+    "___rec_fd",
+    "___rec_fmap",
+    "___rec_fpipe",
+    "___rec_get",
+    "___rec_iput",
+    "___rec_open",
+    "___rec_put",
+    "___rec_ret",
+    "___rec_search",
+    "___rec_seq",
+    "___rec_sync",
+    "___rec_vmap",
+    "___rec_vpipe",
+    "___reclaim_buf",
+    "___rshift_D2A",
+    "___rv_alloc_D2A",
+    "___s2b_D2A",
+    "___sF",
+    "___sclose",
+    "___sdidinit",
+    "___set_ones_D2A",
+    "___setonlyClocaleconv",
+    "___sflags",
+    "___sflush",
+    "___sfp",
+    "___sfvwrite",
+    "___sglue",
+    "___sinit",
+    "___slbexpand",
+    "___smakebuf",
+    "___snprintf_chk",
+    "___snprintf_object_size_chk",
+    "___split_page",
+    "___sprintf_chk",
+    "___sprintf_object_size_chk",
+    "___sread",
+    "___srefill",
+    "___srget",
+    "___sseek",
+    "___stack_chk_fail",
+    "___stack_chk_guard",
+    "___stderrp",
+    "___stdinp",
+    "___stdoutp",
+    "___stpcpy_chk",
+    "___stpncpy_chk",
+    "___strcat_chk",
+    "___strcp_D2A",
+    "___strcpy_chk",
+    "___strlcat_chk",
+    "___strlcpy_chk",
+    "___strncat_chk",
+    "___strncpy_chk",
+    "___strtodg",
+    "___strtopdd",
+    "___sum_D2A",
+    "___svfscanf",
+    "___swbuf",
+    "___swhatbuf",
+    "___swrite",
+    "___swsetup",
+    "___tens_D2A",
+    "___tinytens_D2A",
+    "___tolower",
+    "___tolower_l",
+    "___toupper",
+    "___toupper_l",
+    "___trailz_D2A",
+    "___ulp_D2A",
+    "___ungetc",
+    "___ungetwc",
+    "___vsnprintf_chk",
+    "___vsprintf_chk",
+    "___wcwidth",
+    "___wcwidth_l",
+    "__allocenvstate",
+    "__atexit_receipt",
+    "__c_locale",
+    "__cleanup",
+    "__closeutx",
+    "__copyenv",
+    "__cthread_init_routine",
+    "__deallocenvstate",
+    "__endutxent",
+    "__flockfile_debug_stub",
+    "__fseeko",
+    "__ftello",
+    "__fwalk",
+    "__getenvp",
+    "__getutxent",
+    "__getutxid",
+    "__getutxline",
+    "__inet_aton_check",
+    "__init_clock_port",
+    "__int_to_time",
+    "__libc_fork_child",
+    "__libc_initializer",
+    "__long_to_time",
+    "__mkpath_np",
+    "__mktemp",
+    "__openutx",
+    "__os_assert_log",
+    "__os_assert_log_ctx",
+    "__os_assumes_log",
+    "__os_assumes_log_ctx",
+    "__os_avoid_tail_call",
+    "__os_crash",
+    "__os_crash_callback",
+    "__os_crash_fmt",
+    "__os_debug_log",
+    "__os_debug_log_error_str",
+    "__putenvp",
+    "__pututxline",
+    "__rand48_add",
+    "__rand48_mult",
+    "__rand48_seed",
+    "__readdir_unlocked",
+    "__reclaim_telldir",
+    "__seekdir",
+    "__setenvp",
+    "__setutxent",
+    "__sigaction_nobind",
+    "__sigintr",
+    "__signal_nobind",
+    "__sigvec_nobind",
+    "__sread",
+    "__sseek",
+    "__subsystem_init",
+    "__swrite",
+    "__time32_to_time",
+    "__time64_to_time",
+    "__time_to_int",
+    "__time_to_long",
+    "__time_to_time32",
+    "__time_to_time64",
+    "__unsetenvp",
+    "__utmpxname",
+    "_a64l",
+    "_abort",
+    "_abort_report_np",
+    "_abs",
+    "_acl_add_flag_np",
+    "_acl_add_perm",
+    "_acl_calc_mask",
+    "_acl_clear_flags_np",
+    "_acl_clear_perms",
+    "_acl_copy_entry",
+    "_acl_copy_ext",
+    "_acl_copy_ext_native",
+    "_acl_copy_int",
+    "_acl_copy_int_native",
+    "_acl_create_entry",
+    "_acl_create_entry_np",
+    "_acl_delete_def_file",
+    "_acl_delete_entry",
+    "_acl_delete_fd_np",
+    "_acl_delete_file_np",
+    "_acl_delete_flag_np",
+    "_acl_delete_link_np",
+    "_acl_delete_perm",
+    "_acl_dup",
+    "_acl_free",
+    "_acl_from_text",
+    "_acl_get_entry",
+    "_acl_get_fd",
+    "_acl_get_fd_np",
+    "_acl_get_file",
+    "_acl_get_flag_np",
+    "_acl_get_flagset_np",
+    "_acl_get_link_np",
+    "_acl_get_perm_np",
+    "_acl_get_permset",
+    "_acl_get_permset_mask_np",
+    "_acl_get_qualifier",
+    "_acl_get_tag_type",
+    "_acl_init",
+    "_acl_maximal_permset_mask_np",
+    "_acl_set_fd",
+    "_acl_set_fd_np",
+    "_acl_set_file",
+    "_acl_set_flagset_np",
+    "_acl_set_link_np",
+    "_acl_set_permset",
+    "_acl_set_permset_mask_np",
+    "_acl_set_qualifier",
+    "_acl_set_tag_type",
+    "_acl_size",
+    "_acl_to_text",
+    "_acl_valid",
+    "_acl_valid_fd_np",
+    "_acl_valid_file_np",
+    "_acl_valid_link",
+    "_addr2ascii",
+    "_alarm",
+    "_alphasort",
+    "_arc4random",
+    "_arc4random_addrandom",
+    "_arc4random_buf",
+    "_arc4random_stir",
+    "_arc4random_uniform",
+    "_ascii2addr",
+    "_asctime",
+    "_asctime_r",
+    "_asprintf",
+    "_asprintf_l",
+    "_asxprintf",
+    "_asxprintf_exec",
+    "_atexit",
+    "_atexit_b",
+    "_atof",
+    "_atof_l",
+    "_atoi",
+    "_atoi_l",
+    "_atol",
+    "_atol_l",
+    "_atoll",
+    "_atoll_l",
+    "_backtrace",
+    "_backtrace_from_fp",
+    "_backtrace_image_offsets",
+    "_backtrace_symbols",
+    "_backtrace_symbols_fd",
+    "_basename",
+    "_basename_r",
+    "_bcopy",
+    "_brk",
+    "_bsd_signal",
+    "_bsearch",
+    "_bsearch_b",
+    "_btowc",
+    "_btowc_l",
+    "_catclose",
+    "_catgets",
+    "_catopen",
+    "_cfgetispeed",
+    "_cfgetospeed",
+    "_cfmakeraw",
+    "_cfsetispeed",
+    "_cfsetospeed",
+    "_cfsetspeed",
+    "_cgetcap",
+    "_cgetclose",
+    "_cgetent",
+    "_cgetfirst",
+    "_cgetmatch",
+    "_cgetnext",
+    "_cgetnum",
+    "_cgetset",
+    "_cgetstr",
+    "_cgetustr",
+    "_chmodx_np",
+    "_clearerr",
+    "_clearerr_unlocked",
+    "_clock",
+    "_clock_getres",
+    "_clock_gettime",
+    "_clock_gettime_nsec_np",
+    "_clock_port",
+    "_clock_sem",
+    "_clock_settime",
+    "_closedir",
+    "_compat_mode",
+    "_confstr",
+    "_copy_printf_domain",
+    "_creat",
+    "_crypt",
+    "_ctermid",
+    "_ctermid_r",
+    "_ctime",
+    "_ctime_r",
+    "_daemon",
+    "_daylight",
+    "_dbm_clearerr",
+    "_dbm_close",
+    "_dbm_delete",
+    "_dbm_dirfno",
+    "_dbm_error",
+    "_dbm_fetch",
+    "_dbm_firstkey",
+    "_dbm_nextkey",
+    "_dbm_open",
+    "_dbm_store",
+    "_dbopen",
+    "_devname",
+    "_devname_r",
+    "_difftime",
+    "_digittoint",
+    "_digittoint_l",
+    "_dirfd",
+    "_dirname",
+    "_dirname_r",
+    "_div",
+    "_dprintf",
+    "_dprintf_l",
+    "_drand48",
+    "_duplocale",
+    "_dxprintf",
+    "_dxprintf_exec",
+    "_ecvt",
+    "_encrypt",
+    "_endttyent",
+    "_endusershell",
+    "_endutxent",
+    "_endutxent_wtmp",
+    "_erand48",
+    "_err",
+    "_err_set_exit",
+    "_err_set_exit_b",
+    "_err_set_file",
+    "_errc",
+    "_errx",
+    "_execl",
+    "_execle",
+    "_execlp",
+    "_execv",
+    "_execvP",
+    "_execvp",
+    "_exit",
+    "_f_prealloc",
+    "_fchmodx_np",
+    "_fclose",
+    "_fcvt",
+    "_fdopen",
+    "_fdopendir",
+    "_feof",
+    "_feof_unlocked",
+    "_ferror",
+    "_ferror_unlocked",
+    "_fflagstostr",
+    "_fflush",
+    "_fgetc",
+    "_fgetln",
+    "_fgetpos",
+    "_fgetrune",
+    "_fgets",
+    "_fgetwc",
+    "_fgetwc_l",
+    "_fgetwln",
+    "_fgetwln_l",
+    "_fgetws",
+    "_fgetws_l",
+    "_fileno",
+    "_fileno_unlocked",
+    "_filesec_dup",
+    "_filesec_free",
+    "_filesec_get_property",
+    "_filesec_init",
+    "_filesec_query_property",
+    "_filesec_set_property",
+    "_filesec_unset_property",
+    "_flockfile",
+    "_fmemopen",
+    "_fmtcheck",
+    "_fmtmsg",
+    "_fnmatch",
+    "_fopen",
+    "_fork",
+    "_forkpty",
+    "_fparseln",
+    "_fprintf",
+    "_fprintf_l",
+    "_fpurge",
+    "_fputc",
+    "_fputrune",
+    "_fputs",
+    "_fputwc",
+    "_fputwc_l",
+    "_fputws",
+    "_fputws_l",
+    "_fread",
+    "_free_printf_comp",
+    "_free_printf_domain",
+    "_freelocale",
+    "_freopen",
+    "_fscanf",
+    "_fscanf_l",
+    "_fseek",
+    "_fseeko",
+    "_fsetpos",
+    "_fstatvfs",
+    "_fstatx_np",
+    "_fsync_volume_np",
+    "_ftell",
+    "_ftello",
+    "_ftime",
+    "_ftok",
+    "_ftrylockfile",
+    "_fts_children",
+    "_fts_close",
+    "_fts_open",
+    "_fts_open_b",
+    "_fts_read",
+    "_fts_set",
+    "_ftw",
+    "_fungetrune",
+    "_funlockfile",
+    "_funopen",
+    "_fwide",
+    "_fwprintf",
+    "_fwprintf_l",
+    "_fwrite",
+    "_fwscanf",
+    "_fwscanf_l",
+    "_fxprintf",
+    "_fxprintf_exec",
+    "_gcvt",
+    "_getbsize",
+    "_getc",
+    "_getc_unlocked",
+    "_getchar",
+    "_getchar_unlocked",
+    "_getcwd",
+    "_getdate",
+    "_getdate_err",
+    "_getdelim",
+    "_getdiskbyname",
+    "_getenv",
+    "_gethostid",
+    "_gethostname",
+    "_getipv4sourcefilter",
+    "_getlastlogx",
+    "_getlastlogxbyname",
+    "_getline",
+    "_getloadavg",
+    "_getlogin",
+    "_getlogin_r",
+    "_getmntinfo",
+    "_getmntinfo_r_np",
+    "_getmode",
+    "_getopt",
+    "_getopt_long",
+    "_getopt_long_only",
+    "_getpagesize",
+    "_getpass",
+    "_getpeereid",
+    "_getprogname",
+    "_gets",
+    "_getsourcefilter",
+    "_getsubopt",
+    "_gettimeofday",
+    "_getttyent",
+    "_getttynam",
+    "_getusershell",
+    "_getutmp",
+    "_getutmpx",
+    "_getutxent",
+    "_getutxent_wtmp",
+    "_getutxid",
+    "_getutxline",
+    "_getvfsbyname",
+    "_getw",
+    "_getwc",
+    "_getwc_l",
+    "_getwchar",
+    "_getwchar_l",
+    "_getwd",
+    "_glob",
+    "_glob_b",
+    "_globfree",
+    "_gmtime",
+    "_gmtime_r",
+    "_grantpt",
+    "_hash_create",
+    "_hash_destroy",
+    "_hash_purge",
+    "_hash_search",
+    "_hash_stats",
+    "_hash_traverse",
+    "_hcreate",
+    "_hdestroy",
+    "_heapsort",
+    "_heapsort_b",
+    "_hsearch",
+    "_imaxabs",
+    "_imaxdiv",
+    "_inet_addr",
+    "_inet_aton",
+    "_inet_lnaof",
+    "_inet_makeaddr",
+    "_inet_net_ntop",
+    "_inet_net_pton",
+    "_inet_neta",
+    "_inet_netof",
+    "_inet_network",
+    "_inet_nsap_addr",
+    "_inet_nsap_ntoa",
+    "_inet_ntoa",
+    "_inet_ntop",
+    "_inet_ntop4",
+    "_inet_ntop6",
+    "_inet_pton",
+    "_initstate",
+    "_insque",
+    "_isalnum",
+    "_isalnum_l",
+    "_isalpha",
+    "_isalpha_l",
+    "_isascii",
+    "_isatty",
+    "_isblank",
+    "_isblank_l",
+    "_iscntrl",
+    "_iscntrl_l",
+    "_isdigit",
+    "_isdigit_l",
+    "_isgraph",
+    "_isgraph_l",
+    "_ishexnumber",
+    "_ishexnumber_l",
+    "_isideogram",
+    "_isideogram_l",
+    "_islower",
+    "_islower_l",
+    "_isnumber",
+    "_isnumber_l",
+    "_isphonogram",
+    "_isphonogram_l",
+    "_isprint",
+    "_isprint_l",
+    "_ispunct",
+    "_ispunct_l",
+    "_isrune",
+    "_isrune_l",
+    "_isspace",
+    "_isspace_l",
+    "_isspecial",
+    "_isspecial_l",
+    "_isupper",
+    "_isupper_l",
+    "_iswalnum",
+    "_iswalnum_l",
+    "_iswalpha",
+    "_iswalpha_l",
+    "_iswascii",
+    "_iswblank",
+    "_iswblank_l",
+    "_iswcntrl",
+    "_iswcntrl_l",
+    "_iswctype",
+    "_iswctype_l",
+    "_iswdigit",
+    "_iswdigit_l",
+    "_iswgraph",
+    "_iswgraph_l",
+    "_iswhexnumber",
+    "_iswhexnumber_l",
+    "_iswideogram",
+    "_iswideogram_l",
+    "_iswlower",
+    "_iswlower_l",
+    "_iswnumber",
+    "_iswnumber_l",
+    "_iswphonogram",
+    "_iswphonogram_l",
+    "_iswprint",
+    "_iswprint_l",
+    "_iswpunct",
+    "_iswpunct_l",
+    "_iswrune",
+    "_iswrune_l",
+    "_iswspace",
+    "_iswspace_l",
+    "_iswspecial",
+    "_iswspecial_l",
+    "_iswupper",
+    "_iswupper_l",
+    "_iswxdigit",
+    "_iswxdigit_l",
+    "_isxdigit",
+    "_isxdigit_l",
+    "_jrand48",
+    "_kOSThermalNotificationPressureLevelName",
+    "_killpg",
+    "_l64a",
+    "_labs",
+    "_lchflags",
+    "_lchmod",
+    "_lcong48",
+    "_ldiv",
+    "_lfind",
+    "_link_addr",
+    "_link_ntoa",
+    "_llabs",
+    "_lldiv",
+    "_localeconv",
+    "_localeconv_l",
+    "_localtime",
+    "_localtime_r",
+    "_lockf",
+    "_login",
+    "_login_tty",
+    "_logout",
+    "_logwtmp",
+    "_lrand48",
+    "_lsearch",
+    "_lstatx_np",
+    "_lutimes",
+    "_mblen",
+    "_mblen_l",
+    "_mbmb",
+    "_mbrlen",
+    "_mbrlen_l",
+    "_mbrrune",
+    "_mbrtowc",
+    "_mbrtowc_l",
+    "_mbrune",
+    "_mbsinit",
+    "_mbsinit_l",
+    "_mbsnrtowcs",
+    "_mbsnrtowcs_l",
+    "_mbsrtowcs",
+    "_mbsrtowcs_l",
+    "_mbstowcs",
+    "_mbstowcs_l",
+    "_mbtowc",
+    "_mbtowc_l",
+    "_memmem",
+    "_memset_s",
+    "_mergesort",
+    "_mergesort_b",
+    "_mkdirx_np",
+    "_mkdtemp",
+    "_mkdtempat_np",
+    "_mkfifox_np",
+    "_mkostemp",
+    "_mkostemps",
+    "_mkostempsat_np",
+    "_mkpath_np",
+    "_mkpathat_np",
+    "_mkstemp",
+    "_mkstemp_dprotected_np",
+    "_mkstemps",
+    "_mkstempsat_np",
+    "_mktemp",
+    "_mktime",
+    "_monaddition",
+    "_moncontrol",
+    "_moncount",
+    "_moninit",
+    "_monitor",
+    "_monoutput",
+    "_monreset",
+    "_monstartup",
+    "_mpool_close",
+    "_mpool_filter",
+    "_mpool_get",
+    "_mpool_new",
+    "_mpool_open",
+    "_mpool_put",
+    "_mpool_sync",
+    "_mrand48",
+    "_nanosleep",
+    "_new_printf_comp",
+    "_new_printf_domain",
+    "_newlocale",
+    "_nextwctype",
+    "_nextwctype_l",
+    "_nftw",
+    "_nice",
+    "_nl_langinfo",
+    "_nl_langinfo_l",
+    "_nrand48",
+    "_nvis",
+    "_off32",
+    "_off64",
+    "_offtime",
+    "_open_memstream",
+    "_open_with_subsystem",
+    "_open_wmemstream",
+    "_opendev",
+    "_opendir",
+    "_openpty",
+    "_openx_np",
+    "_optarg",
+    "_opterr",
+    "_optind",
+    "_optopt",
+    "_optreset",
+    "_pause",
+    "_pclose",
+    "_perror",
+    "_popen",
+    "_posix2time",
+    "_posix_openpt",
+    "_posix_spawnp",
+    "_printf",
+    "_printf_l",
+    "_psignal",
+    "_psort",
+    "_psort_b",
+    "_psort_r",
+    "_ptsname",
+    "_ptsname_r",
+    "_putc",
+    "_putc_unlocked",
+    "_putchar",
+    "_putchar_unlocked",
+    "_putenv",
+    "_puts",
+    "_pututxline",
+    "_putw",
+    "_putwc",
+    "_putwc_l",
+    "_putwchar",
+    "_putwchar_l",
+    "_qsort",
+    "_qsort_b",
+    "_qsort_r",
+    "_querylocale",
+    "_radixsort",
+    "_raise",
+    "_rand",
+    "_rand_r",
+    "_random",
+    "_rb_tree_count",
+    "_rb_tree_find_node",
+    "_rb_tree_find_node_geq",
+    "_rb_tree_find_node_leq",
+    "_rb_tree_init",
+    "_rb_tree_insert_node",
+    "_rb_tree_iterate",
+    "_rb_tree_remove_node",
+    "_readdir",
+    "_readdir_r",
+    "_readpassphrase",
+    "_reallocf",
+    "_realpath",
+    "_recv",
+    "_regcomp",
+    "_regcomp_l",
+    "_regerror",
+    "_regexec",
+    "_regfree",
+    "_register_printf_domain_function",
+    "_register_printf_domain_render_std",
+    "_regncomp",
+    "_regncomp_l",
+    "_regnexec",
+    "_regwcomp",
+    "_regwcomp_l",
+    "_regwexec",
+    "_regwncomp",
+    "_regwncomp_l",
+    "_regwnexec",
+    "_remove",
+    "_remque",
+    "_rewind",
+    "_rewinddir",
+    "_rindex",
+    "_rpmatch",
+    "_sbrk",
+    "_scandir",
+    "_scandir_b",
+    "_scanf",
+    "_scanf_l",
+    "_seed48",
+    "_seekdir",
+    "_send",
+    "_setbuf",
+    "_setbuffer",
+    "_setenv",
+    "_sethostid",
+    "_sethostname",
+    "_setinvalidrune",
+    "_setipv4sourcefilter",
+    "_setkey",
+    "_setlinebuf",
+    "_setlocale",
+    "_setlogin",
+    "_setmode",
+    "_setpgrp",
+    "_setprogname",
+    "_setrgid",
+    "_setruid",
+    "_setrunelocale",
+    "_setsourcefilter",
+    "_setstate",
+    "_settimeofday",
+    "_setttyent",
+    "_setusershell",
+    "_setutxent",
+    "_setutxent_wtmp",
+    "_setvbuf",
+    "_sigaction",
+    "_sigaddset",
+    "_sigaltstack",
+    "_sigblock",
+    "_sigdelset",
+    "_sigemptyset",
+    "_sigfillset",
+    "_sighold",
+    "_sigignore",
+    "_siginterrupt",
+    "_sigismember",
+    "_signal",
+    "_sigpause",
+    "_sigrelse",
+    "_sigset",
+    "_sigsetmask",
+    "_sigvec",
+    "_skip",
+    "_sl_add",
+    "_sl_find",
+    "_sl_free",
+    "_sl_init",
+    "_sleep",
+    "_snprintf",
+    "_snprintf_l",
+    "_snvis",
+    "_sockatmark",
+    "_sprintf",
+    "_sprintf_l",
+    "_sradixsort",
+    "_srand",
+    "_srand48",
+    "_sranddev",
+    "_srandom",
+    "_srandomdev",
+    "_sscanf",
+    "_sscanf_l",
+    "_stat_with_subsystem",
+    "_statvfs",
+    "_statx_np",
+    "_stpcpy",
+    "_stpncpy",
+    "_strcasecmp",
+    "_strcasecmp_l",
+    "_strcasestr",
+    "_strcasestr_l",
+    "_strcat",
+    "_strcoll",
+    "_strcoll_l",
+    "_strcspn",
+    "_strdup",
+    "_strenvisx",
+    "_strerror",
+    "_strerror_r",
+    "_strfmon",
+    "_strfmon_l",
+    "_strftime",
+    "_strftime_l",
+    "_strmode",
+    "_strncasecmp",
+    "_strncasecmp_l",
+    "_strncat",
+    "_strndup",
+    "_strnstr",
+    "_strnunvis",
+    "_strnunvisx",
+    "_strnvis",
+    "_strnvisx",
+    "_strpbrk",
+    "_strptime",
+    "_strptime_l",
+    "_strrchr",
+    "_strsenvisx",
+    "_strsep",
+    "_strsignal",
+    "_strsignal_r",
+    "_strsnvis",
+    "_strsnvisx",
+    "_strspn",
+    "_strsvis",
+    "_strsvisx",
+    "_strtod",
+    "_strtod_l",
+    "_strtof",
+    "_strtof_l",
+    "_strtofflags",
+    "_strtoimax",
+    "_strtoimax_l",
+    "_strtok",
+    "_strtok_r",
+    "_strtol",
+    "_strtol_l",
+    "_strtold",
+    "_strtold_l",
+    "_strtoll",
+    "_strtoll_l",
+    "_strtonum",
+    "_strtoq",
+    "_strtoq_l",
+    "_strtoul",
+    "_strtoul_l",
+    "_strtoull",
+    "_strtoull_l",
+    "_strtoumax",
+    "_strtoumax_l",
+    "_strtouq",
+    "_strtouq_l",
+    "_strunvis",
+    "_strunvisx",
+    "_strvis",
+    "_strvisx",
+    "_strxfrm",
+    "_strxfrm_l",
+    "_suboptarg",
+    "_svis",
+    "_swab",
+    "_swprintf",
+    "_swprintf_l",
+    "_swscanf",
+    "_swscanf_l",
+    "_sxprintf",
+    "_sxprintf_exec",
+    "_sync_volume_np",
+    "_sys_errlist",
+    "_sys_nerr",
+    "_sys_siglist",
+    "_sys_signame",
+    "_sysconf",
+    "_sysctl",
+    "_sysctlbyname",
+    "_sysctlnametomib",
+    "_system",
+    "_tcdrain",
+    "_tcflow",
+    "_tcflush",
+    "_tcgetattr",
+    "_tcgetpgrp",
+    "_tcgetsid",
+    "_tcsendbreak",
+    "_tcsetattr",
+    "_tcsetpgrp",
+    "_tdelete",
+    "_telldir",
+    "_tempnam",
+    "_tfind",
+    "_thread_stack_pcs",
+    "_time",
+    "_time2posix",
+    "_timegm",
+    "_timelocal",
+    "_timeoff",
+    "_times",
+    "_timespec_get",
+    "_timezone",
+    "_timingsafe_bcmp",
+    "_tmpfile",
+    "_tmpnam",
+    "_toascii",
+    "_tolower",
+    "_tolower_l",
+    "_toupper",
+    "_toupper_l",
+    "_towctrans",
+    "_towctrans_l",
+    "_towlower",
+    "_towlower_l",
+    "_towupper",
+    "_towupper_l",
+    "_tre_ast_new_catenation",
+    "_tre_ast_new_iter",
+    "_tre_ast_new_literal",
+    "_tre_ast_new_node",
+    "_tre_ast_new_union",
+    "_tre_compile",
+    "_tre_fill_pmatch",
+    "_tre_free",
+    "_tre_mem_alloc_impl",
+    "_tre_mem_destroy",
+    "_tre_mem_new_impl",
+    "_tre_parse",
+    "_tre_stack_destroy",
+    "_tre_stack_new",
+    "_tre_stack_num_objects",
+    "_tre_tnfa_run_backtrack",
+    "_tre_tnfa_run_parallel",
+    "_tsearch",
+    "_ttyname",
+    "_ttyname_r",
+    "_ttyslot",
+    "_twalk",
+    "_tzname",
+    "_tzset",
+    "_tzsetwall",
+    "_ualarm",
+    "_ulimit",
+    "_umaskx_np",
+    "_uname",
+    "_ungetc",
+    "_ungetwc",
+    "_ungetwc_l",
+    "_unlockpt",
+    "_unsetenv",
+    "_unvis",
+    "_uselocale",
+    "_usleep",
+    "_utime",
+    "_utmpxname",
+    "_uuid_clear",
+    "_uuid_compare",
+    "_uuid_copy",
+    "_uuid_generate",
+    "_uuid_generate_random",
+    "_uuid_generate_time",
+    "_uuid_is_null",
+    "_uuid_pack",
+    "_uuid_parse",
+    "_uuid_unpack",
+    "_uuid_unparse",
+    "_uuid_unparse_lower",
+    "_uuid_unparse_upper",
+    "_vasprintf",
+    "_vasprintf_l",
+    "_vasxprintf",
+    "_vasxprintf_exec",
+    "_vdprintf",
+    "_vdprintf_l",
+    "_vdxprintf",
+    "_vdxprintf_exec",
+    "_verr",
+    "_verrc",
+    "_verrx",
+    "_vfprintf",
+    "_vfprintf_l",
+    "_vfscanf",
+    "_vfscanf_l",
+    "_vfwprintf",
+    "_vfwprintf_l",
+    "_vfwscanf",
+    "_vfwscanf_l",
+    "_vfxprintf",
+    "_vfxprintf_exec",
+    "_vis",
+    "_vprintf",
+    "_vprintf_l",
+    "_vscanf",
+    "_vscanf_l",
+    "_vsnprintf",
+    "_vsnprintf_l",
+    "_vsprintf",
+    "_vsprintf_l",
+    "_vsscanf",
+    "_vsscanf_l",
+    "_vswprintf",
+    "_vswprintf_l",
+    "_vswscanf",
+    "_vswscanf_l",
+    "_vsxprintf",
+    "_vsxprintf_exec",
+    "_vwarn",
+    "_vwarnc",
+    "_vwarnx",
+    "_vwprintf",
+    "_vwprintf_l",
+    "_vwscanf",
+    "_vwscanf_l",
+    "_vxprintf",
+    "_vxprintf_exec",
+    "_wait",
+    "_wait3",
+    "_waitpid",
+    "_warn",
+    "_warnc",
+    "_warnx",
+    "_wcpcpy",
+    "_wcpncpy",
+    "_wcrtomb",
+    "_wcrtomb_l",
+    "_wcscasecmp",
+    "_wcscasecmp_l",
+    "_wcscat",
+    "_wcschr",
+    "_wcscmp",
+    "_wcscoll",
+    "_wcscoll_l",
+    "_wcscpy",
+    "_wcscspn",
+    "_wcsdup",
+    "_wcsftime",
+    "_wcsftime_l",
+    "_wcslcat",
+    "_wcslcpy",
+    "_wcslen",
+    "_wcsncasecmp",
+    "_wcsncasecmp_l",
+    "_wcsncat",
+    "_wcsncmp",
+    "_wcsncpy",
+    "_wcsnlen",
+    "_wcsnrtombs",
+    "_wcsnrtombs_l",
+    "_wcspbrk",
+    "_wcsrchr",
+    "_wcsrtombs",
+    "_wcsrtombs_l",
+    "_wcsspn",
+    "_wcsstr",
+    "_wcstod",
+    "_wcstod_l",
+    "_wcstof",
+    "_wcstof_l",
+    "_wcstoimax",
+    "_wcstoimax_l",
+    "_wcstok",
+    "_wcstol",
+    "_wcstol_l",
+    "_wcstold",
+    "_wcstold_l",
+    "_wcstoll",
+    "_wcstoll_l",
+    "_wcstombs",
+    "_wcstombs_l",
+    "_wcstoul",
+    "_wcstoul_l",
+    "_wcstoull",
+    "_wcstoull_l",
+    "_wcstoumax",
+    "_wcstoumax_l",
+    "_wcswidth",
+    "_wcswidth_l",
+    "_wcsxfrm",
+    "_wcsxfrm_l",
+    "_wctob",
+    "_wctob_l",
+    "_wctomb",
+    "_wctomb_l",
+    "_wctrans",
+    "_wctrans_l",
+    "_wctype",
+    "_wctype_l",
+    "_wcwidth",
+    "_wcwidth_l",
+    "_wmemchr",
+    "_wmemcmp",
+    "_wmemcpy",
+    "_wmemmove",
+    "_wmemset",
+    "_wordexp",
+    "_wordfree",
+    "_wprintf",
+    "_wprintf_l",
+    "_wscanf",
+    "_wscanf_l",
+    "_wtmpxname",
+    "_xprintf",
+    "_xprintf_exec",
 )
 
+
 def print_here_head(name):
-  print("""\
-(tee %s.s |llvm-mc -filetype=obj -triple %s -o %s.o) <<END_OF_FILE &""" % (name, triple, name))
+    print(
+        """\
+(tee %s.s |llvm-mc -filetype=obj -triple %s -o %s.o) <<END_OF_FILE &"""
+        % (name, triple, name)
+    )
+
 
 def print_here_tail():
-  print("""\
+    print(
+        """\
 END_OF_FILE
-""")
+"""
+    )
+
 
 def print_function_head(p2align, name):
-  if args.os == "macos":
-      print("""\
+    if args.os == "macos":
+        print(
+            """\
     .section __TEXT,__text,regular,pure_instructions
     .p2align %d, 0x90
     .globl _%s
-_%s:""" % (p2align, name, name))
-  elif args.os == "windows":
-      print("""\
+_%s:"""
+            % (p2align, name, name)
+        )
+    elif args.os == "windows":
+        print(
+            """\
     .text
     .def %s;
     .scl 2;
@@ -332,98 +1321,136 @@ def print_function_head(p2align, name):
     .endef
     .globl %s
     .p2align %d
-%s:""" % (name, name, p2align, name))
-  elif args.os == "linux":
-      print("""\
+%s:"""
+            % (name, name, p2align, name)
+        )
+    elif args.os == "linux":
+        print(
+            """\
     .text
     .p2align %d
     .globl %s
-%s:""" % (p2align, name, name))
+%s:"""
+            % (p2align, name, name)
+        )
+
 
 def print_function(addr, size, addrs):
-  name = "x%08x" % addr
-  calls = random.randint(0, size>>12)
-  print_here_head(name)
-  print("""\
-### %s size=%x calls=%x""" % (name, size, calls))
-  print_function_head(4, name)
-  for i in range(calls):
-      print("    bl %sx%08x\n    .p2align 4" %
-            ("_" if args.os == "macos" else "",
-             addrs[random.randint(0, len(addrs)-1)]))
-      if args.os == "macos":
-        print("    bl %s\n    .p2align 4" %
-              (libSystem_calls[random.randint(0, len(libSystem_calls)-1)]))
-  fill = size - 4 * (calls + 1)
-  assert fill > 0
-  print("""\
+    name = "x%08x" % addr
+    calls = random.randint(0, size >> 12)
+    print_here_head(name)
+    print(
+        """\
+### %s size=%x calls=%x"""
+        % (name, size, calls)
+    )
+    print_function_head(4, name)
+    for i in range(calls):
+        print(
+            "    bl %sx%08x\n    .p2align 4"
+            % (
+                "_" if args.os == "macos" else "",
+                addrs[random.randint(0, len(addrs) - 1)],
+            )
+        )
+        if args.os == "macos":
+            print(
+                "    bl %s\n    .p2align 4"
+                % (libSystem_calls[random.randint(0, len(libSystem_calls) - 1)])
+            )
+    fill = size - 4 * (calls + 1)
+    assert fill > 0
+    print(
+        """\
     .fill 0x%x
-    ret""" % (fill))
-  print_here_tail()
+    ret"""
+        % (fill)
+    )
+    print_here_tail()
+
 
 def random_seed():
-  """Generate a seed that can easily be passed back in via --seed=STRING"""
-  return ''.join(random.choice(string.ascii_lowercase) for i in range(10))
+    """Generate a seed that can easily be passed back in via --seed=STRING"""
+    return "".join(random.choice(string.ascii_lowercase) for i in range(10))
+
 
 def generate_sizes(base, megabytes):
-  total = 0
-  while total < megabytes:
-      size = random.randint(0x100, 0x10000) * 0x10
-      yield size
-      total += size
+    total = 0
+    while total < megabytes:
+        size = random.randint(0x100, 0x10000) * 0x10
+        yield size
+        total += size
+
 
 def generate_addrs(addr, sizes):
-  i = 0
-  while i < len(sizes):
-      yield addr
-      addr += sizes[i]
-      i += 1
+    i = 0
+    while i < len(sizes):
+        yield addr
+        addr += sizes[i]
+        i += 1
+
 
 def main():
-  parser = argparse.ArgumentParser(
-    description=__doc__,
-    epilog="""\
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        epilog="""\
 WRITEME
-""")
-  parser.add_argument('--seed', type=str, default=random_seed(),
-                      help='Seed the random number generator')
-  parser.add_argument('--size', type=int, default=None,
-                      help='Total text size to generate, in megabytes')
-  parser.add_argument('--os', type=str, default="macos",
-                      help='Target OS: macos, windows, or linux')
-  global args
-  args = parser.parse_args()
-  triples = {
-      "macos": "arm64-apple-macos",
-      "linux": "aarch64-pc-linux",
-      "windows": "aarch64-pc-windows"
-  }
-  global triple
-  triple = triples.get(args.os)
+""",
+    )
+    parser.add_argument(
+        "--seed",
+        type=str,
+        default=random_seed(),
+        help="Seed the random number generator",
+    )
+    parser.add_argument(
+        "--size",
+        type=int,
+        default=None,
+        help="Total text size to generate, in megabytes",
+    )
+    parser.add_argument(
+        "--os", type=str, default="macos", help="Target OS: macos, windows, or linux"
+    )
+    global args
+    args = parser.parse_args()
+    triples = {
+        "macos": "arm64-apple-macos",
+        "linux": "aarch64-pc-linux",
+        "windows": "aarch64-pc-windows",
+    }
+    global triple
+    triple = triples.get(args.os)
 
-  print("""\
+    print(
+        """\
 ### seed=%s triple=%s
-""" % (args.seed, triple))
+"""
+        % (args.seed, triple)
+    )
 
-  random.seed(args.seed)
+    random.seed(args.seed)
 
-  base = 0x4010
-  megabytes = (int(args.size) if args.size else 512) * 1024 * 1024
-  sizes = [size for size in generate_sizes(base, megabytes)]
-  addrs = [addr for addr in generate_addrs(base, sizes)]
+    base = 0x4010
+    megabytes = (int(args.size) if args.size else 512) * 1024 * 1024
+    sizes = [size for size in generate_sizes(base, megabytes)]
+    addrs = [addr for addr in generate_addrs(base, sizes)]
 
-  for i in range(len(addrs)):
-      print_function(addrs[i], sizes[i], addrs)
+    for i in range(len(addrs)):
+        print_function(addrs[i], sizes[i], addrs)
 
-  print_here_head("main")
-  print("""\
+    print_here_head("main")
+    print(
+        """\
 ### _x%08x
-""" % (addrs[-1] + sizes[-1]))
-  print_function_head(14 if args.os == "macos" else 4, "main")
-  print("    ret")
-  print_here_tail()
-  print("wait")
+"""
+        % (addrs[-1] + sizes[-1])
+    )
+    print_function_head(14 if args.os == "macos" else 4, "main")
+    print("    ret")
+    print_here_tail()
+    print("wait")
 
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()

diff  --git a/lld/test/MachO/tools/validate-unwind-info.py b/lld/test/MachO/tools/validate-unwind-info.py
index 592751ca39f6b..ac49f1ecb5889 100755
--- a/lld/test/MachO/tools/validate-unwind-info.py
+++ b/lld/test/MachO/tools/validate-unwind-info.py
@@ -9,96 +9,119 @@
 import re
 from pprint import pprint
 
+
 def main():
-  hex = "[a-f\d]"
-  hex8 = hex + "{8}"
-
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('files', metavar='FILES', nargs='*',
-                      help='output of (llvm-objdump --unwind-info --syms) for object file(s) plus final linker output')
-  parser.add_argument('--debug', action='store_true')
-  args = parser.parse_args()
-
-  if args.files:
-    objdump_string = ''.join([open(f).read() for f in args.files])
-  else:
-    objdump_string = sys.stdin.read()
-
-  object_encodings_list = [(symbol, encoding, personality, lsda)
-    for symbol, encoding, personality, lsda in
-    re.findall(r"start:\s+0x%s+\s+(\w+)\s+" % hex +
-               r"length:\s+0x%s+\s+" % hex +
-               r"compact encoding:\s+0x(%s+)(?:\s+" % hex +
-               r"personality function:\s+0x(%s+)\s+\w+\s+" % hex +
-               r"LSDA:\s+0x(%s+)\s+\w+(?: \+ 0x%s+)?)?" % (hex, hex),
-               objdump_string, re.DOTALL)]
-  object_encodings_map = {symbol:encoding
-    for symbol, encoding, _, _ in object_encodings_list}
-  if not object_encodings_map:
-    sys.exit("no object encodings found in input")
-
-  # generate-cfi-funcs.py doesn't generate unwind info for _main.
-  object_encodings_map['_main'] = '00000000'
-
-  program_symbols_map = {address:symbol
-    for address, symbol in
-    re.findall(r"^%s(%s) g\s+F __TEXT,__text (x\1|_main)$" % (hex8, hex8),
-               objdump_string, re.MULTILINE)}
-  if not program_symbols_map:
-    sys.exit("no program symbols found in input")
-
-  program_common_encodings = (
-    re.findall(r"^\s+encoding\[(?:\d|\d\d|1[01]\d|12[0-6])\]: 0x(%s+)$" % hex,
-               objdump_string, re.MULTILINE))
-  if not program_common_encodings:
-    sys.exit("no common encodings found in input")
-
-  program_encodings_map = {program_symbols_map[address]:encoding
-    for address, encoding in
-    re.findall(r"^\s+\[\d+\]: function offset=0x(%s+), " % hex +
-               r"encoding(?:\[\d+\])?=0x(%s+)$" % hex,
-               objdump_string, re.MULTILINE)}
-  if not object_encodings_map:
-    sys.exit("no program encodings found in input")
-
-  # Fold adjacent entries from the object file that have matching encodings
-  # TODO(gkm) add check for personality+lsda
-  encoding0 = 0
-  for symbol in sorted(object_encodings_map):
-    encoding = object_encodings_map[symbol]
-    fold = (encoding == encoding0)
-    if fold:
-      del object_encodings_map[symbol]
-    if args.debug:
-      print("%s %s with %s" % (
-              'delete' if fold else 'retain', symbol, encoding))
-    encoding0 = encoding
-
-  if program_encodings_map != object_encodings_map:
-    if args.debug:
-      print("program encodings map:")
-      pprint(program_encodings_map)
-      print("object encodings map:")
-      pprint(object_encodings_map)
-    sys.exit("encoding maps differ")
-
-  # Count frequency of object-file folded encodings
-  # and compare with the program-file common encodings table
-  encoding_frequency_map = {}
-  for _, encoding in object_encodings_map.items():
-    encoding_frequency_map[encoding] = 1 + encoding_frequency_map.get(encoding, 0)
-  encoding_frequencies = [x for x in
-                          sorted(encoding_frequency_map,
-                                 key=lambda x: (encoding_frequency_map.get(x), x),
-                                 reverse=True)]
-  del encoding_frequencies[127:]
-
-  if program_common_encodings != encoding_frequencies:
-    if args.debug:
-      pprint("program common encodings:\n" + str(program_common_encodings))
-      pprint("object encoding frequencies:\n" + str(encoding_frequencies))
-    sys.exit("encoding frequencies differ")
-
-
-if __name__ == '__main__':
-  main()
+    hex = "[a-f\d]"
+    hex8 = hex + "{8}"
+
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "files",
+        metavar="FILES",
+        nargs="*",
+        help="output of (llvm-objdump --unwind-info --syms) for object file(s) plus final linker output",
+    )
+    parser.add_argument("--debug", action="store_true")
+    args = parser.parse_args()
+
+    if args.files:
+        objdump_string = "".join([open(f).read() for f in args.files])
+    else:
+        objdump_string = sys.stdin.read()
+
+    object_encodings_list = [
+        (symbol, encoding, personality, lsda)
+        for symbol, encoding, personality, lsda in re.findall(
+            r"start:\s+0x%s+\s+(\w+)\s+" % hex
+            + r"length:\s+0x%s+\s+" % hex
+            + r"compact encoding:\s+0x(%s+)(?:\s+" % hex
+            + r"personality function:\s+0x(%s+)\s+\w+\s+" % hex
+            + r"LSDA:\s+0x(%s+)\s+\w+(?: \+ 0x%s+)?)?" % (hex, hex),
+            objdump_string,
+            re.DOTALL,
+        )
+    ]
+    object_encodings_map = {
+        symbol: encoding for symbol, encoding, _, _ in object_encodings_list
+    }
+    if not object_encodings_map:
+        sys.exit("no object encodings found in input")
+
+    # generate-cfi-funcs.py doesn't generate unwind info for _main.
+    object_encodings_map["_main"] = "00000000"
+
+    program_symbols_map = {
+        address: symbol
+        for address, symbol in re.findall(
+            r"^%s(%s) g\s+F __TEXT,__text (x\1|_main)$" % (hex8, hex8),
+            objdump_string,
+            re.MULTILINE,
+        )
+    }
+    if not program_symbols_map:
+        sys.exit("no program symbols found in input")
+
+    program_common_encodings = re.findall(
+        r"^\s+encoding\[(?:\d|\d\d|1[01]\d|12[0-6])\]: 0x(%s+)$" % hex,
+        objdump_string,
+        re.MULTILINE,
+    )
+    if not program_common_encodings:
+        sys.exit("no common encodings found in input")
+
+    program_encodings_map = {
+        program_symbols_map[address]: encoding
+        for address, encoding in re.findall(
+            r"^\s+\[\d+\]: function offset=0x(%s+), " % hex
+            + r"encoding(?:\[\d+\])?=0x(%s+)$" % hex,
+            objdump_string,
+            re.MULTILINE,
+        )
+    }
+    if not object_encodings_map:
+        sys.exit("no program encodings found in input")
+
+    # Fold adjacent entries from the object file that have matching encodings
+    # TODO(gkm) add check for personality+lsda
+    encoding0 = 0
+    for symbol in sorted(object_encodings_map):
+        encoding = object_encodings_map[symbol]
+        fold = encoding == encoding0
+        if fold:
+            del object_encodings_map[symbol]
+        if args.debug:
+            print("%s %s with %s" % ("delete" if fold else "retain", symbol, encoding))
+        encoding0 = encoding
+
+    if program_encodings_map != object_encodings_map:
+        if args.debug:
+            print("program encodings map:")
+            pprint(program_encodings_map)
+            print("object encodings map:")
+            pprint(object_encodings_map)
+        sys.exit("encoding maps differ")
+
+    # Count frequency of object-file folded encodings
+    # and compare with the program-file common encodings table
+    encoding_frequency_map = {}
+    for _, encoding in object_encodings_map.items():
+        encoding_frequency_map[encoding] = 1 + encoding_frequency_map.get(encoding, 0)
+    encoding_frequencies = [
+        x
+        for x in sorted(
+            encoding_frequency_map,
+            key=lambda x: (encoding_frequency_map.get(x), x),
+            reverse=True,
+        )
+    ]
+    del encoding_frequencies[127:]
+
+    if program_common_encodings != encoding_frequencies:
+        if args.debug:
+            pprint("program common encodings:\n" + str(program_common_encodings))
+            pprint("object encoding frequencies:\n" + str(encoding_frequencies))
+        sys.exit("encoding frequencies differ")
+
+
+if __name__ == "__main__":
+    main()

diff --git a/lld/test/lit.cfg.py b/lld/test/lit.cfg.py
index 96a1d652573fe..2e60d9fef7dfa 100644
--- a/lld/test/lit.cfg.py
+++ b/lld/test/lit.cfg.py
@@ -14,7 +14,7 @@
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'lld'
+config.name = "lld"
 
 # testFormat: The test format to use to interpret tests.
 #
@@ -22,96 +22,114 @@
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.ll', '.s', '.test', '.yaml', '.objtxt']
+config.suffixes = [".ll", ".s", ".test", ".yaml", ".objtxt"]
 
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
-config.test_exec_root = os.path.join(config.lld_obj_root, 'test')
+config.test_exec_root = os.path.join(config.lld_obj_root, "test")
 
 llvm_config.use_default_substitutions()
 llvm_config.use_lld()
 
 tool_patterns = [
-    'llc', 'llvm-as', 'llvm-mc', 'llvm-nm', 'llvm-objdump', 'llvm-otool', 'llvm-pdbutil',
-    'llvm-dwarfdump', 'llvm-readelf', 'llvm-readobj', 'obj2yaml', 'yaml2obj',
-    'opt', 'llvm-dis']
+    "llc",
+    "llvm-as",
+    "llvm-mc",
+    "llvm-nm",
+    "llvm-objdump",
+    "llvm-otool",
+    "llvm-pdbutil",
+    "llvm-dwarfdump",
+    "llvm-readelf",
+    "llvm-readobj",
+    "obj2yaml",
+    "yaml2obj",
+    "opt",
+    "llvm-dis",
+]
 
 llvm_config.add_tool_substitutions(tool_patterns)
 
 # LLD tests tend to be flaky on NetBSD, so add some retries.
 # We don't do this on other platforms because it's slower.
-if platform.system() in ['NetBSD']:
+if platform.system() in ["NetBSD"]:
     config.test_retry_attempts = 2
 
 # When running under valgrind, we mangle '-vg' onto the end of the triple so we
 # can check it with XFAIL and XTARGET.
 if lit_config.useValgrind:
-    config.target_triple += '-vg'
+    config.target_triple += "-vg"
 
 # Running on ELF based *nix
-if platform.system() in ['FreeBSD', 'NetBSD', 'Linux']:
-    config.available_features.add('system-linker-elf')
+if platform.system() in ["FreeBSD", "NetBSD", "Linux"]:
+    config.available_features.add("system-linker-elf")
 
 # Set if host-cxxabi's demangler can handle target's symbols.
-if platform.system() not in ['Windows']:
-    config.available_features.add('demangler')
+if platform.system() not in ["Windows"]:
+    config.available_features.add("demangler")
 
 llvm_config.feature_config(
-    [('--targets-built', {'AArch64': 'aarch64',
-                          'AMDGPU': 'amdgpu',
-                          'ARM': 'arm',
-                          'AVR': 'avr',
-                          'Hexagon': 'hexagon',
-                          'Mips': 'mips',
-                          'MSP430': 'msp430',
-                          'PowerPC': 'ppc',
-                          'RISCV': 'riscv',
-                          'Sparc': 'sparc',
-                          'WebAssembly': 'wasm',
-                          'X86': 'x86'}),
-     ('--assertion-mode', {'ON': 'asserts'}),
-     ])
+    [
+        (
+            "--targets-built",
+            {
+                "AArch64": "aarch64",
+                "AMDGPU": "amdgpu",
+                "ARM": "arm",
+                "AVR": "avr",
+                "Hexagon": "hexagon",
+                "Mips": "mips",
+                "MSP430": "msp430",
+                "PowerPC": "ppc",
+                "RISCV": "riscv",
+                "Sparc": "sparc",
+                "WebAssembly": "wasm",
+                "X86": "x86",
+            },
+        ),
+        ("--assertion-mode", {"ON": "asserts"}),
+    ]
+)
 
 # Set a fake constant version so that we get consistent output.
-config.environment['LLD_VERSION'] = 'LLD 1.0'
+config.environment["LLD_VERSION"] = "LLD 1.0"
 
 # LLD_IN_TEST determines how many times `main` is run inside each process, which
 # lets us test that it's cleaning up after itself and resetting global state
 # correctly (which is important for usage as a library).
-run_lld_main_twice = lit_config.params.get('RUN_LLD_MAIN_TWICE', False)
+run_lld_main_twice = lit_config.params.get("RUN_LLD_MAIN_TWICE", False)
 if not run_lld_main_twice:
-    config.environment['LLD_IN_TEST'] = '1'
+    config.environment["LLD_IN_TEST"] = "1"
 else:
-    config.environment['LLD_IN_TEST'] = '2'
+    config.environment["LLD_IN_TEST"] = "2"
     # Many ELF tests fail in this mode.
-    config.excludes.append('ELF')
+    config.excludes.append("ELF")
     # Some old Mach-O backend tests fail, and it's due for removal anyway.
-    config.excludes.append('mach-o')
+    config.excludes.append("mach-o")
     # Some new Mach-O backend tests fail; give them a way to mark themselves
     # unsupported in this mode.
-    config.available_features.add('main-run-twice')
+    config.available_features.add("main-run-twice")
 
 # Indirectly check if the mt.exe Microsoft utility exists by searching for
 # cvtres, which always accompanies it.  Alternatively, check if we can use
 # libxml2 to merge manifests.
-if (lit.util.which('cvtres', config.environment['PATH']) or
-        config.have_libxml2):
-    config.available_features.add('manifest_tool')
+if lit.util.which("cvtres", config.environment["PATH"]) or config.have_libxml2:
+    config.available_features.add("manifest_tool")
 
 if config.enable_backtrace:
-    config.available_features.add('backtrace')
+    config.available_features.add("backtrace")
 
 if config.have_libxar:
-    config.available_features.add('xar')
+    config.available_features.add("xar")
 
 if config.have_libxml2:
-    config.available_features.add('libxml2')
+    config.available_features.add("libxml2")
 
 if config.have_dia_sdk:
     config.available_features.add("diasdk")
@@ -120,36 +138,44 @@
     config.available_features.add("llvm-64-bits")
 
 if config.has_plugins:
-    config.available_features.add('plugins')
+    config.available_features.add("plugins")
 
 if config.build_examples:
-    config.available_features.add('examples')
+    config.available_features.add("examples")
 
 if config.linked_bye_extension:
-    config.substitutions.append(('%loadbye', ''))
-    config.substitutions.append(('%loadnewpmbye', ''))
+    config.substitutions.append(("%loadbye", ""))
+    config.substitutions.append(("%loadnewpmbye", ""))
 else:
-    config.substitutions.append(('%loadbye',
-                                 '-load={}/Bye{}'.format(config.llvm_shlib_dir,
-                                                         config.llvm_shlib_ext)))
-    config.substitutions.append(('%loadnewpmbye',
-                                 '-load-pass-plugin={}/Bye{}'
-                                 .format(config.llvm_shlib_dir,
-                                         config.llvm_shlib_ext)))
-
-tar_executable = lit.util.which('tar', config.environment['PATH'])
+    config.substitutions.append(
+        (
+            "%loadbye",
+            "-load={}/Bye{}".format(config.llvm_shlib_dir, config.llvm_shlib_ext),
+        )
+    )
+    config.substitutions.append(
+        (
+            "%loadnewpmbye",
+            "-load-pass-plugin={}/Bye{}".format(
+                config.llvm_shlib_dir, config.llvm_shlib_ext
+            ),
+        )
+    )
+
+tar_executable = lit.util.which("tar", config.environment["PATH"])
 if tar_executable:
     env = os.environ
-    env['LANG'] = 'C'
+    env["LANG"] = "C"
     tar_version = subprocess.Popen(
-        [tar_executable, '--version'],
+        [tar_executable, "--version"],
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
-        env=env)
+        env=env,
+    )
     sout, _ = tar_version.communicate()
-    if 'GNU tar' in sout.decode():
-        config.available_features.add('gnutar')
+    if "GNU tar" in sout.decode():
+        config.available_features.add("gnutar")
 
 # ELF tests expect the default target for ld.lld to be ELF.
 if config.ld_lld_default_mingw:
-    config.excludes.append('ELF')
+    config.excludes.append("ELF")

diff --git a/lld/test/wasm/lit.local.cfg b/lld/test/wasm/lit.local.cfg
index 0b3ba0937d424..71a089c21423f 100644
--- a/lld/test/wasm/lit.local.cfg
+++ b/lld/test/wasm/lit.local.cfg
@@ -1,4 +1,4 @@
-if 'wasm' not in config.available_features:
-  config.unsupported = True
+if "wasm" not in config.available_features:
+    config.unsupported = True
 
-config.suffixes = ['.test', '.yaml', '.ll', '.s']
+config.suffixes = [".test", ".yaml", ".ll", ".s"]

diff --git a/lld/utils/benchmark.py b/lld/utils/benchmark.py
index 47490b9a98289..a07d5ecc69417 100755
--- a/lld/utils/benchmark.py
+++ b/lld/utils/benchmark.py
@@ -13,6 +13,7 @@
 import json
 import datetime
 import argparse
+
 try:
     from urllib.parse import urlencode
     from urllib.request import urlopen, Request
@@ -22,61 +23,71 @@
 
 
 parser = argparse.ArgumentParser()
-parser.add_argument('benchmark_directory')
-parser.add_argument('--runs', type=int, default=10)
-parser.add_argument('--wrapper', default='')
-parser.add_argument('--machine', required=True)
-parser.add_argument('--revision', required=True)
-parser.add_argument('--threads', action='store_true')
-parser.add_argument('--url', help='The lnt server url to send the results to',
-                    default='http://localhost:8000/db_default/v4/link/submitRun')
+parser.add_argument("benchmark_directory")
+parser.add_argument("--runs", type=int, default=10)
+parser.add_argument("--wrapper", default="")
+parser.add_argument("--machine", required=True)
+parser.add_argument("--revision", required=True)
+parser.add_argument("--threads", action="store_true")
+parser.add_argument(
+    "--url",
+    help="The lnt server url to send the results to",
+    default="http://localhost:8000/db_default/v4/link/submitRun",
+)
 args = parser.parse_args()
 
+
 class Bench:
     def __init__(self, directory, variant):
         self.directory = directory
         self.variant = variant
+
     def __str__(self):
         if not self.variant:
             return self.directory
-        return '%s-%s' % (self.directory, self.variant)
+        return "%s-%s" % (self.directory, self.variant)
+
 
 def getBenchmarks():
     ret = []
-    for i in glob.glob('*/response*.txt'):
-        m = re.match('response-(.*)\.txt', os.path.basename(i))
+    for i in glob.glob("*/response*.txt"):
+        m = re.match("response-(.*)\.txt", os.path.basename(i))
         variant = m.groups()[0] if m else None
         ret.append(Bench(os.path.dirname(i), variant))
     return ret
 
+
 def parsePerfNum(num):
-    num = num.replace(b',',b'')
+    num = num.replace(b",", b"")
     try:
         return int(num)
     except ValueError:
         return float(num)
 
+
 def parsePerfLine(line):
     ret = {}
-    line = line.split(b'#')[0].strip()
+    line = line.split(b"#")[0].strip()
     if len(line) != 0:
         p = line.split()
-        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
+        ret[p[1].strip().decode("ascii")] = parsePerfNum(p[0])
     return ret
 
+
 def parsePerf(output):
     ret = {}
-    lines = [x.strip() for x in output.split(b'\n')]
+    lines = [x.strip() for x in output.split(b"\n")]
 
-    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
+    seconds = [x for x in lines if b"seconds time elapsed" in x][0]
     seconds = seconds.strip().split()[0].strip()
-    ret['seconds-elapsed'] = parsePerfNum(seconds)
+    ret["seconds-elapsed"] = parsePerfNum(seconds)
 
-    measurement_lines = [x for x in lines if b'#' in x]
+    measurement_lines = [x for x in lines if b"#" in x]
     for l in measurement_lines:
         ret.update(parsePerfLine(l))
     return ret
 
+
 def run(cmd):
     try:
         return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
@@ -84,56 +95,62 @@ def run(cmd):
         print(e.output)
         raise e
 
+
 def combinePerfRun(acc, d):
-    for k,v in d.items():
+    for k, v in d.items():
         a = acc.get(k, [])
         a.append(v)
         acc[k] = a
 
+
 def perf(cmd):
     # Discard the first run to warm up any system cache.
     run(cmd)
 
     ret = {}
-    wrapper_args = [x for x in args.wrapper.split(',') if x]
+    wrapper_args = [x for x in args.wrapper.split(",") if x]
     for i in range(args.runs):
-        os.unlink('t')
-        out = run(wrapper_args + ['perf', 'stat'] + cmd)
+        os.unlink("t")
+        out = run(wrapper_args + ["perf", "stat"] + cmd)
         r = parsePerf(out)
         combinePerfRun(ret, r)
-    os.unlink('t')
+    os.unlink("t")
     return ret
 
+
 def runBench(bench):
-    thread_arg = [] if args.threads else ['--no-threads']
+    thread_arg = [] if args.threads else ["--no-threads"]
     os.chdir(bench.directory)
-    suffix = '-%s' % bench.variant if bench.variant else ''
-    response = 'response' + suffix + '.txt'
-    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
-    ret['name'] = str(bench)
-    os.chdir('..')
+    suffix = "-%s" % bench.variant if bench.variant else ""
+    response = "response" + suffix + ".txt"
+    ret = perf(["../ld.lld", "@" + response, "-o", "t"] + thread_arg)
+    ret["name"] = str(bench)
+    os.chdir("..")
     return ret
 
+
 def buildLntJson(benchmarks):
     start = datetime.datetime.utcnow().isoformat()
     tests = [runBench(b) for b in benchmarks]
     end = datetime.datetime.utcnow().isoformat()
     ret = {
-        'format_version' : 2,
-        'machine' : { 'name' : args.machine },
-        'run' : {
-            'end_time' : start,
-            'start_time' : end,
-            'llvm_project_revision': args.revision
+        "format_version": 2,
+        "machine": {"name": args.machine},
+        "run": {
+            "end_time": start,
+            "start_time": end,
+            "llvm_project_revision": args.revision,
         },
-        'tests' : tests
+        "tests": tests,
     }
     return json.dumps(ret, sort_keys=True, indent=4)
 
+
 def submitToServer(data):
-    data2 = urlencode({ 'input_data' : data }).encode('ascii')
+    data2 = urlencode({"input_data": data}).encode("ascii")
     urlopen(Request(args.url, data2))
 
+
 os.chdir(args.benchmark_directory)
 data = buildLntJson(getBenchmarks())
 submitToServer(data)

diff --git a/llvm-libgcc/generate_version_script.py b/llvm-libgcc/generate_version_script.py
index 98850d4f4a2de..5332869494a35 100755
--- a/llvm-libgcc/generate_version_script.py
+++ b/llvm-libgcc/generate_version_script.py
@@ -18,13 +18,12 @@ def split_suffix(symbol):
     its priortiy. A symbol that has a '@@' instead of '@' has been designated by
     the linker as the default symbol, and is awarded a priority of -1.
     """
-    if '@' not in symbol:
+    if "@" not in symbol:
         return None
-    data = [i for i in filter(lambda s: s, symbol.split('@'))]
-    _, version = data[-1].split('_')
-    version = version.replace('.', '')
-    priority = -1 if '@@' in symbol else int(version + '0' *
-                                             (3 - len(version)))
+    data = [i for i in filter(lambda s: s, symbol.split("@"))]
+    _, version = data[-1].split("_")
+    version = version.replace(".", "")
+    priority = -1 if "@@" in symbol else int(version + "0" * (3 - len(version)))
     return data[0], data[1], priority
 
 
@@ -43,9 +42,9 @@ def invert_mapping(symbol_map):
 
 def intersection(llvm, gcc):
     """
-  Finds the intersection between the symbols extracted from compiler-rt.a/libunwind.a
-  and libgcc_s.so.1.
-  """
+    Finds the intersection between the symbols extracted from compiler-rt.a/libunwind.a
+    and libgcc_s.so.1.
+    """
     common_symbols = {}
     for i in gcc:
         suffix_triple = split_suffix(i)
@@ -67,59 +66,58 @@ def find_function_names(path):
     Runs readelf on a binary and reduces to only defined functions. Equivalent to
     `llvm-readelf --wide ${path} | grep 'FUNC' | grep -v 'UND' | awk '{print $8}'`.
     """
-    result = subprocess.run(args=['llvm-readelf', '-su', path],
-                            capture_output=True)
+    result = subprocess.run(args=["llvm-readelf", "-su", path], capture_output=True)
 
     if result.returncode != 0:
-        print(result.stderr.decode('utf-8'), file=sys.stderr)
+        print(result.stderr.decode("utf-8"), file=sys.stderr)
         sys.exit(1)
 
-    stdout = result.stdout.decode('utf-8')
-    stdout = filter(lambda x: 'FUNC' in x and 'UND' not in x,
-                    stdout.split('\n'))
-    stdout = chain(
-        map(lambda x: filter(None, x), (i.split(' ') for i in stdout)))
+    stdout = result.stdout.decode("utf-8")
+    stdout = filter(lambda x: "FUNC" in x and "UND" not in x, stdout.split("\n"))
+    stdout = chain(map(lambda x: filter(None, x), (i.split(" ") for i in stdout)))
 
     return [list(i)[7] for i in stdout]
 
 
 def to_file(versioned_symbols):
-    path = f'{os.path.dirname(os.path.realpath(__file__))}/new-gcc_s-symbols'
-    with open(path, 'w') as f:
-        f.write('Do not check this version script in: you should instead work '
-                'out which symbols are missing in `lib/gcc_s.ver` and then '
-                'integrate them into `lib/gcc_s.ver`. For more information, '
-                'please see `doc/LLVMLibgcc.rst`.\n')
+    path = f"{os.path.dirname(os.path.realpath(__file__))}/new-gcc_s-symbols"
+    with open(path, "w") as f:
+        f.write(
+            "Do not check this version script in: you should instead work "
+            "out which symbols are missing in `lib/gcc_s.ver` and then "
+            "integrate them into `lib/gcc_s.ver`. For more information, "
+            "please see `doc/LLVMLibgcc.rst`.\n"
+        )
         for version, symbols in versioned_symbols:
-            f.write(f'{version} {{\n')
+            f.write(f"{version} {{\n")
             for i in symbols:
-                f.write(f'  {i};\n')
-            f.write('};\n\n')
+                f.write(f"  {i};\n")
+            f.write("};\n\n")
 
 
 def read_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--compiler_rt',
-                        type=str,
-                        help='Path to `libclang_rt.builtins-${ARCH}.a`.',
-                        required=True)
-    parser.add_argument('--libunwind',
-                        type=str,
-                        help='Path to `libunwind.a`.',
-                        required=True)
     parser.add_argument(
-        '--libgcc_s',
+        "--compiler_rt",
+        type=str,
+        help="Path to `libclang_rt.builtins-${ARCH}.a`.",
+        required=True,
+    )
+    parser.add_argument(
+        "--libunwind", type=str, help="Path to `libunwind.a`.", required=True
+    )
+    parser.add_argument(
+        "--libgcc_s",
         type=str,
-        help=
-        'Path to `libgcc_s.so.1`. Note that unlike the other two arguments, this is a dynamic library.',
-        required=True)
+        help="Path to `libgcc_s.so.1`. Note that unlike the other two arguments, this is a dynamic library.",
+        required=True,
+    )
     return parser.parse_args()
 
 
 def main():
     args = read_args()
-    llvm = find_function_names(args.compiler_rt) + find_function_names(
-        args.libunwind)
+    llvm = find_function_names(args.compiler_rt) + find_function_names(args.libunwind)
     gcc = find_function_names(args.libgcc_s)
     versioned_symbols = intersection(llvm, gcc)
     # TODO(cjdb): work out a way to integrate new symbols in with the existing
@@ -127,5 +125,5 @@ def main():
     to_file(versioned_symbols)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff --git a/openmp/docs/conf.py b/openmp/docs/conf.py
index 058cfcff8985a..a11814c6ec12c 100644
--- a/openmp/docs/conf.py
+++ b/openmp/docs/conf.py
@@ -17,32 +17,32 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx']
+extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax", "sphinx.ext.intersphinx"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'LLVM/OpenMP'
-copyright = u'2013-%d, LLVM/OpenMP' % date.today().year
+project = "LLVM/OpenMP"
+copyright = "2013-%d, LLVM/OpenMP" % date.today().year
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -51,50 +51,50 @@
 # everytime a new release comes out.
 #
 # The short version.
-#version = '0'
+# version = '0'
 # The full version, including alpha/beta/rc tags.
-#release = '0'
+# release = '0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'analyzer']
+exclude_patterns = ["_build", "analyzer"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'llvm-openmp-theme'
+html_theme = "llvm-openmp-theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
@@ -109,65 +109,65 @@
 # html_title = 'OpenMP Parallel Programming API'
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d'
+html_last_updated_fmt = "%Y-%m-%d"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'OpenMPdoc'
+htmlhelp_basename = "OpenMPdoc"
 
 # If true, the reST sources are included in the HTML build as
 # _sources/name. The default is True.
@@ -176,42 +176,39 @@
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'OpenMP.tex', u'LLVM/OpenMP Documentation',
-   u'LLVM/OpenMP', 'manual'),
+    ("index", "OpenMP.tex", "LLVM/OpenMP Documentation", "LLVM/OpenMP", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -221,7 +218,7 @@
 # man_pages = [('man/lldb', 'lldb', u'LLDB Documentation', [u'LLVM project'], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -230,16 +227,22 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'LLVM/OpenMP', u'LLVM/OpenMP Documentation',
-   u'LLVM/OpenMP', 'LLVM/OpenMP', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "index",
+        "LLVM/OpenMP",
+        "LLVM/OpenMP Documentation",
+        "LLVM/OpenMP",
+        "LLVM/OpenMP",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

diff  --git a/openmp/libompd/gdb-plugin/ompd/__init__.py b/openmp/libompd/gdb-plugin/ompd/__init__.py
index c2a12f54b3261..b9f572dce8dce 100644
--- a/openmp/libompd/gdb-plugin/ompd/__init__.py
+++ b/openmp/libompd/gdb-plugin/ompd/__init__.py
@@ -3,13 +3,14 @@
 import traceback
 
 if __name__ == "__main__":
-	try:
-		sys.path.append(os.path.dirname(__file__))
-		
-		import ompd
-		ompd.main()
-		print('OMPD GDB support loaded')
-		print('Run \'ompd init\' to start debugging')
-	except Exception as e:
-		traceback.print_exc()
-		print('Error: OMPD support could not be loaded', e)
+    try:
+        sys.path.append(os.path.dirname(__file__))
+
+        import ompd
+
+        ompd.main()
+        print("OMPD GDB support loaded")
+        print("Run 'ompd init' to start debugging")
+    except Exception as e:
+        traceback.print_exc()
+        print("Error: OMPD support could not be loaded", e)

diff  --git a/openmp/libompd/gdb-plugin/ompd/frame_filter.py b/openmp/libompd/gdb-plugin/ompd/frame_filter.py
index 9f6b914402fbd..0b049bdabd13f 100644
--- a/openmp/libompd/gdb-plugin/ompd/frame_filter.py
+++ b/openmp/libompd/gdb-plugin/ompd/frame_filter.py
@@ -9,248 +9,303 @@
 
 
 class OmpdFrameDecorator(FrameDecorator):
-	
-	def __init__(self, fobj, curr_task_handle):
-		"""Initializes a FrameDecorator with the given GDB Frame object. The global OMPD address space defined in 
-		ompd.py is set as well.
-		"""
-		super(OmpdFrameDecorator, self).__init__(fobj)
-		self.addr_space = ompd.addr_space
-		self.fobj = None
-		if isinstance(fobj, gdb.Frame):
-			self.fobj = fobj
-		elif isinstance(fobj, FrameDecorator):
-			self.fobj = fobj.inferior_frame()
-		self.curr_task_handle = curr_task_handle
-	
-	def function(self):
-		"""This appends the name of a frame that is printed with the information whether the task started in the frame 
-		is implicit or explicit. The ICVs are evaluated to determine that.
-		"""
-		name = str(self.fobj.name())
-		
-		if self.curr_task_handle is None:
-			return name
-		
-		icv_value = ompdModule.call_ompd_get_icv_from_scope(self.curr_task_handle, ompd.icv_map['implicit-task-var'][1], ompd.icv_map['implicit-task-var'][0])
-		if icv_value == 0:
-			name = '@thread %i: %s "#pragma omp task"' % (gdb.selected_thread().num, name)
-		elif icv_value == 1:
-			name = '@thread %i: %s "#pragma omp parallel"' % (gdb.selected_thread().num, name)
-		else:
-			name = '@thread %i: %s' % (gdb.selected_thread().num, name)
-		return name
+    def __init__(self, fobj, curr_task_handle):
+        """Initializes a FrameDecorator with the given GDB Frame object. The global OMPD address space defined in
+        ompd.py is set as well.
+        """
+        super(OmpdFrameDecorator, self).__init__(fobj)
+        self.addr_space = ompd.addr_space
+        self.fobj = None
+        if isinstance(fobj, gdb.Frame):
+            self.fobj = fobj
+        elif isinstance(fobj, FrameDecorator):
+            self.fobj = fobj.inferior_frame()
+        self.curr_task_handle = curr_task_handle
+
+    def function(self):
+        """This appends the name of a frame that is printed with the information whether the task started in the frame
+        is implicit or explicit. The ICVs are evaluated to determine that.
+        """
+        name = str(self.fobj.name())
+
+        if self.curr_task_handle is None:
+            return name
+
+        icv_value = ompdModule.call_ompd_get_icv_from_scope(
+            self.curr_task_handle,
+            ompd.icv_map["implicit-task-var"][1],
+            ompd.icv_map["implicit-task-var"][0],
+        )
+        if icv_value == 0:
+            name = '@thread %i: %s "#pragma omp task"' % (
+                gdb.selected_thread().num,
+                name,
+            )
+        elif icv_value == 1:
+            name = '@thread %i: %s "#pragma omp parallel"' % (
+                gdb.selected_thread().num,
+                name,
+            )
+        else:
+            name = "@thread %i: %s" % (gdb.selected_thread().num, name)
+        return name
+
 
 class OmpdFrameDecoratorThread(FrameDecorator):
-	
-	def __init__(self, fobj):
-		"""Initializes a FrameDecorator with the given GDB Frame object."""
-		super(OmpdFrameDecoratorThread, self).__init__(fobj)
-		if isinstance(fobj, gdb.Frame):
-			self.fobj = fobj
-		elif isinstance(fobj, FrameDecorator):
-			self.fobj = fobj.inferior_frame()
-	
-	def function(self):
-		name = str(self.fobj.name())
-		return '@thread %i: %s' % (gdb.selected_thread().num, name)
-
-class FrameFilter():
-	
-	def __init__(self, addr_space):
-		"""Initializes the FrameFilter, registers is in the GDB runtime and saves the given OMPD address space capsule.
-		"""
-		self.addr_space = addr_space
-		self.name = "Filter"
-		self.priority = 100
-		self.enabled = True
-		gdb.frame_filters[self.name] = self
-		self.switched_on = False
-		self.continue_to_master = False
-	
-	def set_switch(self, on_off):
-		"""Prints output when executing 'ompd bt on' or 'ompd bt off'.
-		"""
-		self.switched_on = on_off
-		if self.switched_on:
-			print('Enabled filter for "bt" output successfully.')
-		else:
-			print('Disabled filter for "bt" output successfully.')
-	
-	def set_switch_continue(self, on_off):
-		"""Prints output when executing 'ompd bt on continued'."
-		"""
-		self.continue_to_master = on_off
-		if self.continue_to_master:
-			print('Enabled "bt" mode that continues backtrace on to master thread for worker threads.')
-		else:
-			print('Disabled "bt" mode that continues onto master thread.')
-	
-	def get_master_frames_for_worker(self, past_thread_num, latest_sp):
-		"""Prints master frames for worker thread with id past_thread_num.
-		"""
-		gdb.execute('t 1')
-		gdb.execute('ompd bt on')
-		gdb.execute('bt')
-		
-		frame = gdb.newest_frame()
-		
-		while frame.older() is not None:
-			print('master frame sp:', str(frame.read_register('sp')))
-			yield OmpdFrameDecorator(frame)
-			frame = frame.older()
-		print('latest sp:', str(latest_sp))
-		
-		gdb.execute('ompd bt on continued')
-		gdb.execute('t %d' % int(past_thread_num))
-	
-	
-	def filter_frames(self, frame_iter):
-		"""Iterates through frames and only returns those that are relevant to the application
-		being debugged. The OmpdFrameDecorator is applied automatically.
-		"""
-		curr_thread_num = gdb.selected_thread().num
-		is_no_omp_thread = False
-		if curr_thread_num in self.addr_space.threads:
-			curr_thread_obj = self.addr_space.threads[curr_thread_num]
-			self.curr_task = curr_thread_obj.get_current_task()
-			self.frames = self.curr_task.get_task_frame()
-		else:
-			is_no_omp_thread = True
-			print('Thread %d is no OpenMP thread, printing all frames:' % curr_thread_num)
-		
-		stop_iter = False
-		for x in frame_iter:
-			if is_no_omp_thread:
-				yield OmpdFrameDecoratorThread(x)
-				continue
-			
-			if x.inferior_frame().older() is None:
-				continue
-			if self.curr_task.task_handle is None:
-				continue
-			
-			gdb_sp = int(str(x.inferior_frame().read_register('sp')), 16)
-			gdb_sp_next_new = int(str(x.inferior_frame()).split(",")[0].split("=")[1], 16)
-			if x.inferior_frame().older():
-				gdb_sp_next = int(str(x.inferior_frame().older().read_register('sp')), 16)
-			else:
-				gdb_sp_next = int(str(x.inferior_frame().read_register('sp')), 16)
-			while(1):
-				(ompd_enter_frame, ompd_exit_frame) = self.frames
-				
-				if (ompd_enter_frame != 0 and gdb_sp_next_new < ompd_enter_frame):
-					break
-				if (ompd_exit_frame != 0 and gdb_sp_next_new < ompd_exit_frame):
-					if x.inferior_frame().older().older() and int(str(x.inferior_frame().older().older().read_register('sp')), 16) < ompd_exit_frame:
-						if self.continue_to_master:
-							yield OmpdFrameDecoratorThread(x)
-						else:
-							yield OmpdFrameDecorator(x, self.curr_task.task_handle)
-					else:
-						yield OmpdFrameDecorator(x, self.curr_task.task_handle)
-					break
-				sched_task_handle = self.curr_task.get_scheduling_task_handle()
-				
-				if(sched_task_handle is None):
-					stop_iter = True
-					break
-				
-				self.curr_task = self.curr_task.get_scheduling_task()
-				self.frames = self.curr_task.get_task_frame()
-			if stop_iter:
-				break
-		
-		# implementation of "ompd bt continued"
-		if self.continue_to_master:
-			
-			orig_thread = gdb.selected_thread().num
-			gdb_threads = dict([(t.num, t) for t in gdb.selected_inferior().threads()])
-			
-			# iterate through generating tasks until outermost task is reached
-			while(1):
-				# get OMPD thread id for master thread (systag in GDB output)
-				try:
-					master_num = self.curr_task.get_task_parallel().get_thread_in_parallel(0).get_thread_id()
-				except:
-					break
-				# search for thread id without the "l" for long via "thread find" and get GDB thread num from output
-				hex_str = str(hex(master_num))
-				thread_output = gdb.execute('thread find %s' % hex_str[0:len(hex_str)-1], to_string=True).split(" ")
-				if thread_output[0] == "No":
-					raise ValueError('Master thread num could not be found!')
-				gdb_master_num = int(thread_output[1])
-				# get task that generated last task of worker thread
-				try:
-					self.curr_task = self.curr_task.get_task_parallel().get_task_in_parallel(0).get_generating_task()
-				except:
-					break;
-				self.frames = self.curr_task.get_task_frame()
-				(enter_frame, exit_frame) = self.frames
-				if exit_frame == 0:
-					print('outermost generating task was reached')
-					break
-				
-				# save GDB num for worker thread to change back to it later
-				worker_thread = gdb.selected_thread().num
-				
-				# use InferiorThread.switch()
-				gdb_threads = dict([(t.num, t) for t in gdb.selected_inferior().threads()])
-				gdb_threads[gdb_master_num].switch()
-				print('#### switching to thread %i ####' % gdb_master_num)
-				
-				frame = gdb.newest_frame()
-				stop_iter = False
-				
-				while(not stop_iter):
-					if self.curr_task.task_handle is None:
-						break
-					self.frames = self.curr_task.get_task_frame()
-					
-					while frame:
-						if self.curr_task.task_handle is None:
-							break
-						
-						gdb_sp_next_new = int(str(frame).split(",")[0].split("=")[1], 16)
-						
-						if frame.older():
-							gdb_sp_next = int(str(frame.older().read_register('sp')), 16)
-						else:
-							gdb_sp_next = int(str(frame.read_register('sp')), 16)
-						
-						while(1):
-							(ompd_enter_frame, ompd_exit_frame) = self.frames
-							
-							if (ompd_enter_frame != 0 and gdb_sp_next_new < ompd_enter_frame):
-								break
-							if (ompd_exit_frame == 0 or gdb_sp_next_new < ompd_exit_frame):
-								if ompd_exit_frame == 0 or frame.older() and frame.older().older() and int(str(frame.older().older().read_register('sp')), 16) < ompd_exit_frame:
-									yield OmpdFrameDecoratorThread(frame)
-								else:
-									yield OmpdFrameDecorator(frame, self.curr_task.task_handle)
-								break
-							sched_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(self.curr_task.task_handle)
-							
-							if(sched_task_handle is None):
-								stop_iter = True
-								break
-							self.curr_task = self.curr_task.get_generating_task()
-							self.frames = self.curr_task.get_task_frame()
-							
-						frame = frame.older()
-					break
-			
-				gdb_threads[worker_thread].switch()
-				
-			gdb_threads[orig_thread].switch()
-	
-	
-	def filter(self, frame_iter):
-		"""Function is called automatically with every 'bt' executed. If switched on, this will only let revelant frames be printed 
-		or all frames otherwise. If switched on, a FrameDecorator will be applied to state whether '.ompd_task_entry.' refers to an 
-		explicit or implicit task.
-		"""
-		if self.switched_on:
-			return self.filter_frames(frame_iter)
-		else:
-			return frame_iter
+    def __init__(self, fobj):
+        """Initializes a FrameDecorator with the given GDB Frame object."""
+        super(OmpdFrameDecoratorThread, self).__init__(fobj)
+        if isinstance(fobj, gdb.Frame):
+            self.fobj = fobj
+        elif isinstance(fobj, FrameDecorator):
+            self.fobj = fobj.inferior_frame()
+
+    def function(self):
+        name = str(self.fobj.name())
+        return "@thread %i: %s" % (gdb.selected_thread().num, name)
+
+
+class FrameFilter:
+    def __init__(self, addr_space):
+        """Initializes the FrameFilter, registers is in the GDB runtime and saves the given OMPD address space capsule."""
+        self.addr_space = addr_space
+        self.name = "Filter"
+        self.priority = 100
+        self.enabled = True
+        gdb.frame_filters[self.name] = self
+        self.switched_on = False
+        self.continue_to_master = False
+
+    def set_switch(self, on_off):
+        """Prints output when executing 'ompd bt on' or 'ompd bt off'."""
+        self.switched_on = on_off
+        if self.switched_on:
+            print('Enabled filter for "bt" output successfully.')
+        else:
+            print('Disabled filter for "bt" output successfully.')
+
+    def set_switch_continue(self, on_off):
+        """Prints output when executing 'ompd bt on continued'." """
+        self.continue_to_master = on_off
+        if self.continue_to_master:
+            print(
+                'Enabled "bt" mode that continues backtrace on to master thread for worker threads.'
+            )
+        else:
+            print('Disabled "bt" mode that continues onto master thread.')
+
+    def get_master_frames_for_worker(self, past_thread_num, latest_sp):
+        """Prints master frames for worker thread with id past_thread_num."""
+        gdb.execute("t 1")
+        gdb.execute("ompd bt on")
+        gdb.execute("bt")
+
+        frame = gdb.newest_frame()
+
+        while frame.older() is not None:
+            print("master frame sp:", str(frame.read_register("sp")))
+            yield OmpdFrameDecorator(frame)
+            frame = frame.older()
+        print("latest sp:", str(latest_sp))
+
+        gdb.execute("ompd bt on continued")
+        gdb.execute("t %d" % int(past_thread_num))
+
+    def filter_frames(self, frame_iter):
+        """Iterates through frames and only returns those that are relevant to the application
+        being debugged. The OmpdFrameDecorator is applied automatically.
+        """
+        curr_thread_num = gdb.selected_thread().num
+        is_no_omp_thread = False
+        if curr_thread_num in self.addr_space.threads:
+            curr_thread_obj = self.addr_space.threads[curr_thread_num]
+            self.curr_task = curr_thread_obj.get_current_task()
+            self.frames = self.curr_task.get_task_frame()
+        else:
+            is_no_omp_thread = True
+            print(
+                "Thread %d is no OpenMP thread, printing all frames:" % curr_thread_num
+            )
+
+        stop_iter = False
+        for x in frame_iter:
+            if is_no_omp_thread:
+                yield OmpdFrameDecoratorThread(x)
+                continue
+
+            if x.inferior_frame().older() is None:
+                continue
+            if self.curr_task.task_handle is None:
+                continue
+
+            gdb_sp = int(str(x.inferior_frame().read_register("sp")), 16)
+            gdb_sp_next_new = int(
+                str(x.inferior_frame()).split(",")[0].split("=")[1], 16
+            )
+            if x.inferior_frame().older():
+                gdb_sp_next = int(
+                    str(x.inferior_frame().older().read_register("sp")), 16
+                )
+            else:
+                gdb_sp_next = int(str(x.inferior_frame().read_register("sp")), 16)
+            while 1:
+                (ompd_enter_frame, ompd_exit_frame) = self.frames
+
+                if ompd_enter_frame != 0 and gdb_sp_next_new < ompd_enter_frame:
+                    break
+                if ompd_exit_frame != 0 and gdb_sp_next_new < ompd_exit_frame:
+                    if (
+                        x.inferior_frame().older().older()
+                        and int(
+                            str(x.inferior_frame().older().older().read_register("sp")),
+                            16,
+                        )
+                        < ompd_exit_frame
+                    ):
+                        if self.continue_to_master:
+                            yield OmpdFrameDecoratorThread(x)
+                        else:
+                            yield OmpdFrameDecorator(x, self.curr_task.task_handle)
+                    else:
+                        yield OmpdFrameDecorator(x, self.curr_task.task_handle)
+                    break
+                sched_task_handle = self.curr_task.get_scheduling_task_handle()
+
+                if sched_task_handle is None:
+                    stop_iter = True
+                    break
+
+                self.curr_task = self.curr_task.get_scheduling_task()
+                self.frames = self.curr_task.get_task_frame()
+            if stop_iter:
+                break
+
+        # implementation of "ompd bt continued"
+        if self.continue_to_master:
+
+            orig_thread = gdb.selected_thread().num
+            gdb_threads = dict([(t.num, t) for t in gdb.selected_inferior().threads()])
+
+            # iterate through generating tasks until outermost task is reached
+            while 1:
+                # get OMPD thread id for master thread (systag in GDB output)
+                try:
+                    master_num = (
+                        self.curr_task.get_task_parallel()
+                        .get_thread_in_parallel(0)
+                        .get_thread_id()
+                    )
+                except:
+                    break
+                # search for thread id without the "l" for long via "thread find" and get GDB thread num from output
+                hex_str = str(hex(master_num))
+                thread_output = gdb.execute(
+                    "thread find %s" % hex_str[0 : len(hex_str) - 1], to_string=True
+                ).split(" ")
+                if thread_output[0] == "No":
+                    raise ValueError("Master thread num could not be found!")
+                gdb_master_num = int(thread_output[1])
+                # get task that generated last task of worker thread
+                try:
+                    self.curr_task = (
+                        self.curr_task.get_task_parallel()
+                        .get_task_in_parallel(0)
+                        .get_generating_task()
+                    )
+                except:
+                    break
+                self.frames = self.curr_task.get_task_frame()
+                (enter_frame, exit_frame) = self.frames
+                if exit_frame == 0:
+                    print("outermost generating task was reached")
+                    break
+
+                # save GDB num for worker thread to change back to it later
+                worker_thread = gdb.selected_thread().num
+
+                # use InferiorThread.switch()
+                gdb_threads = dict(
+                    [(t.num, t) for t in gdb.selected_inferior().threads()]
+                )
+                gdb_threads[gdb_master_num].switch()
+                print("#### switching to thread %i ####" % gdb_master_num)
+
+                frame = gdb.newest_frame()
+                stop_iter = False
+
+                while not stop_iter:
+                    if self.curr_task.task_handle is None:
+                        break
+                    self.frames = self.curr_task.get_task_frame()
+
+                    while frame:
+                        if self.curr_task.task_handle is None:
+                            break
+
+                        gdb_sp_next_new = int(
+                            str(frame).split(",")[0].split("=")[1], 16
+                        )
+
+                        if frame.older():
+                            gdb_sp_next = int(
+                                str(frame.older().read_register("sp")), 16
+                            )
+                        else:
+                            gdb_sp_next = int(str(frame.read_register("sp")), 16)
+
+                        while 1:
+                            (ompd_enter_frame, ompd_exit_frame) = self.frames
+
+                            if (
+                                ompd_enter_frame != 0
+                                and gdb_sp_next_new < ompd_enter_frame
+                            ):
+                                break
+                            if (
+                                ompd_exit_frame == 0
+                                or gdb_sp_next_new < ompd_exit_frame
+                            ):
+                                if (
+                                    ompd_exit_frame == 0
+                                    or frame.older()
+                                    and frame.older().older()
+                                    and int(
+                                        str(frame.older().older().read_register("sp")),
+                                        16,
+                                    )
+                                    < ompd_exit_frame
+                                ):
+                                    yield OmpdFrameDecoratorThread(frame)
+                                else:
+                                    yield OmpdFrameDecorator(
+                                        frame, self.curr_task.task_handle
+                                    )
+                                break
+                            sched_task_handle = (
+                                ompdModule.call_ompd_get_scheduling_task_handle(
+                                    self.curr_task.task_handle
+                                )
+                            )
+
+                            if sched_task_handle is None:
+                                stop_iter = True
+                                break
+                            self.curr_task = self.curr_task.get_generating_task()
+                            self.frames = self.curr_task.get_task_frame()
+
+                        frame = frame.older()
+                    break
+
+                gdb_threads[worker_thread].switch()
+
+            gdb_threads[orig_thread].switch()
+
+    def filter(self, frame_iter):
+        """Function is called automatically with every 'bt' executed. If switched on, this will only let revelant frames be printed
+        or all frames otherwise. If switched on, a FrameDecorator will be applied to state whether '.ompd_task_entry.' refers to an
+        explicit or implicit task.
+        """
+        if self.switched_on:
+            return self.filter_frames(frame_iter)
+        else:
+            return frame_iter

diff  --git a/openmp/libompd/gdb-plugin/ompd/ompd.py b/openmp/libompd/gdb-plugin/ompd/ompd.py
index 3259f98357708..a404e621e77bb 100644
--- a/openmp/libompd/gdb-plugin/ompd/ompd.py
+++ b/openmp/libompd/gdb-plugin/ompd/ompd.py
@@ -11,559 +11,675 @@
 addr_space = None
 ff = None
 icv_map = None
-ompd_scope_map = {1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'}
+ompd_scope_map = {
+    1: "global",
+    2: "address_space",
+    3: "thread",
+    4: "parallel",
+    5: "implicit_task",
+    6: "task",
+}
 in_task_function = False
 
+
 class ompd(gdb.Command):
-	def __init__(self):
-		super(ompd, self).__init__('ompd',
-			gdb.COMMAND_STATUS,
-			gdb.COMPLETE_NONE,
-			True)
+    def __init__(self):
+        super(ompd, self).__init__("ompd", gdb.COMMAND_STATUS, gdb.COMPLETE_NONE, True)
+
 
 class ompd_init(gdb.Command):
-	"""Find and initialize ompd library"""
-
-	# first parameter is command-line input, second parameter is gdb-specific data
-	def __init__(self):
-		self.__doc__ = 'Find and initialize OMPD library\n usage: ompd init'
-		super(ompd_init, self).__init__('ompd init',
-						gdb.COMMAND_DATA)
-
-	def invoke(self, arg, from_tty):
-		global addr_space
-		global ff
-		try:
-			try:
-				print(gdb.newest_frame())
-			except:
-				gdb.execute("start")
-			try:
-				lib_list = gdb.parse_and_eval("(char**)ompd_dll_locations")
-			except gdb.error:
-				raise ValueError("No ompd_dll_locations symbol in execution, make sure to have an OMPD enabled OpenMP runtime");
-			
-			while(gdb.parse_and_eval("(char**)ompd_dll_locations") == False):
-				gdb.execute("tbreak ompd_dll_locations_valid")
-				gdb.execute("continue")
-			
-			lib_list = gdb.parse_and_eval("(char**)ompd_dll_locations")
-			
-			i = 0
-			while(lib_list[i]):
-				ret = ompdModule.ompd_open(lib_list[i].string())
-				if ret == -1:
-					raise ValueError("Handle of OMPD library is not a valid string!")
-				if ret == -2:
-					print("ret == -2")
-					pass # It's ok to fail on dlopen
-				if ret == -3:
-					print("ret == -3")
-					pass # It's ok to fail on dlsym
-				if ret < -10:
-					raise ValueError("OMPD error code %i!" % (-10 - ret))
-					
-				if ret > 0:
-					print("Loaded OMPD lib successfully!")
-					try:
-						addr_space = ompd_address_space()
-						ff = FrameFilter(addr_space)
-					except:
-						traceback.print_exc()
-					return
-				i = i+1
-			
-			raise ValueError("OMPD library could not be loaded!")
-		except:
-			traceback.print_exc()
+    """Find and initialize ompd library"""
+
+    # first parameter is command-line input, second parameter is gdb-specific data
+    def __init__(self):
+        self.__doc__ = "Find and initialize OMPD library\n usage: ompd init"
+        super(ompd_init, self).__init__("ompd init", gdb.COMMAND_DATA)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        global ff
+        try:
+            try:
+                print(gdb.newest_frame())
+            except:
+                gdb.execute("start")
+            try:
+                lib_list = gdb.parse_and_eval("(char**)ompd_dll_locations")
+            except gdb.error:
+                raise ValueError(
+                    "No ompd_dll_locations symbol in execution, make sure to have an OMPD enabled OpenMP runtime"
+                )
+
+            while gdb.parse_and_eval("(char**)ompd_dll_locations") == False:
+                gdb.execute("tbreak ompd_dll_locations_valid")
+                gdb.execute("continue")
+
+            lib_list = gdb.parse_and_eval("(char**)ompd_dll_locations")
+
+            i = 0
+            while lib_list[i]:
+                ret = ompdModule.ompd_open(lib_list[i].string())
+                if ret == -1:
+                    raise ValueError("Handle of OMPD library is not a valid string!")
+                if ret == -2:
+                    print("ret == -2")
+                    pass  # It's ok to fail on dlopen
+                if ret == -3:
+                    print("ret == -3")
+                    pass  # It's ok to fail on dlsym
+                if ret < -10:
+                    raise ValueError("OMPD error code %i!" % (-10 - ret))
+
+                if ret > 0:
+                    print("Loaded OMPD lib successfully!")
+                    try:
+                        addr_space = ompd_address_space()
+                        ff = FrameFilter(addr_space)
+                    except:
+                        traceback.print_exc()
+                    return
+                i = i + 1
+
+            raise ValueError("OMPD library could not be loaded!")
+        except:
+            traceback.print_exc()
+
 
 class ompd_threads(gdb.Command):
-	"""Register thread ids of current context"""
-	def __init__(self):
-		self.__doc__ = 'Provide information on threads of current context.\n usage: ompd threads'
-		super(ompd_threads, self).__init__('ompd threads',
-						gdb.COMMAND_STATUS)
-	
-	def invoke(self, arg, from_tty):
-		global addr_space
-		if init_error():
-			return
-		addr_space.list_threads(True)
+    """Register thread ids of current context"""
+
+    def __init__(self):
+        self.__doc__ = (
+            "Provide information on threads of current context.\n usage: ompd threads"
+        )
+        super(ompd_threads, self).__init__("ompd threads", gdb.COMMAND_STATUS)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        if init_error():
+            return
+        addr_space.list_threads(True)
+
 
 def print_parallel_region(curr_parallel, team_size):
-	"""Helper function for ompd_parallel_region. To print out the details of the parallel region."""
-	for omp_thr in range(team_size):
-		thread = curr_parallel.get_thread_in_parallel(omp_thr)
-		ompd_state = str(addr_space.states[thread.get_state()[0]])
-		ompd_wait_id = thread.get_state()[1]
-		task = curr_parallel.get_task_in_parallel(omp_thr)
-		task_func_addr = task.get_task_function()
-		# Get the function this addr belongs to
-		sal = gdb.find_pc_line(task_func_addr)
-		block = gdb.block_for_pc(task_func_addr)
-		while block and not block.function:
-			block = block.superblock
-		if omp_thr == 0:
-			print('%6d (master) %-37s %ld    0x%lx %-25s %-17s:%d' % \
-			(omp_thr, ompd_state, ompd_wait_id, task_func_addr, \
-			 block.function.print_name, sal.symtab.filename, sal.line))
-		else:
-			print('%6d          %-37s %ld    0x%lx %-25s %-17s:%d' % \
-			(omp_thr, ompd_state, ompd_wait_id, task_func_addr, \
-			 block.function.print_name, sal.symtab.filename, sal.line))
+    """Helper function for ompd_parallel_region. To print out the details of the parallel region."""
+    for omp_thr in range(team_size):
+        thread = curr_parallel.get_thread_in_parallel(omp_thr)
+        ompd_state = str(addr_space.states[thread.get_state()[0]])
+        ompd_wait_id = thread.get_state()[1]
+        task = curr_parallel.get_task_in_parallel(omp_thr)
+        task_func_addr = task.get_task_function()
+        # Get the function this addr belongs to
+        sal = gdb.find_pc_line(task_func_addr)
+        block = gdb.block_for_pc(task_func_addr)
+        while block and not block.function:
+            block = block.superblock
+        if omp_thr == 0:
+            print(
+                "%6d (master) %-37s %ld    0x%lx %-25s %-17s:%d"
+                % (
+                    omp_thr,
+                    ompd_state,
+                    ompd_wait_id,
+                    task_func_addr,
+                    block.function.print_name,
+                    sal.symtab.filename,
+                    sal.line,
+                )
+            )
+        else:
+            print(
+                "%6d          %-37s %ld    0x%lx %-25s %-17s:%d"
+                % (
+                    omp_thr,
+                    ompd_state,
+                    ompd_wait_id,
+                    task_func_addr,
+                    block.function.print_name,
+                    sal.symtab.filename,
+                    sal.line,
+                )
+            )
+
 
 class ompd_parallel_region(gdb.Command):
-	"""Parallel Region Details"""
-	def __init__(self):
-		self.__doc__ = 'Display the details of the current and enclosing parallel regions.\n usage: ompd parallel'
-		super(ompd_parallel_region, self).__init__('ompd parallel',
-							   gdb.COMMAND_STATUS)
-
-	def invoke(self, arg, from_tty):
-		global addr_space
-		if init_error():
-			return
-		if addr_space.icv_map is None:
-			addr_space.get_icv_map()
-		if addr_space.states is None:
-			addr_space.enumerate_states()
-		curr_thread_handle = addr_space.get_curr_thread()
-		curr_parallel_handle = curr_thread_handle.get_current_parallel_handle()
-		curr_parallel = ompd_parallel(curr_parallel_handle)
-		while curr_parallel_handle is not None and curr_parallel is not None:
-			nest_level = ompdModule.call_ompd_get_icv_from_scope(curr_parallel_handle,\
-				     addr_space.icv_map['levels-var'][1], addr_space.icv_map['levels-var'][0])
-			if nest_level == 0:
-				break
-			team_size = ompdModule.call_ompd_get_icv_from_scope(curr_parallel_handle, \
-				    addr_space.icv_map['team-size-var'][1], \
-				    addr_space.icv_map['team-size-var'][0])
-			print ("")
-			print ("Parallel Region: Nesting Level %d: Team Size: %d" % (nest_level, team_size))
-			print ("================================================")
-			print ("")
-			print ("OMP Thread Nbr  Thread State                     Wait Id  EntryAddr FuncName                 File:Line");
-			print ("======================================================================================================");
-			print_parallel_region(curr_parallel, team_size)
-			enclosing_parallel = curr_parallel.get_enclosing_parallel()
-			enclosing_parallel_handle = curr_parallel.get_enclosing_parallel_handle()
-			curr_parallel = enclosing_parallel
-			curr_parallel_handle = enclosing_parallel_handle
+    """Parallel Region Details"""
+
+    def __init__(self):
+        self.__doc__ = "Display the details of the current and enclosing parallel regions.\n usage: ompd parallel"
+        super(ompd_parallel_region, self).__init__("ompd parallel", gdb.COMMAND_STATUS)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        if init_error():
+            return
+        if addr_space.icv_map is None:
+            addr_space.get_icv_map()
+        if addr_space.states is None:
+            addr_space.enumerate_states()
+        curr_thread_handle = addr_space.get_curr_thread()
+        curr_parallel_handle = curr_thread_handle.get_current_parallel_handle()
+        curr_parallel = ompd_parallel(curr_parallel_handle)
+        while curr_parallel_handle is not None and curr_parallel is not None:
+            nest_level = ompdModule.call_ompd_get_icv_from_scope(
+                curr_parallel_handle,
+                addr_space.icv_map["levels-var"][1],
+                addr_space.icv_map["levels-var"][0],
+            )
+            if nest_level == 0:
+                break
+            team_size = ompdModule.call_ompd_get_icv_from_scope(
+                curr_parallel_handle,
+                addr_space.icv_map["team-size-var"][1],
+                addr_space.icv_map["team-size-var"][0],
+            )
+            print("")
+            print(
+                "Parallel Region: Nesting Level %d: Team Size: %d"
+                % (nest_level, team_size)
+            )
+            print("================================================")
+            print("")
+            print(
+                "OMP Thread Nbr  Thread State                     Wait Id  EntryAddr FuncName                 File:Line"
+            )
+            print(
+                "======================================================================================================"
+            )
+            print_parallel_region(curr_parallel, team_size)
+            enclosing_parallel = curr_parallel.get_enclosing_parallel()
+            enclosing_parallel_handle = curr_parallel.get_enclosing_parallel_handle()
+            curr_parallel = enclosing_parallel
+            curr_parallel_handle = enclosing_parallel_handle
+
 
 class ompd_icvs(gdb.Command):
-	"""ICVs"""
-	def __init__(self):
-		self.__doc__ = 'Display the values of the Internal Control Variables.\n usage: ompd icvs'
-		super(ompd_icvs, self).__init__('ompd icvs',
-						 gdb.COMMAND_STATUS)
-
-	def invoke(self, arg, from_tty):
-		global addr_space
-		global ompd_scope_map
-		if init_error():
-			return
-		curr_thread_handle = addr_space.get_curr_thread()
-		if addr_space.icv_map is None:
-			addr_space.get_icv_map()
-		print("ICV Name                        Scope                     Value")
-		print("===============================================================")
-
-		try:
-			for icv_name in addr_space.icv_map:
-				scope = addr_space.icv_map[icv_name][1]
-				#{1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'}
-				if scope == 2:
-					handle = addr_space.addr_space
-				elif scope == 3:
-					handle = curr_thread_handle.thread_handle
-				elif scope == 4:
-					handle = curr_thread_handle.get_current_parallel_handle()
-				elif scope == 6:
-					handle = curr_thread_handle.get_current_task_handle()
-				else:
-					raise ValueError("Invalid scope")
-
-				if (icv_name == "nthreads-var" or icv_name == "bind-var"):
-					icv_value = ompdModule.call_ompd_get_icv_from_scope(
-						    handle, scope, addr_space.icv_map[icv_name][0])
-					if icv_value is None:
-						icv_string = ompdModule.call_ompd_get_icv_string_from_scope( \
-							     handle, scope, addr_space.icv_map[icv_name][0])
-						print('%-31s %-26s %s' % (icv_name, ompd_scope_map[scope], icv_string))
-					else:
-						print('%-31s %-26s %d' % (icv_name, ompd_scope_map[scope], icv_value))
-
-				elif (icv_name == "affinity-format-var" or icv_name == "run-sched-var" or \
-                                         icv_name == "tool-libraries-var" or icv_name == "tool-verbose-init-var"):
-					icv_string = ompdModule.call_ompd_get_icv_string_from_scope( \
-						     handle, scope, addr_space.icv_map[icv_name][0])
-					print('%-31s %-26s %s' % (icv_name, ompd_scope_map[scope], icv_string))
-				else:
-					icv_value = ompdModule.call_ompd_get_icv_from_scope(handle, \
-						    scope, addr_space.icv_map[icv_name][0])
-					print('%-31s %-26s %d' % (icv_name, ompd_scope_map[scope], icv_value))
-		except:
-		       traceback.print_exc()
+    """ICVs"""
+
+    def __init__(self):
+        self.__doc__ = (
+            "Display the values of the Internal Control Variables.\n usage: ompd icvs"
+        )
+        super(ompd_icvs, self).__init__("ompd icvs", gdb.COMMAND_STATUS)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        global ompd_scope_map
+        if init_error():
+            return
+        curr_thread_handle = addr_space.get_curr_thread()
+        if addr_space.icv_map is None:
+            addr_space.get_icv_map()
+        print("ICV Name                        Scope                     Value")
+        print("===============================================================")
+
+        try:
+            for icv_name in addr_space.icv_map:
+                scope = addr_space.icv_map[icv_name][1]
+                # {1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'}
+                if scope == 2:
+                    handle = addr_space.addr_space
+                elif scope == 3:
+                    handle = curr_thread_handle.thread_handle
+                elif scope == 4:
+                    handle = curr_thread_handle.get_current_parallel_handle()
+                elif scope == 6:
+                    handle = curr_thread_handle.get_current_task_handle()
+                else:
+                    raise ValueError("Invalid scope")
+
+                if icv_name == "nthreads-var" or icv_name == "bind-var":
+                    icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                        handle, scope, addr_space.icv_map[icv_name][0]
+                    )
+                    if icv_value is None:
+                        icv_string = ompdModule.call_ompd_get_icv_string_from_scope(
+                            handle, scope, addr_space.icv_map[icv_name][0]
+                        )
+                        print(
+                            "%-31s %-26s %s"
+                            % (icv_name, ompd_scope_map[scope], icv_string)
+                        )
+                    else:
+                        print(
+                            "%-31s %-26s %d"
+                            % (icv_name, ompd_scope_map[scope], icv_value)
+                        )
+
+                elif (
+                    icv_name == "affinity-format-var"
+                    or icv_name == "run-sched-var"
+                    or icv_name == "tool-libraries-var"
+                    or icv_name == "tool-verbose-init-var"
+                ):
+                    icv_string = ompdModule.call_ompd_get_icv_string_from_scope(
+                        handle, scope, addr_space.icv_map[icv_name][0]
+                    )
+                    print(
+                        "%-31s %-26s %s" % (icv_name, ompd_scope_map[scope], icv_string)
+                    )
+                else:
+                    icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                        handle, scope, addr_space.icv_map[icv_name][0]
+                    )
+                    print(
+                        "%-31s %-26s %d" % (icv_name, ompd_scope_map[scope], icv_value)
+                    )
+        except:
+            traceback.print_exc()
+
 
 def curr_thread():
-	"""Helper function for ompd_step. Returns the thread object for the current thread number."""
-	global addr_space
-	if addr_space is not None:
-		return addr_space.threads[int(gdb.selected_thread().num)]
-	return None
+    """Helper function for ompd_step. Returns the thread object for the current thread number."""
+    global addr_space
+    if addr_space is not None:
+        return addr_space.threads[int(gdb.selected_thread().num)]
+    return None
 
-class ompd_test(gdb.Command):
-	"""Test area"""
-	def __init__(self):
-		self.__doc__ = 'Test functionalities for correctness\n usage: ompd test'
-		super(ompd_test, self).__init__('ompd test',
-						gdb.COMMAND_OBSCURE)
-	
-	def invoke(self, arg, from_tty):
-		global addr_space
-		if init_error():
-			return
-		# get task function for current task of current thread
-		try:
-			current_thread = int(gdb.selected_thread().num)
-			current_thread_obj = addr_space.threads[current_thread]
-			task_function = current_thread_obj.get_current_task().get_task_function()
-			print("bt value:", int("0x0000000000400b6c",0))
-			print("get_task_function value:", task_function)
-
-			# get task function of implicit task in current parallel region for current thread
-			current_parallel_obj = current_thread_obj.get_current_parallel()
-			task_in_parallel = current_parallel_obj.get_task_in_parallel(current_thread)
-			task_function_in_parallel = task_in_parallel.get_task_function()
-			print("task_function_in_parallel:", task_function_in_parallel)
-		except:
-			print('Task function value not found for this thread')
-
-class ompdtestapi (gdb.Command):
-	""" To test API's return code """
-	def __init__(self):
-		self.__doc__ = 'Test OMPD tool Interface APIs.\nUsage: ompdtestapi <api name>'
-		super (ompdtestapi, self).__init__('ompdtestapi', gdb.COMMAND_OBSCURE)
-
-	def invoke (self, arg, from_tty):
-		global addr_space
-		if init_error():
-			print ("Error in Initialization.");
-			return
-		if not arg:
-			print ("No API provided to test, eg: ompdtestapi ompd_initialize")
-
-		if arg == "ompd_get_thread_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			ompdModule.test_ompd_get_thread_handle(addr_handle, threadId)
-		elif arg == "ompd_get_curr_parallel_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			ompdModule.test_ompd_get_curr_parallel_handle(thread_handle)
-		elif arg == "ompd_get_thread_in_parallel":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			ompdModule.test_ompd_get_thread_in_parallel(parallel_handle)
-		elif arg == "ompd_thread_handle_compare":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			thread_handle1 = ompdModule.call_ompd_get_thread_in_parallel(parallel_handle, 1);
-			thread_handle2 = ompdModule.call_ompd_get_thread_in_parallel(parallel_handle, 2);
-			ompdModule.test_ompd_thread_handle_compare(thread_handle1, thread_handle1)
-			ompdModule.test_ompd_thread_handle_compare(thread_handle1, thread_handle2)
-		elif arg == "ompd_get_thread_id":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			ompdModule.test_ompd_get_thread_id(thread_handle)
-		elif arg == "ompd_rel_thread_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			ompdModule.test_ompd_rel_thread_handle(thread_handle)
-		elif arg == "ompd_get_enclosing_parallel_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			ompdModule.test_ompd_get_enclosing_parallel_handle(parallel_handle)
-		elif arg == "ompd_parallel_handle_compare":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle1 = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			parallel_handle2 = ompdModule.call_ompd_get_enclosing_parallel_handle(parallel_handle1)
-			ompdModule.test_ompd_parallel_handle_compare(parallel_handle1, parallel_handle1)
-			ompdModule.test_ompd_parallel_handle_compare(parallel_handle1, parallel_handle2)
-		elif arg == "ompd_rel_parallel_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			ompdModule.test_ompd_rel_parallel_handle(parallel_handle)
-		elif arg == "ompd_initialize":
-			ompdModule.test_ompd_initialize()
-		elif arg == "ompd_get_api_version":
-			ompdModule.test_ompd_get_api_version()
-		elif arg == "ompd_get_version_string":
-			ompdModule.test_ompd_get_version_string()
-		elif arg == "ompd_finalize":
-			ompdModule.test_ompd_finalize()
-		elif arg == "ompd_process_initialize":
-			ompdModule.call_ompd_initialize()
-			ompdModule.test_ompd_process_initialize()
-		elif arg == "ompd_device_initialize":
-			ompdModule.test_ompd_device_initialize()
-		elif arg == "ompd_rel_address_space_handle":
-			ompdModule.test_ompd_rel_address_space_handle()
-		elif arg == "ompd_get_omp_version":
-			addr_handle = addr_space.addr_space
-			ompdModule.test_ompd_get_omp_version(addr_handle)
-		elif arg == "ompd_get_omp_version_string":
-			addr_handle = addr_space.addr_space
-			ompdModule.test_ompd_get_omp_version_string(addr_handle)
-		elif arg == "ompd_get_curr_task_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			ompdModule.test_ompd_get_curr_task_handle(thread_handle)
-		elif arg == "ompd_get_task_parallel_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			ompdModule.test_ompd_get_task_parallel_handle(task_handle)
-		elif arg == "ompd_get_generating_task_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			ompdModule.test_ompd_get_generating_task_handle(task_handle)
-		elif arg == "ompd_get_scheduling_task_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			ompdModule.test_ompd_get_scheduling_task_handle(task_handle)
-		elif arg == "ompd_get_task_in_parallel":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			ompdModule.test_ompd_get_task_in_parallel(parallel_handle)
-		elif arg == "ompd_rel_task_handle":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			ompdModule.test_ompd_rel_task_handle(task_handle)
-		elif arg == "ompd_task_handle_compare":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle1 = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			task_handle2 = ompdModule.call_ompd_get_generating_task_handle(task_handle1)
-			ompdModule.test_ompd_task_handle_compare(task_handle1, task_handle2)
-			ompdModule.test_ompd_task_handle_compare(task_handle2, task_handle1)
-		elif arg == "ompd_get_task_function":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			ompdModule.test_ompd_get_task_function(task_handle)
-		elif arg == "ompd_get_task_frame":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
-			ompdModule.test_ompd_get_task_frame(task_handle)
-		elif arg == "ompd_get_state":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			ompdModule.test_ompd_get_state(thread_handle)
-		elif arg == "ompd_get_display_control_vars":
-			addr_handle = addr_space.addr_space
-			ompdModule.test_ompd_get_display_control_vars(addr_handle)
-		elif arg == "ompd_rel_display_control_vars":
-			ompdModule.test_ompd_rel_display_control_vars()
-		elif arg == "ompd_enumerate_icvs":
-			addr_handle = addr_space.addr_space
-			ompdModule.test_ompd_enumerate_icvs(addr_handle)
-		elif arg== "ompd_get_icv_from_scope":
-			addr_handle = addr_space.addr_space
-			threadId = gdb.selected_thread().ptid[1]
-			thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
-			parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle)
-			task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)	
-			ompdModule.test_ompd_get_icv_from_scope_with_addr_handle(addr_handle)
-			ompdModule.test_ompd_get_icv_from_scope_with_thread_handle(thread_handle)
-			ompdModule.test_ompd_get_icv_from_scope_with_parallel_handle(parallel_handle)
-			ompdModule.test_ompd_get_icv_from_scope_with_task_handle(task_handle)
-		elif arg == "ompd_get_icv_string_from_scope":
-			addr_handle = addr_space.addr_space
-			ompdModule.test_ompd_get_icv_string_from_scope(addr_handle)
-		elif arg == "ompd_get_tool_data":
-			ompdModule.test_ompd_get_tool_data()
-		elif arg == "ompd_enumerate_states":
-			ompdModule.test_ompd_enumerate_states()
-		else:
-			print ("Invalid API.")
 
+class ompd_test(gdb.Command):
+    """Test area"""
+
+    def __init__(self):
+        self.__doc__ = "Test functionalities for correctness\n usage: ompd test"
+        super(ompd_test, self).__init__("ompd test", gdb.COMMAND_OBSCURE)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        if init_error():
+            return
+        # get task function for current task of current thread
+        try:
+            current_thread = int(gdb.selected_thread().num)
+            current_thread_obj = addr_space.threads[current_thread]
+            task_function = current_thread_obj.get_current_task().get_task_function()
+            print("bt value:", int("0x0000000000400b6c", 0))
+            print("get_task_function value:", task_function)
+
+            # get task function of implicit task in current parallel region for current thread
+            current_parallel_obj = current_thread_obj.get_current_parallel()
+            task_in_parallel = current_parallel_obj.get_task_in_parallel(current_thread)
+            task_function_in_parallel = task_in_parallel.get_task_function()
+            print("task_function_in_parallel:", task_function_in_parallel)
+        except:
+            print("Task function value not found for this thread")
+
+
+class ompdtestapi(gdb.Command):
+    """To test API's return code"""
+
+    def __init__(self):
+        self.__doc__ = "Test OMPD tool Interface APIs.\nUsage: ompdtestapi <api name>"
+        super(ompdtestapi, self).__init__("ompdtestapi", gdb.COMMAND_OBSCURE)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        if init_error():
+            print("Error in Initialization.")
+            return
+        if not arg:
+            print("No API provided to test, eg: ompdtestapi ompd_initialize")
+
+        if arg == "ompd_get_thread_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            ompdModule.test_ompd_get_thread_handle(addr_handle, threadId)
+        elif arg == "ompd_get_curr_parallel_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            ompdModule.test_ompd_get_curr_parallel_handle(thread_handle)
+        elif arg == "ompd_get_thread_in_parallel":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            ompdModule.test_ompd_get_thread_in_parallel(parallel_handle)
+        elif arg == "ompd_thread_handle_compare":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            thread_handle1 = ompdModule.call_ompd_get_thread_in_parallel(
+                parallel_handle, 1
+            )
+            thread_handle2 = ompdModule.call_ompd_get_thread_in_parallel(
+                parallel_handle, 2
+            )
+            ompdModule.test_ompd_thread_handle_compare(thread_handle1, thread_handle1)
+            ompdModule.test_ompd_thread_handle_compare(thread_handle1, thread_handle2)
+        elif arg == "ompd_get_thread_id":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            ompdModule.test_ompd_get_thread_id(thread_handle)
+        elif arg == "ompd_rel_thread_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            ompdModule.test_ompd_rel_thread_handle(thread_handle)
+        elif arg == "ompd_get_enclosing_parallel_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            ompdModule.test_ompd_get_enclosing_parallel_handle(parallel_handle)
+        elif arg == "ompd_parallel_handle_compare":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle1 = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            parallel_handle2 = ompdModule.call_ompd_get_enclosing_parallel_handle(
+                parallel_handle1
+            )
+            ompdModule.test_ompd_parallel_handle_compare(
+                parallel_handle1, parallel_handle1
+            )
+            ompdModule.test_ompd_parallel_handle_compare(
+                parallel_handle1, parallel_handle2
+            )
+        elif arg == "ompd_rel_parallel_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            ompdModule.test_ompd_rel_parallel_handle(parallel_handle)
+        elif arg == "ompd_initialize":
+            ompdModule.test_ompd_initialize()
+        elif arg == "ompd_get_api_version":
+            ompdModule.test_ompd_get_api_version()
+        elif arg == "ompd_get_version_string":
+            ompdModule.test_ompd_get_version_string()
+        elif arg == "ompd_finalize":
+            ompdModule.test_ompd_finalize()
+        elif arg == "ompd_process_initialize":
+            ompdModule.call_ompd_initialize()
+            ompdModule.test_ompd_process_initialize()
+        elif arg == "ompd_device_initialize":
+            ompdModule.test_ompd_device_initialize()
+        elif arg == "ompd_rel_address_space_handle":
+            ompdModule.test_ompd_rel_address_space_handle()
+        elif arg == "ompd_get_omp_version":
+            addr_handle = addr_space.addr_space
+            ompdModule.test_ompd_get_omp_version(addr_handle)
+        elif arg == "ompd_get_omp_version_string":
+            addr_handle = addr_space.addr_space
+            ompdModule.test_ompd_get_omp_version_string(addr_handle)
+        elif arg == "ompd_get_curr_task_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            ompdModule.test_ompd_get_curr_task_handle(thread_handle)
+        elif arg == "ompd_get_task_parallel_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_get_task_parallel_handle(task_handle)
+        elif arg == "ompd_get_generating_task_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_get_generating_task_handle(task_handle)
+        elif arg == "ompd_get_scheduling_task_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_get_scheduling_task_handle(task_handle)
+        elif arg == "ompd_get_task_in_parallel":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            ompdModule.test_ompd_get_task_in_parallel(parallel_handle)
+        elif arg == "ompd_rel_task_handle":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_rel_task_handle(task_handle)
+        elif arg == "ompd_task_handle_compare":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle1 = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            task_handle2 = ompdModule.call_ompd_get_generating_task_handle(task_handle1)
+            ompdModule.test_ompd_task_handle_compare(task_handle1, task_handle2)
+            ompdModule.test_ompd_task_handle_compare(task_handle2, task_handle1)
+        elif arg == "ompd_get_task_function":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_get_task_function(task_handle)
+        elif arg == "ompd_get_task_frame":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_get_task_frame(task_handle)
+        elif arg == "ompd_get_state":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            ompdModule.test_ompd_get_state(thread_handle)
+        elif arg == "ompd_get_display_control_vars":
+            addr_handle = addr_space.addr_space
+            ompdModule.test_ompd_get_display_control_vars(addr_handle)
+        elif arg == "ompd_rel_display_control_vars":
+            ompdModule.test_ompd_rel_display_control_vars()
+        elif arg == "ompd_enumerate_icvs":
+            addr_handle = addr_space.addr_space
+            ompdModule.test_ompd_enumerate_icvs(addr_handle)
+        elif arg == "ompd_get_icv_from_scope":
+            addr_handle = addr_space.addr_space
+            threadId = gdb.selected_thread().ptid[1]
+            thread_handle = ompdModule.get_thread_handle(threadId, addr_handle)
+            parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+                thread_handle
+            )
+            task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle)
+            ompdModule.test_ompd_get_icv_from_scope_with_addr_handle(addr_handle)
+            ompdModule.test_ompd_get_icv_from_scope_with_thread_handle(thread_handle)
+            ompdModule.test_ompd_get_icv_from_scope_with_parallel_handle(
+                parallel_handle
+            )
+            ompdModule.test_ompd_get_icv_from_scope_with_task_handle(task_handle)
+        elif arg == "ompd_get_icv_string_from_scope":
+            addr_handle = addr_space.addr_space
+            ompdModule.test_ompd_get_icv_string_from_scope(addr_handle)
+        elif arg == "ompd_get_tool_data":
+            ompdModule.test_ompd_get_tool_data()
+        elif arg == "ompd_enumerate_states":
+            ompdModule.test_ompd_enumerate_states()
+        else:
+            print("Invalid API.")
 
 
 class ompd_bt(gdb.Command):
-	"""Turn filter for 'bt' on/off for output to only contain frames relevant to the application or all frames."""
-	def __init__(self):
-		self.__doc__ = 'Turn filter for "bt" output on or off. Specify "on continued" option to trace worker threads back to master threads.\n usage: ompd bt on|on continued|off'
-		super(ompd_bt, self).__init__('ompd bt',
-					gdb.COMMAND_STACK)
-	
-	def invoke(self, arg, from_tty):
-		global ff
-		global addr_space
-		global icv_map
-		global ompd_scope_map
-		if init_error():
-			return
-		if icv_map is None:
-			icv_map = {}
-			current = 0
-			more = 1
-			while more > 0:
-				tup = ompdModule.call_ompd_enumerate_icvs(addr_space.addr_space, current)
-				(current, next_icv, next_scope, more) = tup
-				icv_map[next_icv] = (current, next_scope, ompd_scope_map[next_scope])
-			print('Initialized ICV map successfully for filtering "bt".')
-		
-		arg_list = gdb.string_to_argv(arg)
-		if len(arg_list) == 0:
-			print('When calling "ompd bt", you must either specify "on", "on continued" or "off". Check "help ompd".')
-		elif len(arg_list) == 1 and arg_list[0] == 'on':
-			addr_space.list_threads(False)
-			ff.set_switch(True)
-			ff.set_switch_continue(False)
-		elif arg_list[0] == 'on' and arg_list[1] == 'continued':
-			ff.set_switch(True)
-			ff.set_switch_continue(True)
-		elif len(arg_list) == 1 and arg_list[0] == 'off':
-			ff.set_switch(False)
-			ff.set_switch_continue(False)
-		else:
-			print('When calling "ompd bt", you must either specify "on", "on continued" or "off". Check "help ompd".')
+    """Turn filter for 'bt' on/off for output to only contain frames relevant to the application or all frames."""
+
+    def __init__(self):
+        self.__doc__ = 'Turn filter for "bt" output on or off. Specify "on continued" option to trace worker threads back to master threads.\n usage: ompd bt on|on continued|off'
+        super(ompd_bt, self).__init__("ompd bt", gdb.COMMAND_STACK)
+
+    def invoke(self, arg, from_tty):
+        global ff
+        global addr_space
+        global icv_map
+        global ompd_scope_map
+        if init_error():
+            return
+        if icv_map is None:
+            icv_map = {}
+            current = 0
+            more = 1
+            while more > 0:
+                tup = ompdModule.call_ompd_enumerate_icvs(
+                    addr_space.addr_space, current
+                )
+                (current, next_icv, next_scope, more) = tup
+                icv_map[next_icv] = (current, next_scope, ompd_scope_map[next_scope])
+            print('Initialized ICV map successfully for filtering "bt".')
+
+        arg_list = gdb.string_to_argv(arg)
+        if len(arg_list) == 0:
+            print(
+                'When calling "ompd bt", you must either specify "on", "on continued" or "off". Check "help ompd".'
+            )
+        elif len(arg_list) == 1 and arg_list[0] == "on":
+            addr_space.list_threads(False)
+            ff.set_switch(True)
+            ff.set_switch_continue(False)
+        elif arg_list[0] == "on" and arg_list[1] == "continued":
+            ff.set_switch(True)
+            ff.set_switch_continue(True)
+        elif len(arg_list) == 1 and arg_list[0] == "off":
+            ff.set_switch(False)
+            ff.set_switch_continue(False)
+        else:
+            print(
+                'When calling "ompd bt", you must either specify "on", "on continued" or "off". Check "help ompd".'
+            )
+
 
 # TODO: remove
 class ompd_taskframes(gdb.Command):
-	"""Prints task handles for relevant task frames. Meant for debugging."""
-	def __init__(self):
-		self.__doc__ = 'Prints list of tasks.\nUsage: ompd taskframes'
-		super(ompd_taskframes, self).__init__('ompd taskframes',
-					gdb.COMMAND_STACK)
-	
-	def invoke(self, arg, from_tty):
-		global addr_space
-		if init_error():
-			return
-		frame = gdb.newest_frame()
-		while(frame):
-			print (frame.read_register('sp'))
-			frame = frame.older()
-		curr_task_handle = None
-		if(addr_space.threads and addr_space.threads.get(gdb.selected_thread().num)):
-			curr_thread_handle = curr_thread().thread_handle
-			curr_task_handle = ompdModule.call_ompd_get_curr_task_handle(curr_thread_handle)
-		if(not curr_task_handle):
-			return None
-		prev_frames = None
-		try:
-			while(1):
-				frames_with_flags = ompdModule.call_ompd_get_task_frame(curr_task_handle)
-				frames = (frames_with_flags[0], frames_with_flags[3])
-				if(prev_frames == frames):
-					break
-				if(not isinstance(frames,tuple)):
-					break
-				(ompd_enter_frame, ompd_exit_frame) = frames
-				print(hex(ompd_enter_frame), hex(ompd_exit_frame))
-				curr_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(curr_task_handle)
-				prev_frames = frames
-				if(not curr_task_handle):
-					break
-		except:
-			traceback.print_exc()
+    """Prints task handles for relevant task frames. Meant for debugging."""
+
+    def __init__(self):
+        self.__doc__ = "Prints list of tasks.\nUsage: ompd taskframes"
+        super(ompd_taskframes, self).__init__("ompd taskframes", gdb.COMMAND_STACK)
+
+    def invoke(self, arg, from_tty):
+        global addr_space
+        if init_error():
+            return
+        frame = gdb.newest_frame()
+        while frame:
+            print(frame.read_register("sp"))
+            frame = frame.older()
+        curr_task_handle = None
+        if addr_space.threads and addr_space.threads.get(gdb.selected_thread().num):
+            curr_thread_handle = curr_thread().thread_handle
+            curr_task_handle = ompdModule.call_ompd_get_curr_task_handle(
+                curr_thread_handle
+            )
+        if not curr_task_handle:
+            return None
+        prev_frames = None
+        try:
+            while 1:
+                frames_with_flags = ompdModule.call_ompd_get_task_frame(
+                    curr_task_handle
+                )
+                frames = (frames_with_flags[0], frames_with_flags[3])
+                if prev_frames == frames:
+                    break
+                if not isinstance(frames, tuple):
+                    break
+                (ompd_enter_frame, ompd_exit_frame) = frames
+                print(hex(ompd_enter_frame), hex(ompd_exit_frame))
+                curr_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(
+                    curr_task_handle
+                )
+                prev_frames = frames
+                if not curr_task_handle:
+                    break
+        except:
+            traceback.print_exc()
+
 
 def print_and_exec(string):
-	"""Helper function for ompd_step. Executes the given command in GDB and prints it."""
-	print(string)
-	gdb.execute(string)
+    """Helper function for ompd_step. Executes the given command in GDB and prints it."""
+    print(string)
+    gdb.execute(string)
+
 
 class TempFrameFunctionBp(gdb.Breakpoint):
-	"""Helper class for ompd_step. Defines stop function for breakpoint on frame function."""
-	def stop(self):
-		global in_task_function
-		in_task_function = True
-		self.enabled = False
+    """Helper class for ompd_step. Defines stop function for breakpoint on frame function."""
+
+    def stop(self):
+        global in_task_function
+        in_task_function = True
+        self.enabled = False
+
 
 class ompd_step(gdb.Command):
-	"""Executes 'step' and skips frames irrelevant to the application / the ones without debug information."""
-	def __init__(self):
-		self.__doc__ = 'Executes "step" and skips runtime frames as much as possible.'
-		super(ompd_step, self).__init__('ompd step', gdb.COMMAND_STACK)
-	
-	class TaskBeginBp(gdb.Breakpoint):
-		"""Helper class. Defines stop function for breakpoint ompd_bp_task_begin."""
-		def stop(self):
-			try:
-				code_line = curr_thread().get_current_task().get_task_function()
-				frame_fct_bp = TempFrameFunctionBp(('*%i' % code_line), temporary=True, internal=True)
-				frame_fct_bp.thread = self.thread
-				return False
-			except:
-				return False
-	
-	def invoke(self, arg, from_tty):
-		global in_task_function
-		if init_error():
-			return
-		tbp = self.TaskBeginBp('ompd_bp_task_begin', temporary=True, internal=True)
-		tbp.thread = int(gdb.selected_thread().num)
-		print_and_exec('step')
-		while gdb.selected_frame().find_sal().symtab is None:
-			if not in_task_function:
-				print_and_exec('finish')
-			else:
-				print_and_exec('si')
+    """Executes 'step' and skips frames irrelevant to the application / the ones without debug information."""
+
+    def __init__(self):
+        self.__doc__ = 'Executes "step" and skips runtime frames as much as possible.'
+        super(ompd_step, self).__init__("ompd step", gdb.COMMAND_STACK)
+
+    class TaskBeginBp(gdb.Breakpoint):
+        """Helper class. Defines stop function for breakpoint ompd_bp_task_begin."""
+
+        def stop(self):
+            try:
+                code_line = curr_thread().get_current_task().get_task_function()
+                frame_fct_bp = TempFrameFunctionBp(
+                    ("*%i" % code_line), temporary=True, internal=True
+                )
+                frame_fct_bp.thread = self.thread
+                return False
+            except:
+                return False
+
+    def invoke(self, arg, from_tty):
+        global in_task_function
+        if init_error():
+            return
+        tbp = self.TaskBeginBp("ompd_bp_task_begin", temporary=True, internal=True)
+        tbp.thread = int(gdb.selected_thread().num)
+        print_and_exec("step")
+        while gdb.selected_frame().find_sal().symtab is None:
+            if not in_task_function:
+                print_and_exec("finish")
+            else:
+                print_and_exec("si")
+
 
 def init_error():
-	global addr_space
-	if (gdb.selected_thread() is None) or (addr_space is None) or (not addr_space):
-		print("Run 'ompd init' before running any of the ompd commands")
-		return True
-	return False
+    global addr_space
+    if (gdb.selected_thread() is None) or (addr_space is None) or (not addr_space):
+        print("Run 'ompd init' before running any of the ompd commands")
+        return True
+    return False
+
 
 def main():
-	ompd()
-	ompd_init()
-	ompd_threads()
-	ompd_icvs()
-	ompd_parallel_region()
-	ompd_test()
-	ompdtestapi()
-	ompd_taskframes()
-	ompd_bt()
-	ompd_step()
+    ompd()
+    ompd_init()
+    ompd_threads()
+    ompd_icvs()
+    ompd_parallel_region()
+    ompd_test()
+    ompdtestapi()
+    ompd_taskframes()
+    ompd_bt()
+    ompd_step()
+
 
 if __name__ == "__main__":
-	try:
-		main()
-	except:
-		traceback.print_exc()
+    try:
+        main()
+    except:
+        traceback.print_exc()
 
 # NOTE: test code using:
 # OMP_NUM_THREADS=... gdb a.out -x ../../projects/gdb_plugin/gdb-ompd/__init__.py

diff  --git a/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py b/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py
index 8b8f5bb9953c7..3c4f35715a1e7 100644
--- a/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py
+++ b/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py
@@ -6,309 +6,464 @@
 import traceback
 from enum import Enum
 
+
 class ompd_scope(Enum):
-	ompd_scope_global = 1
-	ompd_scope_address_space = 2
-	ompd_scope_thread = 3
-	ompd_scope_parallel = 4
-	ompd_scope_implicit_task = 5
-	ompd_scope_task = 6
+    ompd_scope_global = 1
+    ompd_scope_address_space = 2
+    ompd_scope_thread = 3
+    ompd_scope_parallel = 4
+    ompd_scope_implicit_task = 5
+    ompd_scope_task = 6
+
 
 class ompd_address_space(object):
-	
-	def __init__(self):
-		"""Initializes an ompd_address_space object by calling ompd_initialize
-		in ompdModule.c
-		"""
-		self.addr_space = ompdModule.call_ompd_initialize()
-		# maps thread_num (thread id given by gdb) to ompd_thread object with thread handle
-		self.threads = {}
-		self.states = None
-		self.icv_map = None
-		self.ompd_tool_test_bp = None
-		self.scope_map = {1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'}
-		self.sched_map = {1:'static', 2:'dynamic', 3:'guided', 4:'auto'}
-		gdb.events.stop.connect(self.handle_stop_event)
-		self.new_thread_breakpoint = gdb.Breakpoint("ompd_bp_thread_begin", internal=True)
-		tool_break_symbol = gdb.lookup_global_symbol("ompd_tool_break")
-		if (tool_break_symbol is not None):
-			self.ompd_tool_test_bp = gdb.Breakpoint("ompd_tool_break", internal=True)
-	
-	def handle_stop_event(self, event):
-		"""Sets a breakpoint at 
diff erent events, e.g. when a new OpenMP 
-		thread is created.
-		"""
-		if (isinstance(event, gdb.BreakpointEvent)):
-			# check if breakpoint has already been hit
-			if (self.new_thread_breakpoint in event.breakpoints):
-				self.add_thread()
-				gdb.execute('continue')
-				return
-			elif (self.ompd_tool_test_bp is not None and self.ompd_tool_test_bp in event.breakpoints):
-				try:
-					self.compare_ompt_data()
-					gdb.execute('continue')
-				except():
-					traceback.print_exc()
-		elif (isinstance(event, gdb.SignalEvent)):
-			# TODO: what do we need to do on SIGNALS?
-			pass
-		else:
-			# TODO: probably not possible?
-			pass
-	
-	def get_icv_map(self):
-		"""Fills ICV map.
-		"""
-		self.icv_map = {}
-		current = 0
-		more = 1
-		while more > 0:
-			tup = ompdModule.call_ompd_enumerate_icvs(self.addr_space, current)
-			(current, next_icv, next_scope, more) = tup
-			self.icv_map[next_icv] = (current, next_scope, self.scope_map[next_scope])
-		print('Initialized ICV map successfully for checking OMP API values.')
-		
-	def compare_ompt_data(self):
-		"""Compares OMPT tool data about parallel region to data returned by OMPD functions.
-		"""
-		# make sure all threads and states are set
-		self.list_threads(False)
-		
-		thread_id = gdb.selected_thread().ptid[1]
-		curr_thread = self.get_curr_thread()
-		
-		# check if current thread is LWP thread; return if "ompd_rc_unavailable"
-		thread_handle = ompdModule.get_thread_handle(thread_id, self.addr_space)
-		if thread_handle == -1:
-			print("Skipping OMPT-OMPD checks for non-LWP thread.")
-			return
-		
-		print('Comparing OMPT data to OMPD data...')
-		field_names = [i.name for i in gdb.parse_and_eval('thread_data').type.fields()]
-		thread_data = gdb.parse_and_eval('thread_data')
-		
-		if self.icv_map is None:
-			self.get_icv_map()
-		
-		# compare state values
-		if 'ompt_state' in field_names:
-			if self.states is None:
-				self.enumerate_states()
-			ompt_state = str(thread_data['ompt_state'])
-			ompd_state = str(self.states[curr_thread.get_state()[0]])
-			if ompt_state != ompd_state:
-				print('OMPT-OMPD mismatch: ompt_state (%s) does not match OMPD state (%s)!' % (ompt_state, ompd_state))
-		
-		# compare wait_id values
-		if 'ompt_wait_id' in field_names:
-			ompt_wait_id = thread_data['ompt_wait_id']
-			ompd_wait_id = curr_thread.get_state()[1]
-			if ompt_wait_id != ompd_wait_id:
-				print('OMPT-OMPD mismatch: ompt_wait_id (%d) does not match OMPD wait id (%d)!' % (ompt_wait_id, ompd_wait_id))
-		
-		# compare thread id
-		if 'omp_thread_num' in field_names and 'thread-num-var' in self.icv_map:
-			ompt_thread_num = thread_data['omp_thread_num']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.thread_handle, self.icv_map['thread-num-var'][1], self.icv_map['thread-num-var'][0])
-			if ompt_thread_num != icv_value:
-				print('OMPT-OMPD mismatch: omp_thread_num (%d) does not match OMPD thread num according to ICVs (%d)!' % (ompt_thread_num, icv_value))
-		
-		# compare thread data
-		if 'ompt_thread_data' in field_names:
-			ompt_thread_data = thread_data['ompt_thread_data'].dereference()['value']
-			ompd_value = ompdModule.call_ompd_get_tool_data(3, curr_thread.thread_handle)[0]
-			if ompt_thread_data != ompd_value:
-				print('OMPT-OMPD mismatch: value of ompt_thread_data (%d) does not match that of OMPD data union (%d)!' % (ompt_thread_data, ompd_value))
-		
-		# compare number of threads
-		if 'omp_num_threads' in field_names and 'team-size-var' in self.icv_map:
-			ompt_num_threads = thread_data['omp_num_threads']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['team-size-var'][1], self.icv_map['team-size-var'][0])
-			if ompt_num_threads != icv_value:
-				print('OMPT-OMPD mismatch: omp_num_threads (%d) does not match OMPD num threads according to ICVs (%d)!' % (ompt_num_threads, icv_value))
-		
-		# compare omp level
-		if 'omp_level' in field_names and 'levels-var' in self.icv_map:
-			ompt_levels = thread_data['omp_level']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['levels-var'][1], self.icv_map['levels-var'][0])
-			if ompt_levels != icv_value:
-				print('OMPT-OMPD mismatch: omp_level (%d) does not match OMPD levels according to ICVs (%d)!' % (ompt_levels, icv_value))
-		
-		# compare active level
-		if 'omp_active_level' in field_names and 'active-levels-var' in self.icv_map:
-			ompt_active_levels = thread_data['omp_active_level']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['active-levels-var'][1], self.icv_map['active-levels-var'][0])
-			if ompt_active_levels != icv_value:
-				print('OMPT-OMPD mismatch: active levels (%d) do not match active levels according to ICVs (%d)!' % (ompt_active_levels, icv_value))
-		
-		# compare parallel data
-		if 'ompt_parallel_data' in field_names:
-			ompt_parallel_data = thread_data['ompt_parallel_data'].dereference()['value']
-			current_parallel_handle = curr_thread.get_current_parallel_handle()
-			ompd_value = ompdModule.call_ompd_get_tool_data(4, current_parallel_handle)[0]
-			if ompt_parallel_data != ompd_value:
-				print('OMPT-OMPD mismatch: value of ompt_parallel_data (%d) does not match that of OMPD data union (%d)!' % (ompt_parallel_data, ompd_value))
-		
-		# compare max threads
-		if 'omp_max_threads' in field_names and 'nthreads-var' in self.icv_map:
-			ompt_max_threads = thread_data['omp_max_threads']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.thread_handle, self.icv_map['nthreads-var'][1], self.icv_map['nthreads-var'][0])
-			if icv_value is None:
-				icv_string = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.thread_handle, self.icv_map['nthreads-var'][1], self.icv_map['nthreads-var'][0])
-				if icv_string is None:
-					print('OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (None Object)' % (ompt_max_threads))
-				else:
-					if ompt_max_threads != int(icv_string.split(',')[0]):
-						print('OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (%d)!' % (ompt_max_threads, int(icv_string.split(',')[0])))
-			else:
-				if ompt_max_threads != icv_value:
-					print('OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (%d)!' % (ompt_max_threads, icv_value))
-		
-		# compare omp_parallel
-		# NOTE: omp_parallel = true if active-levels-var > 0
-		if 'omp_parallel' in field_names:
-			ompt_parallel = thread_data['omp_parallel']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['active-levels-var'][1], self.icv_map['active-levels-var'][0])
-			if ompt_parallel == 1 and icv_value <= 0 or ompt_parallel == 0 and icv_value > 0:
-				print('OMPT-OMPD mismatch: ompt_parallel (%d) does not match OMPD parallel according to ICVs (%d)!' % (ompt_parallel, icv_value))
-		
-		# compare omp_final
-		if 'omp_final' in field_names and 'final-task-var' in self.icv_map:
-			ompt_final = thread_data['omp_final']
-			current_task_handle = curr_thread.get_current_task_handle()
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(current_task_handle, self.icv_map['final-task-var'][1], self.icv_map['final-task-var'][0])
-			if icv_value != ompt_final:
-				print('OMPT-OMPD mismatch: omp_final (%d) does not match OMPD final according to ICVs (%d)!' % (ompt_final, icv_value))
-		
-		# compare omp_dynamic
-		if 'omp_dynamic' in field_names and 'dyn-var' in self.icv_map:
-			ompt_dynamic = thread_data['omp_dynamic']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.thread_handle, self.icv_map['dyn-var'][1], self.icv_map['dyn-var'][0])
-			if icv_value != ompt_dynamic:
-				print('OMPT-OMPD mismatch: omp_dynamic (%d) does not match OMPD dynamic according to ICVs (%d)!' % (ompt_dynamic, icv_value))
-		
-		# compare omp_max_active_levels
-		if 'omp_max_active_levels' in field_names and 'max-active-levels-var' in self.icv_map:
-			ompt_max_active_levels = thread_data['omp_max_active_levels']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_task_handle(), self.icv_map['max-active-levels-var'][1], self.icv_map['max-active-levels-var'][0])
-			if ompt_max_active_levels != icv_value:
-				print('OMPT-OMPD mismatch: omp_max_active_levels (%d) does not match OMPD max active levels (%d)!' % (ompt_max_active_levels, icv_value))
-		
+    def __init__(self):
+        """Initializes an ompd_address_space object by calling ompd_initialize
+        in ompdModule.c
+        """
+        self.addr_space = ompdModule.call_ompd_initialize()
+        # maps thread_num (thread id given by gdb) to ompd_thread object with thread handle
+        self.threads = {}
+        self.states = None
+        self.icv_map = None
+        self.ompd_tool_test_bp = None
+        self.scope_map = {
+            1: "global",
+            2: "address_space",
+            3: "thread",
+            4: "parallel",
+            5: "implicit_task",
+            6: "task",
+        }
+        self.sched_map = {1: "static", 2: "dynamic", 3: "guided", 4: "auto"}
+        gdb.events.stop.connect(self.handle_stop_event)
+        self.new_thread_breakpoint = gdb.Breakpoint(
+            "ompd_bp_thread_begin", internal=True
+        )
+        tool_break_symbol = gdb.lookup_global_symbol("ompd_tool_break")
+        if tool_break_symbol is not None:
+            self.ompd_tool_test_bp = gdb.Breakpoint("ompd_tool_break", internal=True)
+
+    def handle_stop_event(self, event):
+        """Sets a breakpoint at 
diff erent events, e.g. when a new OpenMP
+        thread is created.
+        """
+        if isinstance(event, gdb.BreakpointEvent):
+            # check if breakpoint has already been hit
+            if self.new_thread_breakpoint in event.breakpoints:
+                self.add_thread()
+                gdb.execute("continue")
+                return
+            elif (
+                self.ompd_tool_test_bp is not None
+                and self.ompd_tool_test_bp in event.breakpoints
+            ):
+                try:
+                    self.compare_ompt_data()
+                    gdb.execute("continue")
+                except ():
+                    traceback.print_exc()
+        elif isinstance(event, gdb.SignalEvent):
+            # TODO: what do we need to do on SIGNALS?
+            pass
+        else:
+            # TODO: probably not possible?
+            pass
+
+    def get_icv_map(self):
+        """Fills ICV map."""
+        self.icv_map = {}
+        current = 0
+        more = 1
+        while more > 0:
+            tup = ompdModule.call_ompd_enumerate_icvs(self.addr_space, current)
+            (current, next_icv, next_scope, more) = tup
+            self.icv_map[next_icv] = (current, next_scope, self.scope_map[next_scope])
+        print("Initialized ICV map successfully for checking OMP API values.")
+
+    def compare_ompt_data(self):
+        """Compares OMPT tool data about parallel region to data returned by OMPD functions."""
+        # make sure all threads and states are set
+        self.list_threads(False)
+
+        thread_id = gdb.selected_thread().ptid[1]
+        curr_thread = self.get_curr_thread()
+
+        # check if current thread is LWP thread; return if "ompd_rc_unavailable"
+        thread_handle = ompdModule.get_thread_handle(thread_id, self.addr_space)
+        if thread_handle == -1:
+            print("Skipping OMPT-OMPD checks for non-LWP thread.")
+            return
+
+        print("Comparing OMPT data to OMPD data...")
+        field_names = [i.name for i in gdb.parse_and_eval("thread_data").type.fields()]
+        thread_data = gdb.parse_and_eval("thread_data")
+
+        if self.icv_map is None:
+            self.get_icv_map()
+
+        # compare state values
+        if "ompt_state" in field_names:
+            if self.states is None:
+                self.enumerate_states()
+            ompt_state = str(thread_data["ompt_state"])
+            ompd_state = str(self.states[curr_thread.get_state()[0]])
+            if ompt_state != ompd_state:
+                print(
+                    "OMPT-OMPD mismatch: ompt_state (%s) does not match OMPD state (%s)!"
+                    % (ompt_state, ompd_state)
+                )
+
+        # compare wait_id values
+        if "ompt_wait_id" in field_names:
+            ompt_wait_id = thread_data["ompt_wait_id"]
+            ompd_wait_id = curr_thread.get_state()[1]
+            if ompt_wait_id != ompd_wait_id:
+                print(
+                    "OMPT-OMPD mismatch: ompt_wait_id (%d) does not match OMPD wait id (%d)!"
+                    % (ompt_wait_id, ompd_wait_id)
+                )
+
+        # compare thread id
+        if "omp_thread_num" in field_names and "thread-num-var" in self.icv_map:
+            ompt_thread_num = thread_data["omp_thread_num"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.thread_handle,
+                self.icv_map["thread-num-var"][1],
+                self.icv_map["thread-num-var"][0],
+            )
+            if ompt_thread_num != icv_value:
+                print(
+                    "OMPT-OMPD mismatch: omp_thread_num (%d) does not match OMPD thread num according to ICVs (%d)!"
+                    % (ompt_thread_num, icv_value)
+                )
+
+        # compare thread data
+        if "ompt_thread_data" in field_names:
+            ompt_thread_data = thread_data["ompt_thread_data"].dereference()["value"]
+            ompd_value = ompdModule.call_ompd_get_tool_data(
+                3, curr_thread.thread_handle
+            )[0]
+            if ompt_thread_data != ompd_value:
+                print(
+                    "OMPT-OMPD mismatch: value of ompt_thread_data (%d) does not match that of OMPD data union (%d)!"
+                    % (ompt_thread_data, ompd_value)
+                )
+
+        # compare number of threads
+        if "omp_num_threads" in field_names and "team-size-var" in self.icv_map:
+            ompt_num_threads = thread_data["omp_num_threads"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.get_current_parallel_handle(),
+                self.icv_map["team-size-var"][1],
+                self.icv_map["team-size-var"][0],
+            )
+            if ompt_num_threads != icv_value:
+                print(
+                    "OMPT-OMPD mismatch: omp_num_threads (%d) does not match OMPD num threads according to ICVs (%d)!"
+                    % (ompt_num_threads, icv_value)
+                )
+
+        # compare omp level
+        if "omp_level" in field_names and "levels-var" in self.icv_map:
+            ompt_levels = thread_data["omp_level"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.get_current_parallel_handle(),
+                self.icv_map["levels-var"][1],
+                self.icv_map["levels-var"][0],
+            )
+            if ompt_levels != icv_value:
+                print(
+                    "OMPT-OMPD mismatch: omp_level (%d) does not match OMPD levels according to ICVs (%d)!"
+                    % (ompt_levels, icv_value)
+                )
+
+        # compare active level
+        if "omp_active_level" in field_names and "active-levels-var" in self.icv_map:
+            ompt_active_levels = thread_data["omp_active_level"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.get_current_parallel_handle(),
+                self.icv_map["active-levels-var"][1],
+                self.icv_map["active-levels-var"][0],
+            )
+            if ompt_active_levels != icv_value:
+                print(
+                    "OMPT-OMPD mismatch: active levels (%d) do not match active levels according to ICVs (%d)!"
+                    % (ompt_active_levels, icv_value)
+                )
+
+        # compare parallel data
+        if "ompt_parallel_data" in field_names:
+            ompt_parallel_data = thread_data["ompt_parallel_data"].dereference()[
+                "value"
+            ]
+            current_parallel_handle = curr_thread.get_current_parallel_handle()
+            ompd_value = ompdModule.call_ompd_get_tool_data(4, current_parallel_handle)[
+                0
+            ]
+            if ompt_parallel_data != ompd_value:
+                print(
+                    "OMPT-OMPD mismatch: value of ompt_parallel_data (%d) does not match that of OMPD data union (%d)!"
+                    % (ompt_parallel_data, ompd_value)
+                )
+
+        # compare max threads
+        if "omp_max_threads" in field_names and "nthreads-var" in self.icv_map:
+            ompt_max_threads = thread_data["omp_max_threads"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.thread_handle,
+                self.icv_map["nthreads-var"][1],
+                self.icv_map["nthreads-var"][0],
+            )
+            if icv_value is None:
+                icv_string = ompdModule.call_ompd_get_icv_string_from_scope(
+                    curr_thread.thread_handle,
+                    self.icv_map["nthreads-var"][1],
+                    self.icv_map["nthreads-var"][0],
+                )
+                if icv_string is None:
+                    print(
+                        "OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (None Object)"
+                        % (ompt_max_threads)
+                    )
+                else:
+                    if ompt_max_threads != int(icv_string.split(",")[0]):
+                        print(
+                            "OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (%d)!"
+                            % (ompt_max_threads, int(icv_string.split(",")[0]))
+                        )
+            else:
+                if ompt_max_threads != icv_value:
+                    print(
+                        "OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (%d)!"
+                        % (ompt_max_threads, icv_value)
+                    )
+
+        # compare omp_parallel
+        # NOTE: omp_parallel = true if active-levels-var > 0
+        if "omp_parallel" in field_names:
+            ompt_parallel = thread_data["omp_parallel"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.get_current_parallel_handle(),
+                self.icv_map["active-levels-var"][1],
+                self.icv_map["active-levels-var"][0],
+            )
+            if (
+                ompt_parallel == 1
+                and icv_value <= 0
+                or ompt_parallel == 0
+                and icv_value > 0
+            ):
+                print(
+                    "OMPT-OMPD mismatch: ompt_parallel (%d) does not match OMPD parallel according to ICVs (%d)!"
+                    % (ompt_parallel, icv_value)
+                )
+
+        # compare omp_final
+        if "omp_final" in field_names and "final-task-var" in self.icv_map:
+            ompt_final = thread_data["omp_final"]
+            current_task_handle = curr_thread.get_current_task_handle()
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                current_task_handle,
+                self.icv_map["final-task-var"][1],
+                self.icv_map["final-task-var"][0],
+            )
+            if icv_value != ompt_final:
+                print(
+                    "OMPT-OMPD mismatch: omp_final (%d) does not match OMPD final according to ICVs (%d)!"
+                    % (ompt_final, icv_value)
+                )
+
+        # compare omp_dynamic
+        if "omp_dynamic" in field_names and "dyn-var" in self.icv_map:
+            ompt_dynamic = thread_data["omp_dynamic"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.thread_handle,
+                self.icv_map["dyn-var"][1],
+                self.icv_map["dyn-var"][0],
+            )
+            if icv_value != ompt_dynamic:
+                print(
+                    "OMPT-OMPD mismatch: omp_dynamic (%d) does not match OMPD dynamic according to ICVs (%d)!"
+                    % (ompt_dynamic, icv_value)
+                )
+
+        # compare omp_max_active_levels
+        if (
+            "omp_max_active_levels" in field_names
+            and "max-active-levels-var" in self.icv_map
+        ):
+            ompt_max_active_levels = thread_data["omp_max_active_levels"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.get_current_task_handle(),
+                self.icv_map["max-active-levels-var"][1],
+                self.icv_map["max-active-levels-var"][0],
+            )
+            if ompt_max_active_levels != icv_value:
+                print(
+                    "OMPT-OMPD mismatch: omp_max_active_levels (%d) does not match OMPD max active levels (%d)!"
+                    % (ompt_max_active_levels, icv_value)
+                )
+
                 # compare omp_kind: TODO: Add the test for monotonic/nonmonotonic modifier
-		if 'omp_kind' in field_names and 'run-sched-var' in self.icv_map:
-			ompt_sched_kind = thread_data['omp_kind']
-			icv_value = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.get_current_task_handle(), self.icv_map['run-sched-var'][1], self.icv_map['run-sched-var'][0])
-			ompd_sched_kind = icv_value.split(',')[0]
-			if self.sched_map.get(int(ompt_sched_kind)) != ompd_sched_kind:
-				print('OMPT-OMPD mismatch: omp_kind kind (%s) does not match OMPD schedule kind according to ICVs (%s)!' % (self.sched_map.get(int(ompt_sched_kind)), ompd_sched_kind))
-		
-		# compare omp_modifier
-		if 'omp_modifier' in field_names and 'run-sched-var' in self.icv_map:
-			ompt_sched_mod = thread_data['omp_modifier']
-			icv_value = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.get_current_task_handle(), self.icv_map['run-sched-var'][1], self.icv_map['run-sched-var'][0])
-			token = icv_value.split(',')[1]
-			if token is not None:
-				ompd_sched_mod = int(token)
-			else:
-				ompd_sched_mod = 0
-			if ompt_sched_mod != ompd_sched_mod:
-				print('OMPT-OMPD mismatch: omp_kind modifier does not match OMPD schedule modifier according to ICVs!')
-			
-		# compare omp_proc_bind
-		if 'omp_proc_bind' in field_names and 'bind-var' in self.icv_map:
-			ompt_proc_bind = thread_data['omp_proc_bind']
-			icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_task_handle(), self.icv_map['bind-var'][1], self.icv_map['bind-var'][0])
-			if icv_value is None:
-				icv_string = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.get_current_task_handle(), self.icv_map['bind-var'][1], self.icv_map['bind-var'][0])
-				if icv_string is None:
-					print('OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (None Object)' % (ompt_proc_bind))
-				else:
-					if ompt_proc_bind != int(icv_string.split(',')[0]):
-						print('OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (%d)!' % (ompt_proc_bind, int(icv_string.split(',')[0])))
-			else:
-				if ompt_proc_bind != icv_value:
-					print('OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (%d)!' % (ompt_proc_bind, icv_value))
-		
-		# compare enter and exit frames
-		if 'ompt_frame_list' in field_names:
-			ompt_task_frame_dict = thread_data['ompt_frame_list'].dereference()
-			ompt_task_frames = (int(ompt_task_frame_dict['enter_frame'].cast(gdb.lookup_type('long'))), int(ompt_task_frame_dict['exit_frame'].cast(gdb.lookup_type('long'))))
-			current_task = curr_thread.get_current_task()
-			ompd_task_frames = current_task.get_task_frame()
-			if ompt_task_frames != ompd_task_frames:
-				print('OMPT-OMPD mismatch: ompt_task_frames (%s) do not match OMPD task frames (%s)!' % (ompt_task_frames, ompd_task_frames))
-		
-		# compare task data
-		if 'ompt_task_data' in field_names:
-			ompt_task_data = thread_data['ompt_task_data'].dereference()['value']
-			current_task_handle = curr_thread.get_current_task_handle()
-			ompd_value = ompdModule.call_ompd_get_tool_data(6, current_task_handle)[0]
-			if ompt_task_data != ompd_value:
-				print('OMPT-OMPD mismatch: value of ompt_task_data (%d) does not match that of OMPD data union (%d)!' % (ompt_task_data, ompd_value))
-	
-	def save_thread_object(self, thread_num, thread_id, addr_space):
-		"""Saves thread object for thread_num inside threads dictionary.
-		"""
-		thread_handle = ompdModule.get_thread_handle(thread_id, addr_space)
-		self.threads[int(thread_num)] = ompd_thread(thread_handle)
-	
-	def get_thread(self, thread_num):
-		""" Get thread object from map.
-		"""
-		return self.threads[int(thread_num)]
-	
-	def get_curr_thread(self):
-		""" Get current thread object from map or add new one to map, if missing.
-		"""
-		thread_num = int(gdb.selected_thread().num)
-		if thread_num not in self.threads:
-			self.add_thread()
-		return self.threads[thread_num]
-	
-	def add_thread(self):
-		"""Add currently selected (*) thread to dictionary threads.
-		"""
-		inf_thread = gdb.selected_thread()
-		try:
-			self.save_thread_object(inf_thread.num, inf_thread.ptid[1], self.addr_space)
-		except:
-			traceback.print_exc()
-	
-	def list_threads(self, verbose):
-		"""Prints OpenMP threads only that are being tracking inside the "threads" dictionary.
-		See handle_stop_event and add_thread.
-		"""
-		list_tids = []
-		curr_inferior = gdb.selected_inferior()
-		
-		for inf_thread in curr_inferior.threads():
-			list_tids.append((inf_thread.num, inf_thread.ptid))
-		if verbose:
-			if self.states is None:
-				self.enumerate_states()
-			for (thread_num, thread_ptid) in sorted(list_tids):
-				if thread_num in self.threads:
-					try:
-						print('Thread %i (%i) is an OpenMP thread; state: %s' % (thread_num, thread_ptid[1], self.states[self.threads[thread_num].get_state()[0]]))
-					except:
-						traceback.print_exc()
-				else:
-					print('Thread %i (%i) is no OpenMP thread' % (thread_num, thread_ptid[1]))
-	
-	def enumerate_states(self):
-		"""Helper function for list_threads: initializes map of OMPD states for output of
-		'ompd threads'.
-		"""
-		if self.states is None:
-			self.states = {}
-			current = int("0x102", 0)
-			count = 0
-			more = 1
-			
-			while more > 0:
-				tup = ompdModule.call_ompd_enumerate_states(self.addr_space, current)
-				(next_state, next_state_name, more) = tup
-				
-				self.states[next_state] = next_state_name
-				current = next_state
+        if "omp_kind" in field_names and "run-sched-var" in self.icv_map:
+            ompt_sched_kind = thread_data["omp_kind"]
+            icv_value = ompdModule.call_ompd_get_icv_string_from_scope(
+                curr_thread.get_current_task_handle(),
+                self.icv_map["run-sched-var"][1],
+                self.icv_map["run-sched-var"][0],
+            )
+            ompd_sched_kind = icv_value.split(",")[0]
+            if self.sched_map.get(int(ompt_sched_kind)) != ompd_sched_kind:
+                print(
+                    "OMPT-OMPD mismatch: omp_kind kind (%s) does not match OMPD schedule kind according to ICVs (%s)!"
+                    % (self.sched_map.get(int(ompt_sched_kind)), ompd_sched_kind)
+                )
+
+        # compare omp_modifier
+        if "omp_modifier" in field_names and "run-sched-var" in self.icv_map:
+            ompt_sched_mod = thread_data["omp_modifier"]
+            icv_value = ompdModule.call_ompd_get_icv_string_from_scope(
+                curr_thread.get_current_task_handle(),
+                self.icv_map["run-sched-var"][1],
+                self.icv_map["run-sched-var"][0],
+            )
+            token = icv_value.split(",")[1]
+            if token is not None:
+                ompd_sched_mod = int(token)
+            else:
+                ompd_sched_mod = 0
+            if ompt_sched_mod != ompd_sched_mod:
+                print(
+                    "OMPT-OMPD mismatch: omp_kind modifier does not match OMPD schedule modifier according to ICVs!"
+                )
+
+        # compare omp_proc_bind
+        if "omp_proc_bind" in field_names and "bind-var" in self.icv_map:
+            ompt_proc_bind = thread_data["omp_proc_bind"]
+            icv_value = ompdModule.call_ompd_get_icv_from_scope(
+                curr_thread.get_current_task_handle(),
+                self.icv_map["bind-var"][1],
+                self.icv_map["bind-var"][0],
+            )
+            if icv_value is None:
+                icv_string = ompdModule.call_ompd_get_icv_string_from_scope(
+                    curr_thread.get_current_task_handle(),
+                    self.icv_map["bind-var"][1],
+                    self.icv_map["bind-var"][0],
+                )
+                if icv_string is None:
+                    print(
+                        "OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (None Object)"
+                        % (ompt_proc_bind)
+                    )
+                else:
+                    if ompt_proc_bind != int(icv_string.split(",")[0]):
+                        print(
+                            "OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (%d)!"
+                            % (ompt_proc_bind, int(icv_string.split(",")[0]))
+                        )
+            else:
+                if ompt_proc_bind != icv_value:
+                    print(
+                        "OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (%d)!"
+                        % (ompt_proc_bind, icv_value)
+                    )
+
+        # compare enter and exit frames
+        if "ompt_frame_list" in field_names:
+            ompt_task_frame_dict = thread_data["ompt_frame_list"].dereference()
+            ompt_task_frames = (
+                int(ompt_task_frame_dict["enter_frame"].cast(gdb.lookup_type("long"))),
+                int(ompt_task_frame_dict["exit_frame"].cast(gdb.lookup_type("long"))),
+            )
+            current_task = curr_thread.get_current_task()
+            ompd_task_frames = current_task.get_task_frame()
+            if ompt_task_frames != ompd_task_frames:
+                print(
+                    "OMPT-OMPD mismatch: ompt_task_frames (%s) do not match OMPD task frames (%s)!"
+                    % (ompt_task_frames, ompd_task_frames)
+                )
+
+        # compare task data
+        if "ompt_task_data" in field_names:
+            ompt_task_data = thread_data["ompt_task_data"].dereference()["value"]
+            current_task_handle = curr_thread.get_current_task_handle()
+            ompd_value = ompdModule.call_ompd_get_tool_data(6, current_task_handle)[0]
+            if ompt_task_data != ompd_value:
+                print(
+                    "OMPT-OMPD mismatch: value of ompt_task_data (%d) does not match that of OMPD data union (%d)!"
+                    % (ompt_task_data, ompd_value)
+                )
+
+    def save_thread_object(self, thread_num, thread_id, addr_space):
+        """Saves thread object for thread_num inside threads dictionary."""
+        thread_handle = ompdModule.get_thread_handle(thread_id, addr_space)
+        self.threads[int(thread_num)] = ompd_thread(thread_handle)
+
+    def get_thread(self, thread_num):
+        """Get thread object from map."""
+        return self.threads[int(thread_num)]
+
+    def get_curr_thread(self):
+        """Get current thread object from map or add new one to map, if missing."""
+        thread_num = int(gdb.selected_thread().num)
+        if thread_num not in self.threads:
+            self.add_thread()
+        return self.threads[thread_num]
+
+    def add_thread(self):
+        """Add currently selected (*) thread to dictionary threads."""
+        inf_thread = gdb.selected_thread()
+        try:
+            self.save_thread_object(inf_thread.num, inf_thread.ptid[1], self.addr_space)
+        except:
+            traceback.print_exc()
+
+    def list_threads(self, verbose):
+        """Prints OpenMP threads only that are being tracking inside the "threads" dictionary.
+        See handle_stop_event and add_thread.
+        """
+        list_tids = []
+        curr_inferior = gdb.selected_inferior()
+
+        for inf_thread in curr_inferior.threads():
+            list_tids.append((inf_thread.num, inf_thread.ptid))
+        if verbose:
+            if self.states is None:
+                self.enumerate_states()
+            for (thread_num, thread_ptid) in sorted(list_tids):
+                if thread_num in self.threads:
+                    try:
+                        print(
+                            "Thread %i (%i) is an OpenMP thread; state: %s"
+                            % (
+                                thread_num,
+                                thread_ptid[1],
+                                self.states[self.threads[thread_num].get_state()[0]],
+                            )
+                        )
+                    except:
+                        traceback.print_exc()
+                else:
+                    print(
+                        "Thread %i (%i) is no OpenMP thread"
+                        % (thread_num, thread_ptid[1])
+                    )
+
+    def enumerate_states(self):
+        """Helper function for list_threads: initializes map of OMPD states for output of
+        'ompd threads'.
+        """
+        if self.states is None:
+            self.states = {}
+            current = int("0x102", 0)
+            count = 0
+            more = 1
+
+            while more > 0:
+                tup = ompdModule.call_ompd_enumerate_states(self.addr_space, current)
+                (next_state, next_state_name, more) = tup
+
+                self.states[next_state] = next_state_name
+                current = next_state

diff  --git a/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py b/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py
index bb520e50c6c64..ada09d75579f0 100644
--- a/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py
+++ b/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py
@@ -9,88 +9,104 @@
 
 """ Have the debugger print a string.
 """
+
+
 def _print(*args):
-	# args is a tuple with just one string element
-	print_string = args[0]
-	gdb.execute('printf "%s\n"' % args[0])
+    # args is a tuple with just one string element
+    print_string = args[0]
+    gdb.execute('printf "%s\n"' % args[0])
+
 
 """ Look up the address of a global symbol in the target.
 """
+
+
 def _sym_addr(*args):
-	# args is a tuple consisting of thread_id and symbol_name
-	thread_id = args[0]
-	symbol_name = args[1]
-	if(thread_id >= 0):
-		gdb.execute('thread %d\n' % thread_id, to_string=True)
-	return int(gdb.parse_and_eval("&"+symbol_name))
+    # args is a tuple consisting of thread_id and symbol_name
+    thread_id = args[0]
+    symbol_name = args[1]
+    if thread_id >= 0:
+        gdb.execute("thread %d\n" % thread_id, to_string=True)
+    return int(gdb.parse_and_eval("&" + symbol_name))
+
 
 """ Read string from the target and copy it into the provided buffer.
 """
+
+
 def _read_string(*args):
-	# args is a tuple with just the source address
-	addr = args[0]
-	try:
-		buf = gdb.parse_and_eval('(unsigned char*)%li' % addr).string()
-	except:
-		traceback.print_exc()
-	return buf
+    # args is a tuple with just the source address
+    addr = args[0]
+    try:
+        buf = gdb.parse_and_eval("(unsigned char*)%li" % addr).string()
+    except:
+        traceback.print_exc()
+    return buf
+
 
 """ Read memory from the target and copy it into the provided buffer.
 """
+
+
 def _read(*args):
-	# args is a tuple consisting of address and number of bytes to be read
-	addr = args[0]
-	nbytes = args[1]
-#	print("_read(%i,%i)"%(addr, nbytes))
-	ret_buf = bytearray()
-#	try:
-	buf = gdb.parse_and_eval('(unsigned char*)%li' % addr)
-	for i in range(nbytes):
-		ret_buf.append(int(buf[i]))
-#	except:
-#		traceback.print_exc()
-	return ret_buf
+    # args is a tuple consisting of address and number of bytes to be read
+    addr = args[0]
+    nbytes = args[1]
+    # 	print("_read(%i,%i)"%(addr, nbytes))
+    ret_buf = bytearray()
+    # 	try:
+    buf = gdb.parse_and_eval("(unsigned char*)%li" % addr)
+    for i in range(nbytes):
+        ret_buf.append(int(buf[i]))
+    # 	except:
+    # 		traceback.print_exc()
+    return ret_buf
 
 
 """ Get thread-specific context.
 Return -1 if no match is found.
 """
+
+
 def _thread_context(*args):
-	# args is a tuple consisting of thread_id and the thread kind
-	thread_id = args[1]
-	pthread = False
-	lwp = False
-	if args[0] == 0:
-		pthread = True
-	else:
-		lwp = True
-	info = gdb.execute('info threads', to_string=True).splitlines()
-	
-	for line in info:
-		if pthread:
-			m = re.search(r'(0x[a-fA-F0-9]+)', line)
-		elif lwp:
-			m = re.search(r'\([^)]*?(\d+)[^)]*?\)', line)
-		if m == None:
-			continue
-		pid = int(m.group(1),0)
-		if pid == thread_id:
-			return int(line[2:6],0)
-	return -1
+    # args is a tuple consisting of thread_id and the thread kind
+    thread_id = args[1]
+    pthread = False
+    lwp = False
+    if args[0] == 0:
+        pthread = True
+    else:
+        lwp = True
+    info = gdb.execute("info threads", to_string=True).splitlines()
+
+    for line in info:
+        if pthread:
+            m = re.search(r"(0x[a-fA-F0-9]+)", line)
+        elif lwp:
+            m = re.search(r"\([^)]*?(\d+)[^)]*?\)", line)
+        if m == None:
+            continue
+        pid = int(m.group(1), 0)
+        if pid == thread_id:
+            return int(line[2:6], 0)
+    return -1
+
 
 """ Test info threads / list threads / how to split output to get thread id 
 and its size.
 """
+
+
 def _test_threads(*args):
-	info = gdb.execute('info threads', to_string=True).splitlines()
-	for line in info[1:]:
-		content = line.split()
-		thread_id = None
-		# fetch pointer to id
-		if(content[0].startswith('*')):
-			thread_id = (content[3])
-		else:
-			thread_id = (content[2])
-		sizeof_tid = sys.getsizeof(thread_id)
-		print(sizeof_tid)
-	print(info)
+    info = gdb.execute("info threads", to_string=True).splitlines()
+    for line in info[1:]:
+        content = line.split()
+        thread_id = None
+        # fetch pointer to id
+        if content[0].startswith("*"):
+            thread_id = content[3]
+        else:
+            thread_id = content[2]
+        sizeof_tid = sys.getsizeof(thread_id)
+        print(sizeof_tid)
+    print(info)

diff  --git a/openmp/libompd/gdb-plugin/ompd/ompd_handles.py b/openmp/libompd/gdb-plugin/ompd/ompd_handles.py
index 1aaccb19138da..1929a92617415 100644
--- a/openmp/libompd/gdb-plugin/ompd/ompd_handles.py
+++ b/openmp/libompd/gdb-plugin/ompd/ompd_handles.py
@@ -1,178 +1,200 @@
 import ompdModule
 import imp
 
+
 class ompd_parallel(object):
-	
-	def __init__(self, parallel_handle):
-		""" Initializes an ompd_parallel object with the pointer
-		to a handle of a parallel region."""
-		self.parallel_handle = parallel_handle
-		self.threads = {}
-		self.itasks = {}
-		self.enclosing_parallel_handle = None
-		self.enclosing_parallel = False
-		self.task_handle = None
-	
-	def get_thread_in_parallel(self, thread_num):
-		"""Obtains thread handles for the threads associated with the
-		parallel region specified by parallel_handle."""
-		if not thread_num in self.threads:
-			thread_handle = ompdModule.call_ompd_get_thread_in_parallel(self.parallel_handle, thread_num)
-			self.threads[thread_num] = ompd_thread(thread_handle)
-		return self.threads[thread_num]
-	
-	def get_enclosing_parallel_handle(self):
-		"""Obtains a parallel handle for the parallel region enclosing
-		the parallel region specified by parallel_handle."""
-		if not self.enclosing_parallel_handle:
-			self.enclosing_parallel_handle = ompdModule.call_ompd_get_enclosing_parallel_handle(self.parallel_handle)
-		return self.enclosing_parallel_handle
-	
-	def get_enclosing_parallel(self):
-		if not self.enclosing_parallel:
-			self.enclosing_parallel = ompd_parallel(self.get_enclosing_parallel_handle())
-		return self.enclosing_parallel
-	
-	def get_task_in_parallel(self, thread_num):
-		"""Obtains handles for the implicit tasks associated with the
-		parallel region specified by parallel_handle."""
-		if not thread_num in self.itasks:
-			task_handle = ompdModule.call_ompd_get_task_in_parallel(self.parallel_handle, thread_num)
-			self.itasks[thread_num] = ompd_task(task_handle)
-		return self.itasks[thread_num]
-	
-	def __del__(self):
-		"""Releases the parallel handle."""
-		pass # let capsule destructors do the job
+    def __init__(self, parallel_handle):
+        """Initializes an ompd_parallel object with the pointer
+        to a handle of a parallel region."""
+        self.parallel_handle = parallel_handle
+        self.threads = {}
+        self.itasks = {}
+        self.enclosing_parallel_handle = None
+        self.enclosing_parallel = False
+        self.task_handle = None
+
+    def get_thread_in_parallel(self, thread_num):
+        """Obtains thread handles for the threads associated with the
+        parallel region specified by parallel_handle."""
+        if not thread_num in self.threads:
+            thread_handle = ompdModule.call_ompd_get_thread_in_parallel(
+                self.parallel_handle, thread_num
+            )
+            self.threads[thread_num] = ompd_thread(thread_handle)
+        return self.threads[thread_num]
+
+    def get_enclosing_parallel_handle(self):
+        """Obtains a parallel handle for the parallel region enclosing
+        the parallel region specified by parallel_handle."""
+        if not self.enclosing_parallel_handle:
+            self.enclosing_parallel_handle = (
+                ompdModule.call_ompd_get_enclosing_parallel_handle(self.parallel_handle)
+            )
+        return self.enclosing_parallel_handle
+
+    def get_enclosing_parallel(self):
+        if not self.enclosing_parallel:
+            self.enclosing_parallel = ompd_parallel(
+                self.get_enclosing_parallel_handle()
+            )
+        return self.enclosing_parallel
+
+    def get_task_in_parallel(self, thread_num):
+        """Obtains handles for the implicit tasks associated with the
+        parallel region specified by parallel_handle."""
+        if not thread_num in self.itasks:
+            task_handle = ompdModule.call_ompd_get_task_in_parallel(
+                self.parallel_handle, thread_num
+            )
+            self.itasks[thread_num] = ompd_task(task_handle)
+        return self.itasks[thread_num]
+
+    def __del__(self):
+        """Releases the parallel handle."""
+        pass  # let capsule destructors do the job
+
 
 class ompd_task(object):
-	
-	def __init__(self, task_handle):
-		"""Initializes a new ompd_task_handle object and sets the attribute
-		to the task handle specified."""
-		self.task_handle = task_handle
-		self.task_parallel_handle = False
-		self.generating_task_handle = False
-		self.scheduling_task_handle = False
-		self.task_parallel = False
-		self.generating_task = False
-		self.scheduling_task = False
-		self.task_frames = None
-		self.task_frame_flags = None
-	
-	def get_task_parallel_handle(self):
-		"""Obtains a task parallel handle for the parallel region enclosing
-		the task region specified."""
-		if not self.task_parallel_handle:
-			self.task_parallel_handle = ompdModule.call_ompd_get_task_parallel_handle(self.task_handle)
-		return self.task_parallel_handle
-	
-	def get_task_parallel(self):
-		if not self.task_parallel:
-			self.task_parallel = ompd_parallel(self.get_task_parallel_handle())
-		return self.task_parallel
-	
-	def get_generating_task_handle(self):
-		"""Obtains the task handle for the task that created the task specified
-		by the task handle."""
-		if not self.generating_task_handle:
-			self.generating_task_handle = ompdModule.call_ompd_get_generating_task_handle(self.task_handle)
-		return self.generating_task_handle
-	
-	def get_generating_task(self):
-		if not self.generating_task:
-			self.generating_task = ompd_task(ompdModule.call_ompd_get_generating_task_handle(self.task_handle))
-		return self.generating_task
-	
-	def get_scheduling_task_handle(self):
-		"""Obtains the task handle for the task that scheduled the task specified."""
-		if not self.scheduling_task_handle:
-			self.scheduling_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(self.task_handle)
-		return self.scheduling_task_handle
-	
-	def get_scheduling_task(self):
-		"""Returns ompd_task object for the task that scheduled the current task."""
-		if not self.scheduling_task:
-			self.scheduling_task = ompd_task(self.get_scheduling_task_handle())
-		return self.scheduling_task
-
-	def get_task_function(self):
-		"""Returns long with address of function entry point."""
-		return ompdModule.call_ompd_get_task_function(self.task_handle)
-	
-	def get_task_frame_with_flags(self):
-		"""Returns enter frame address and flag, exit frame address and flag for current task handle."""
-		if self.task_frames is None or self.task_frame_flags is None:
-			ret_value = ompdModule.call_ompd_get_task_frame(self.task_handle)
-			if isinstance(ret_value, tuple):
-				self.task_frames = (ret_value[0], ret_value[2])
-				self.task_frame_flags = (ret_value[1], ret_value[3])
-			else:
-				return ret_value
-		return (self.task_frames[0], self.task_frame_flags[0], self.task_frames[1], self.task_frame_flags[1])
-	
-	def get_task_frame(self):
-		"""Returns enter and exit frame address for current task handle."""
-		if self.task_frames is None:
-			ret_value = ompdModule.call_ompd_get_task_frame(self.task_handle)
-			if isinstance(ret_value, tuple):
-				self.task_frames = (ret_value[0], ret_value[2])
-			else:
-				return ret_value
-		return self.task_frames
-	
-	def __del__(self):
-		"""Releases the task handle."""
-		pass # let capsule destructors do the job
+    def __init__(self, task_handle):
+        """Initializes a new ompd_task_handle object and sets the attribute
+        to the task handle specified."""
+        self.task_handle = task_handle
+        self.task_parallel_handle = False
+        self.generating_task_handle = False
+        self.scheduling_task_handle = False
+        self.task_parallel = False
+        self.generating_task = False
+        self.scheduling_task = False
+        self.task_frames = None
+        self.task_frame_flags = None
+
+    def get_task_parallel_handle(self):
+        """Obtains a task parallel handle for the parallel region enclosing
+        the task region specified."""
+        if not self.task_parallel_handle:
+            self.task_parallel_handle = ompdModule.call_ompd_get_task_parallel_handle(
+                self.task_handle
+            )
+        return self.task_parallel_handle
+
+    def get_task_parallel(self):
+        if not self.task_parallel:
+            self.task_parallel = ompd_parallel(self.get_task_parallel_handle())
+        return self.task_parallel
+
+    def get_generating_task_handle(self):
+        """Obtains the task handle for the task that created the task specified
+        by the task handle."""
+        if not self.generating_task_handle:
+            self.generating_task_handle = (
+                ompdModule.call_ompd_get_generating_task_handle(self.task_handle)
+            )
+        return self.generating_task_handle
+
+    def get_generating_task(self):
+        if not self.generating_task:
+            self.generating_task = ompd_task(
+                ompdModule.call_ompd_get_generating_task_handle(self.task_handle)
+            )
+        return self.generating_task
+
+    def get_scheduling_task_handle(self):
+        """Obtains the task handle for the task that scheduled the task specified."""
+        if not self.scheduling_task_handle:
+            self.scheduling_task_handle = (
+                ompdModule.call_ompd_get_scheduling_task_handle(self.task_handle)
+            )
+        return self.scheduling_task_handle
+
+    def get_scheduling_task(self):
+        """Returns ompd_task object for the task that scheduled the current task."""
+        if not self.scheduling_task:
+            self.scheduling_task = ompd_task(self.get_scheduling_task_handle())
+        return self.scheduling_task
+
+    def get_task_function(self):
+        """Returns long with address of function entry point."""
+        return ompdModule.call_ompd_get_task_function(self.task_handle)
+
+    def get_task_frame_with_flags(self):
+        """Returns enter frame address and flag, exit frame address and flag for current task handle."""
+        if self.task_frames is None or self.task_frame_flags is None:
+            ret_value = ompdModule.call_ompd_get_task_frame(self.task_handle)
+            if isinstance(ret_value, tuple):
+                self.task_frames = (ret_value[0], ret_value[2])
+                self.task_frame_flags = (ret_value[1], ret_value[3])
+            else:
+                return ret_value
+        return (
+            self.task_frames[0],
+            self.task_frame_flags[0],
+            self.task_frames[1],
+            self.task_frame_flags[1],
+        )
+
+    def get_task_frame(self):
+        """Returns enter and exit frame address for current task handle."""
+        if self.task_frames is None:
+            ret_value = ompdModule.call_ompd_get_task_frame(self.task_handle)
+            if isinstance(ret_value, tuple):
+                self.task_frames = (ret_value[0], ret_value[2])
+            else:
+                return ret_value
+        return self.task_frames
+
+    def __del__(self):
+        """Releases the task handle."""
+        pass  # let capsule destructors do the job
 
 
 class ompd_thread(object):
-	
-	def __init__(self, thread_handle):
-		"""Initializes an ompd_thread with the data received from
-		GDB."""
-		self.thread_handle = thread_handle
-		self.parallel_handle = None
-		self.task_handle = None
-		self.current_task = False
-		self.current_parallel = False
-		self.thread_id = False
-	
-	def get_current_parallel_handle(self):
-		"""Obtains the parallel handle for the parallel region associated with
-		the given thread handle."""
-		#TODO: invalidate thread objects based on `gdb.event.cont`. This should invalidate all internal state.
-		self.parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(self.thread_handle)
-		return self.parallel_handle
-	
-	def get_current_parallel(self):
-		"""Returns parallel object for parallel handle of the parallel region 
-		associated with the current thread handle."""
-		if not self.current_parallel:
-			self.current_parallel = ompd_parallel(self.get_current_parallel_handle())
-		return self.current_parallel
-		
-	def get_current_task_handle(self):
-		"""Obtains the task handle for the current task region of the
-		given thread."""
-		return ompdModule.call_ompd_get_curr_task_handle(self.thread_handle)
-
-	def get_thread_id(self):
-		"""Obtains the ID for the given thread."""
-		if not self.thread_id:
-			self.thread_id = ompdModule.call_ompd_get_thread_id(self.thread_handle)
-		return self.thread_id
-
-	def get_current_task(self):
-		"""Returns task object for task handle of the current task region."""
-		return ompd_task(self.get_current_task_handle())
-	
-	def get_state(self):
-		"""Returns tuple with OMPD state (long) and wait_id, in case the thread is in a 
-		waiting state. Helper function for 'ompd threads' command."""
-		(state, wait_id) = ompdModule.call_ompd_get_state(self.thread_handle)
-		return (state, wait_id)
-	
-	def __del__(self):
-		"""Releases the given thread handle."""
-		pass # let capsule destructors do the job
+    def __init__(self, thread_handle):
+        """Initializes an ompd_thread with the data received from
+        GDB."""
+        self.thread_handle = thread_handle
+        self.parallel_handle = None
+        self.task_handle = None
+        self.current_task = False
+        self.current_parallel = False
+        self.thread_id = False
+
+    def get_current_parallel_handle(self):
+        """Obtains the parallel handle for the parallel region associated with
+        the given thread handle."""
+        # TODO: invalidate thread objects based on `gdb.event.cont`. This should invalidate all internal state.
+        self.parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(
+            self.thread_handle
+        )
+        return self.parallel_handle
+
+    def get_current_parallel(self):
+        """Returns parallel object for parallel handle of the parallel region
+        associated with the current thread handle."""
+        if not self.current_parallel:
+            self.current_parallel = ompd_parallel(self.get_current_parallel_handle())
+        return self.current_parallel
+
+    def get_current_task_handle(self):
+        """Obtains the task handle for the current task region of the
+        given thread."""
+        return ompdModule.call_ompd_get_curr_task_handle(self.thread_handle)
+
+    def get_thread_id(self):
+        """Obtains the ID for the given thread."""
+        if not self.thread_id:
+            self.thread_id = ompdModule.call_ompd_get_thread_id(self.thread_handle)
+        return self.thread_id
+
+    def get_current_task(self):
+        """Returns task object for task handle of the current task region."""
+        return ompd_task(self.get_current_task_handle())
+
+    def get_state(self):
+        """Returns tuple with OMPD state (long) and wait_id, in case the thread is in a
+        waiting state. Helper function for 'ompd threads' command."""
+        (state, wait_id) = ompdModule.call_ompd_get_state(self.thread_handle)
+        return (state, wait_id)
+
+    def __del__(self):
+        """Releases the given thread handle."""
+        pass  # let capsule destructors do the job

diff  --git a/openmp/libomptarget/utils/generate_microtask_cases.py b/openmp/libomptarget/utils/generate_microtask_cases.py
index f162b2c75c32a..1376c3ef54725 100755
--- a/openmp/libomptarget/utils/generate_microtask_cases.py
+++ b/openmp/libomptarget/utils/generate_microtask_cases.py
@@ -2,30 +2,37 @@
 
 import argparse
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--max_args', type=int, help='Max number of arguments to generate case statements for', required=True)
-    parser.add_argument('--output', help='Output header file to include', required=True)
+    parser.add_argument(
+        "--max_args",
+        type=int,
+        help="Max number of arguments to generate case statements for",
+        required=True,
+    )
+    parser.add_argument("--output", help="Output header file to include", required=True)
     args = parser.parse_args()
 
-    output=''
-    for i in range(args.max_args+1):
-        output += 'case %d:\n'%(i)
-        output += '((void (*)(kmp_int32 *, kmp_int32 *\n'
+    output = ""
+    for i in range(args.max_args + 1):
+        output += "case %d:\n" % (i)
+        output += "((void (*)(kmp_int32 *, kmp_int32 *\n"
         for j in range(i):
-            output += ', void *'
-            if (j+1)%4 == 0:
-                output += '\n'
-        output += '))fn)(&global_tid, &bound_tid\n'
+            output += ", void *"
+            if (j + 1) % 4 == 0:
+                output += "\n"
+        output += "))fn)(&global_tid, &bound_tid\n"
         for j in range(i):
-            output += ', args[%d]'%(j)
-            if (j+1)%4 == 0:
-                output += '\n'
-        output += ');\n'
-        output += 'break;\n'
+            output += ", args[%d]" % (j)
+            if (j + 1) % 4 == 0:
+                output += "\n"
+        output += ");\n"
+        output += "break;\n"
 
-    with open(args.output, 'w') as f:
+    with open(args.output, "w") as f:
         print(output, file=f)
 
+
 if __name__ == "__main__":
     main()

diff  --git a/openmp/runtime/test/affinity/format/check.py b/openmp/runtime/test/affinity/format/check.py
index 0adddbdf23bf5..76d44288478df 100644
--- a/openmp/runtime/test/affinity/format/check.py
+++ b/openmp/runtime/test/affinity/format/check.py
@@ -3,6 +3,7 @@
 import argparse
 import re
 
+
 class Checks(object):
     class CheckError(Exception):
         pass
@@ -13,32 +14,39 @@ def __init__(self, filename, prefix):
         self.check_no_output = False
         self.filename = filename
         self.prefix = prefix
+
     def readStdin(self):
-        self.lines = [l.rstrip('\r\n') for l in sys.stdin.readlines()]
+        self.lines = [l.rstrip("\r\n") for l in sys.stdin.readlines()]
+
     def readChecks(self):
         with open(self.filename) as f:
             for line in f:
-                match = re.search('{}: NO_OUTPUT'.format(self.prefix), line)
+                match = re.search("{}: NO_OUTPUT".format(self.prefix), line)
                 if match is not None:
                     self.check_no_output = True
                     return
-                match = re.search('{}: num_threads=([0-9]+) (.*)$'.format(self.prefix), line)
+                match = re.search(
+                    "{}: num_threads=([0-9]+) (.*)$".format(self.prefix), line
+                )
                 if match is not None:
                     num_threads = int(match.group(1))
                     for i in range(num_threads):
                         self.checks.append(match.group(2))
                     continue
+
     def check(self):
         # If no checks at all, then nothing to do
         if len(self.checks) == 0 and not self.check_no_output:
-            print('Nothing to check for')
+            print("Nothing to check for")
             return
         # Check if we are expecting no output
         if self.check_no_output:
             if len(self.lines) == 0:
                 return
             else:
-                raise Checks.CheckError('{}: Output was found when expecting none.'.format(self.prefix))
+                raise Checks.CheckError(
+                    "{}: Output was found when expecting none.".format(self.prefix)
+                )
         # Run through each check line and see if it exists in the output
         # If it does, then delete the line from output and look for the
         # next check line.
@@ -53,18 +61,28 @@ def check(self):
                     index = idx
                     break
             if not found:
-                raise Checks.CheckError('{}: Did not find: {}'.format(self.prefix, c))
+                raise Checks.CheckError("{}: Did not find: {}".format(self.prefix, c))
             else:
                 del self.lines[index]
         if len(self.lines) != 0:
-            raise Checks.CheckError('{}: Extra output: {}'.format(self.prefix, self.lines))
+            raise Checks.CheckError(
+                "{}: Extra output: {}".format(self.prefix, self.lines)
+            )
+
 
 # Setup argument parsing
-parser = argparse.ArgumentParser(description='''This script checks output of
-    a program against "CHECK" lines in filename''')
-parser.add_argument('filename', default=None, help='filename to check against')
-parser.add_argument('-c', '--check-prefix', dest='prefix',
-                    default='CHECK', help='check prefix token default: %(default)s')
+parser = argparse.ArgumentParser(
+    description="""This script checks output of
+    a program against "CHECK" lines in filename"""
+)
+parser.add_argument("filename", default=None, help="filename to check against")
+parser.add_argument(
+    "-c",
+    "--check-prefix",
+    dest="prefix",
+    default="CHECK",
+    help="check prefix token default: %(default)s",
+)
 command_args = parser.parse_args()
 # Do the checking
 checks = Checks(command_args.filename, command_args.prefix)

diff  --git a/polly/docs/conf.py b/polly/docs/conf.py
index b35c4a2ae9bcc..908f4d6fa6e17 100644
--- a/polly/docs/conf.py
+++ b/polly/docs/conf.py
@@ -17,66 +17,66 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
+extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Polly'
-copyright = u'2010-%d, The Polly Team' % date.today().year
+project = "Polly"
+copyright = "2010-%d, The Polly Team" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'analyzer']
+exclude_patterns = ["_build", "analyzer"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 in_progress_title = "(In-Progress) " if tags.has("PreRelease") else ""
 
@@ -90,35 +90,36 @@
 # a list of builtin themes.
 try:
     import sphinx_rtd_theme
+
     html_theme = "sphinx_rtd_theme"
     html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 except ImportError:
-    html_theme = 'haiku'
+    html_theme = "haiku"
 
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -127,92 +128,89 @@
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'Pollydoc'
+htmlhelp_basename = "Pollydoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'Polly.tex', u'Polly Documentation',
-   u'The Polly Team', 'manual'),
+    ("index", "Polly.tex", "Polly Documentation", "The Polly Team", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -221,16 +219,22 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'Polly', u'Polly Documentation',
-   u'The Polly Team', 'Polly', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "index",
+        "Polly",
+        "Polly Documentation",
+        "The Polly Team",
+        "Polly",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

diff  --git a/polly/lib/External/isl/imath/tests/gmp-compat-test/genctest.py b/polly/lib/External/isl/imath/tests/gmp-compat-test/genctest.py
index d2771a4567e68..ed0e6c6a00727 100644
--- a/polly/lib/External/isl/imath/tests/gmp-compat-test/genctest.py
+++ b/polly/lib/External/isl/imath/tests/gmp-compat-test/genctest.py
@@ -73,8 +73,12 @@ def init_var_from_param(self, ty, var, param):
         code = "\t"
         if ty == mpz_t or ty == mpq_t:
             code += self.api_call_prefix(ty) + "init(" + var + ");\n\t"
-            code += self.api_call_prefix(ty) + "set_str(" + ",".join(
-                [var, param, "10"]) + ")"
+            code += (
+                self.api_call_prefix(ty)
+                + "set_str("
+                + ",".join([var, param, "10"])
+                + ")"
+            )
             if ty == mpq_t:
                 code += ";\n\t"
                 code += self.api_call_prefix(ty) + "canonicalize(" + var + ")"
@@ -101,8 +105,12 @@ def make_api_call(self):
         ret = "\t"
         ret_ty = self.api.ret_ty
         if ret_ty != void:
-            ret += self.test_var_type(ret_ty) + " " + self.test_var_name(
-                ret_ty, "_ret") + " = "
+            ret += (
+                self.test_var_type(ret_ty)
+                + " "
+                + self.test_var_name(ret_ty, "_ret")
+                + " = "
+            )
         # call mpq or mpz function
         if self.api.name.startswith("mpz_"):
             prefix = self.api_call_prefix(mpz_t)
@@ -118,34 +126,35 @@ def normalize_cmp(self, ty):
 	  {var} = 1;
 	else if ({var} < 0)
 	  {var} = -1;\n\t
-""".format(var=cmpval)
+""".format(
+            var=cmpval
+        )
         return code
 
     def extract_result(self, ty, pos):
         code = ""
         if ty == mpz_t or ty == mpq_t:
             var = self.test_var_name(ty, pos)
-            code += self.api_call_prefix(
-                ty) + "get_str(out+offset, 10," + var + ");\n"
+            code += self.api_call_prefix(ty) + "get_str(out+offset, 10," + var + ");\n"
             code += "\toffset = offset + strlen(out); "
             code += "out[offset] = ' '; out[offset+1] = 0; offset += 1;"
         else:
             assert pos == -1, "expected a return value, not a param value"
             if ty == ilong:
                 var = self.test_var_name(ty, "_ret")
-                code += 'offset = sprintf(out+offset, " %ld ", ' + var + ');'
+                code += 'offset = sprintf(out+offset, " %ld ", ' + var + ");"
             elif ty == ulong:
                 var = self.test_var_name(ty, "_ret")
-                code += 'offset = sprintf(out+offset, " %lu ", ' + var + ');'
+                code += 'offset = sprintf(out+offset, " %lu ", ' + var + ");"
             elif ty == iint:
                 var = self.test_var_name(ty, "_ret")
-                code += 'offset = sprintf(out+offset, " %d ", ' + var + ');'
+                code += 'offset = sprintf(out+offset, " %d ", ' + var + ");"
             elif ty == size_t:
                 var = self.test_var_name(ty, "_ret")
-                code += 'offset = sprintf(out+offset, " %zu ", ' + var + ');'
+                code += 'offset = sprintf(out+offset, " %zu ", ' + var + ");"
             elif ty == charp:
                 var = self.test_var_name(ty, "_ret")
-                code += 'offset = sprintf(out+offset, " %s ", ' + var + ');'
+                code += 'offset = sprintf(out+offset, " %s ", ' + var + ");"
             else:
                 raise RuntimeError("Unknown param type: " + str(ty))
         return code
@@ -160,9 +169,12 @@ def extract_results(self):
 
         # call canonicalize for mpq_set_ui
         if self.api.name == "mpq_set_ui":
-            code += self.api_call_prefix(
-                mpq_t) + "canonicalize(" + self.test_var_name(mpq_t,
-                                                              0) + ");\n\t"
+            code += (
+                self.api_call_prefix(mpq_t)
+                + "canonicalize("
+                + self.test_var_name(mpq_t, 0)
+                + ");\n\t"
+            )
 
         # get return value
         if ret_ty != void:
@@ -180,8 +192,7 @@ def clear_local_vars(self):
         for (i, p) in enumerate(self.api.params):
             if p == mpz_t or p == mpq_t:
                 var = self.test_var_name(p, i)
-                code += "\t" + self.api_call_prefix(
-                    p) + "clear(" + var + ");\n"
+                code += "\t" + self.api_call_prefix(p) + "clear(" + var + ");\n"
         return code
 
     def print_test_code(self, outf):
@@ -190,8 +201,9 @@ def print_test_code(self, outf):
             self.test_param_type(p) + " " + self.test_param_name(p, i)
             for (i, p) in enumerate(api.params)
         ]
-        code = "void {}_{}(char *out, {})".format(self.test_prefix(), api.name,
-                                                  ", ".join(params))
+        code = "void {}_{}(char *out, {})".format(
+            self.test_prefix(), api.name, ", ".join(params)
+        )
         code += "{\n"
         code += self.init_vars_from_params()
         code += self.make_api_call()

diff  --git a/polly/lib/External/isl/imath/tests/gmp-compat-test/gendata.py b/polly/lib/External/isl/imath/tests/gmp-compat-test/gendata.py
index c1422dde09c22..3ca17ff359d91 100644
--- a/polly/lib/External/isl/imath/tests/gmp-compat-test/gendata.py
+++ b/polly/lib/External/isl/imath/tests/gmp-compat-test/gendata.py
@@ -42,10 +42,13 @@ def apply(fun, lst):
 mm_all = mm_slong + mm_ulong + mm_sint + mm_uint + mm_sshort + mm_ushort
 zero_one_all = mzero_one + zero_one
 
-mpz_std_list = zero_one_all + mm_all + apply(plus1, mm_all) + apply(
-    minus1, mm_all)
-si_std_list = zero_one + mm_slong + mm_sint + mm_sshort + mm_slong1 + mm_sint1 + mm_sshort1
-ui_std_list = zero_one + mm_ulong + mm_uint + mm_ushort + mm_ulong1 + mm_uint1 + mm_ushort1
+mpz_std_list = zero_one_all + mm_all + apply(plus1, mm_all) + apply(minus1, mm_all)
+si_std_list = (
+    zero_one + mm_slong + mm_sint + mm_sshort + mm_slong1 + mm_sint1 + mm_sshort1
+)
+ui_std_list = (
+    zero_one + mm_ulong + mm_uint + mm_ushort + mm_ulong1 + mm_uint1 + mm_ushort1
+)
 
 
 def gen_random_mpz(mindigits=1, maxdigits=100, allowneg=True):
@@ -73,8 +76,9 @@ def gen_digits(length):
     if length == 1:
         i = random.randint(1, 9)
     else:
-        digits = [random.randint(1, 9)
-                  ] + [random.randint(0, 9) for x in range(length - 1)]
+        digits = [random.randint(1, 9)] + [
+            random.randint(0, 9) for x in range(length - 1)
+        ]
         digits = map(str, digits)
         i = "".join(digits)
     return str(i)
@@ -82,8 +86,7 @@ def gen_digits(length):
 
 def gen_mpzs(mindigits=1, maxdigits=100, count=10):
     return [
-        gen_random_mpz(mindigits=mindigits, maxdigits=maxdigits)
-        for x in range(count)
+        gen_random_mpz(mindigits=mindigits, maxdigits=maxdigits) for x in range(count)
     ]
 
 
@@ -123,8 +126,7 @@ def is_large_mpz(s):
 
 
 def gen_mpz_spread(count=default_count):
-    return gen_small_mpzs(count) + gen_medium_mpzs(count) + gen_large_mpzs(
-        count)
+    return gen_small_mpzs(count) + gen_medium_mpzs(count) + gen_large_mpzs(count)
 
 
 def gen_mpz_args(count=default_count):
@@ -257,8 +259,7 @@ def mpz_export_data(api):
     size = ["1", "2", "4", "8"]
     endian = ["0"]
     nails = ["0"]
-    ops = gen_mpz_args(1000) + gen_mpzs(
-        count=100, mindigits=100, maxdigits=1000)
+    ops = gen_mpz_args(1000) + gen_mpzs(count=100, mindigits=100, maxdigits=1000)
 
     args = []
     for r in rop:
@@ -274,8 +275,7 @@ def mpz_export_data(api):
 
 def mpz_sizeinbase_data(api):
     bases = list(map(str, range(2, 37)))
-    ops = gen_mpz_args(1000) + gen_mpzs(
-        count=1000, mindigits=100, maxdigits=2000)
+    ops = gen_mpz_args(1000) + gen_mpzs(count=1000, mindigits=100, maxdigits=2000)
     return [(op, b) for op in ops for b in bases]
 
 
@@ -381,7 +381,7 @@ def fixup_args(name, args):
 }
 
 if __name__ == "__main__":
-    #apis = [gmpapi.get_api("mpq_set_str"),]
+    # apis = [gmpapi.get_api("mpq_set_str"),]
     apis = gmpapi.apis
     for api in apis:
         tests = gen_args(api)

diff  --git a/polly/lib/External/isl/imath/tests/gmp-compat-test/genpytest.py b/polly/lib/External/isl/imath/tests/gmp-compat-test/genpytest.py
index 1f3d787c75b42..1b5a38ce829b3 100644
--- a/polly/lib/External/isl/imath/tests/gmp-compat-test/genpytest.py
+++ b/polly/lib/External/isl/imath/tests/gmp-compat-test/genpytest.py
@@ -5,7 +5,8 @@
 
 
 def print_header(outf):
-    outf.write("""
+    outf.write(
+        """
 #AUTOGENERATED FILE
 import ctypes
 import os
@@ -14,11 +15,13 @@ def print_header(outf):
 
 verbose = False
 fork    = False
-  """)
+  """
+    )
 
 
 def print_cmp(outf):
-    outf.write("""
+    outf.write(
+        """
 def passt(line, name, a_s, b_s):
     if verbose:
       print("PASS: {}@{} {} == {}".format(line, name, a_s, b_s))
@@ -101,11 +104,13 @@ def test_mpz_import(line, name, gmp_test_so, imath_test_so, *args):
   #print(iout.raw[:70])
   return cstr_eq(line, name, gout, iout)
 
-""")
+"""
+    )
 
 
 def print_api(name, outf):
-    outf.write("""
+    outf.write(
+        """
 def test_{0}(line, name, gmp_test_so, imath_test_so, *args):
   gout = ctypes.create_string_buffer(1024*4);
   iout = ctypes.create_string_buffer(1024*4);
@@ -113,14 +118,19 @@ def test_{0}(line, name, gmp_test_so, imath_test_so, *args):
   imath_test_so.test_{0}(iout, *args)
   eq = cstr_eq(line, name, gout, iout)
   return eq
-""".format(name))
+""".format(
+            name
+        )
+    )
 
 
 def print_api_map(outf):
-    outf.write("""
+    outf.write(
+        """
 def get_wrapper(name):
   test_wrappers = {
-""")
+"""
+    )
     for api in gmpapi.apis:
         outf.write('    "{}" : {},\n'.format(api.name, "test_" + api.name))
     outf.write("  }\n")

diff  --git a/polly/lib/External/isl/imath/tests/gmp-compat-test/gmpapi.py b/polly/lib/External/isl/imath/tests/gmp-compat-test/gmpapi.py
index 53b54d3615d78..d3eeeba288384 100644
--- a/polly/lib/External/isl/imath/tests/gmp-compat-test/gmpapi.py
+++ b/polly/lib/External/isl/imath/tests/gmp-compat-test/gmpapi.py
@@ -13,10 +13,10 @@ def __str__(self):
 
 class GMPAPI:
     def __init__(self, ret_ty, name, *params, **kw):
-        out = kw.get('out', [0])
-        inout = kw.get('inout', [])
-        mixed = kw.get('mixed', False)
-        custom = kw.get('custom', False)
+        out = kw.get("out", [0])
+        inout = kw.get("inout", [])
+        mixed = kw.get("mixed", False)
+        custom = kw.get("custom", False)
         self.name = name
         self.ret_ty = ret_ty
         self.params = params
@@ -27,7 +27,7 @@ def __init__(self, ret_ty, name, *params, **kw):
         if self.ret_ty != void and not mixed:
             self.out_params = []
         else:
-            self.out_params = out  #param location of the output result
+            self.out_params = out  # param location of the output result
 
     def is_write_only(self, pos):
         if pos in self.out_params and pos not in self.inout_params:
@@ -35,8 +35,9 @@ def is_write_only(self, pos):
         return False
 
     def __str__(self):
-        return ("{} {}({})".format(self.ret_ty, self.name, ",".join(
-            map(str, self.params))))
+        return "{} {}({})".format(
+            self.ret_ty, self.name, ",".join(map(str, self.params))
+        )
 
     def __repr__(self):
         return str(self)
@@ -96,7 +97,8 @@ def __repr__(self):
         iint,
         size_t,
         mpz_t,
-        custom=True),
+        custom=True,
+    ),
     # The mpz_import signature is a bit of a lie, but it is ok because it is custom
     GMPAPI(
         void,
@@ -108,10 +110,10 @@ def __repr__(self):
         iint,
         size_t,
         mpz_t,
-        custom=True),
+        custom=True,
+    ),
     GMPAPI(size_t, "mpz_sizeinbase", mpz_t, iint),
     GMPAPI(charp, "mpz_get_str", charp, iint, mpz_t),
-
     # mpq functions
     GMPAPI(iint, "mpq_set_str", mpq_t, charp, iint, out=[0], mixed=True),
     GMPAPI(void, "mpq_canonicalize", mpq_t, inout=[0]),

diff  --git a/polly/lib/External/isl/imath/tests/gmp-compat-test/runtest.py b/polly/lib/External/isl/imath/tests/gmp-compat-test/runtest.py
index 35ac598e15ae6..3677acb480573 100644
--- a/polly/lib/External/isl/imath/tests/gmp-compat-test/runtest.py
+++ b/polly/lib/External/isl/imath/tests/gmp-compat-test/runtest.py
@@ -36,15 +36,17 @@ def run_tests(test_file, options):
         if options.skip > 0 and line < options.skip:
             continue
         name, args = test.split("|")
-        if options.verbose or (options.progress > 0
-                               and line % options.progress == 0):
+        if options.verbose or (options.progress > 0 and line % options.progress == 0):
             print("TEST: {}@{}".format(line, test), end="")
         api = gmpapi.get_api(name)
         wrapper = wrappers.get_wrapper(name)
         input_args = args.split(",")
         if len(api.params) != len(input_args):
-            raise RuntimeError("Mismatch in args length: {} != {}".format(
-                len(api.params), len(input_args)))
+            raise RuntimeError(
+                "Mismatch in args length: {} != {}".format(
+                    len(api.params), len(input_args)
+                )
+            )
 
         call_args = []
         for i in range(len(api.params)):
@@ -72,8 +74,9 @@ def run_tests(test_file, options):
             else:
                 raise RuntimeError("Unknown param type: {}".format(param))
 
-        res = wrappers.run_test(wrapper, line, name, gmp_test_so,
-                                imath_test_so, *call_args)
+        res = wrappers.run_test(
+            wrapper, line, name, gmp_test_so, imath_test_so, *call_args
+        )
         if not res:
             failures += 1
             print_failure(line, test)
@@ -90,27 +93,26 @@ def parse_args():
         "--fork",
         help="fork() before each operation",
         action="store_true",
-        default=False)
+        default=False,
+    )
     parser.add_option(
         "-v",
         "--verbose",
         help="print PASS and FAIL tests",
         action="store_true",
-        default=False)
+        default=False,
+    )
     parser.add_option(
         "-p",
         "--progress",
         help="print progress every N tests ",
         metavar="N",
         type="int",
-        default=0)
+        default=0,
+    )
     parser.add_option(
-        "-s",
-        "--skip",
-        help="skip to test N",
-        metavar="N",
-        type="int",
-        default=0)
+        "-s", "--skip", help="skip to test N", metavar="N", type="int", default=0
+    )
     return parser.parse_args()
 
 
@@ -128,16 +130,22 @@ def parse_args():
     for test_file in tests:
         print("Running tests in {}".format(test_file))
         (passes, failures, fail_lines) = run_tests(test_file, options)
-        print("  Tests: {}. Passes: {}. Failures: {}.".format(
-            passes + failures, passes, failures))
+        print(
+            "  Tests: {}. Passes: {}. Failures: {}.".format(
+                passes + failures, passes, failures
+            )
+        )
         total_pass += passes
         total_fail += failures
         all_fail_lines += fail_lines
 
     print("=" * 70)
     print("Total")
-    print("  Tests: {}. Passes: {}. Failures: {}.".format(
-        total_pass + total_fail, total_pass, total_fail))
+    print(
+        "  Tests: {}. Passes: {}. Failures: {}.".format(
+            total_pass + total_fail, total_pass, total_fail
+        )
+    )
     if len(all_fail_lines) > 0:
         print("Failing Tests:")
         for (line, test) in all_fail_lines:

diff  --git a/polly/lib/External/isl/imath/tools/mkdoc.py b/polly/lib/External/isl/imath/tools/mkdoc.py
index 4744781ee7537..34432dd28a2d8 100644
--- a/polly/lib/External/isl/imath/tools/mkdoc.py
+++ b/polly/lib/External/isl/imath/tools/mkdoc.py
@@ -14,13 +14,15 @@
 # A regular expression to match commented declarations.
 # This is specific to C and not very general; it should work fine for the imath
 # headers but will not adapt well to arbitrary code or to C++.
-doc = re.compile(r'''(?mx)/\*\* # open  /**
+doc = re.compile(
+    r"""(?mx)/\*\* # open  /**
 (?P<text>(?:[^*]|\*[^/])*)      # text      Does a thing
 \*/\n                           # close */
-(?P<decl>[^;{]*(?:;$|\{))''')  # decl  void f(x);
+(?P<decl>[^;{]*(?:;$|\{))"""
+)  # decl  void f(x);
 
 # A regular expression matching up to 4 spaces at the head of a line.
-spc = re.compile(r'(?m)^ {1,4}')
+spc = re.compile(r"(?m)^ {1,4}")
 
 # A regular expression matching an insertion point.  An insertion point has the
 # form {{include "header" name ...}}.  If no names are given, all the names in
@@ -28,7 +30,7 @@
 ins = re.compile(r'{{insert "(?P<file>[^"]*)"(?P<names>(?:\s+\w+)+)?\s*}}')
 
 # A regular expression matching non-identifier characters, for splitting.
-nid = re.compile(r'\W+')
+nid = re.compile(r"\W+")
 
 # A cache of already-parsed files, maps filename to declarations.
 CACHE = {}
@@ -43,17 +45,19 @@ def typeset(text):
     """Renders text with verbatim sections into markdown."""
     lines = []
     fence = False
-    for line in text.split('\n'):
-        if fence != line.startswith(' '):
-            lines.append('```')
+    for line in text.split("\n"):
+        if fence != line.startswith(" "):
+            lines.append("```")
             fence = not fence
         lines.append(line)
     if fence:
-        lines.append('```')
+        lines.append("```")
     for i, line in enumerate(lines):
-        if i == 0: lines[i] = ' -  ' + line
-        elif line: lines[i] = '    ' + line
-    return '\n'.join(lines)
+        if i == 0:
+            lines[i] = " -  " + line
+        elif line:
+            lines[i] = "    " + line
+    return "\n".join(lines)
 
 
 class LIndex(object):
@@ -69,7 +73,7 @@ def __init__(self, text):
         # Scan forward for newlines or EOF, and push the offsets of the line
         # breaks onto the list so we can binary search them later.
         while pos < len(text):
-            next = text.find('\n', pos)
+            next = text.find("\n", pos)
             if next < 0:
                 break
             idx.append(next)
@@ -115,13 +119,13 @@ def __init__(self, com, decl, line=None):
           decl: the raw text of the declaration
           line: the line number of the declaration
         """
-        lp = decl.find('(')
+        lp = decl.find("(")
         if lp < 0:
-            self.name = last_word(decl.rstrip(';'))
+            self.name = last_word(decl.rstrip(";"))
         else:
             self.name = last_word(decl[:lp])
-        self.decl = ' '.join(decl.rstrip(';{').strip().split())
-        self.comment = spc.sub('', com.rstrip())
+        self.decl = " ".join(decl.rstrip(";{").strip().split())
+        self.comment = spc.sub("", com.rstrip())
         self.line = line
 
     def __repr__(self):
@@ -134,14 +138,16 @@ def markdown(self, path):
             path,
             self.line,
             self.name,
-            self.decl[pos + len(self.name):],
+            self.decl[pos + len(self.name) :],
         )
-        return '''------------
+        return """------------
 <a id="{name}"></a><pre>
 {decl};
 </pre>
 {comment}
-'''.format(name=self.name, decl=decl, comment=typeset(self.comment))
+""".format(
+            name=self.name, decl=decl, comment=typeset(self.comment)
+        )
 
 
 def parse_decls(text):
@@ -149,8 +155,8 @@ def parse_decls(text):
     decls = collections.OrderedDict()
     idx = LIndex(text)
     for m in doc.finditer(text):
-        line, _ = idx.linecol(m.span('decl')[0])
-        d = Decl(m.group('text'), m.group('decl'), line)
+        line, _ = idx.linecol(m.span("decl")[0])
+        d = Decl(m.group("text"), m.group("decl"), line)
         decls[d.name] = d
     return decls
 
@@ -158,7 +164,7 @@ def parse_decls(text):
 def load_file(path):
     """Load declarations from path, or use cached results."""
     if path not in CACHE:
-        with file(path, 'rU') as fp:
+        with file(path, "rU") as fp:
             CACHE[path] = parse_decls(fp.read())
     return CACHE[path]
 
@@ -171,35 +177,38 @@ def main(args):
     doc_template = args[0]
     doc_markdown = args[1]
 
-    with file(doc_template, 'rU') as input:
+    with file(doc_template, "rU") as input:
         template = input.read()
 
-    with file(doc_markdown, 'wt') as output:
+    with file(doc_markdown, "wt") as output:
         print(
-            '''<!--
+            """<!--
   This file was generated from "{0}" by mkdoc.py
   DO NOT EDIT
 -->
-'''.format(doc_template),
-            file=output)
+""".format(
+                doc_template
+            ),
+            file=output,
+        )
 
         pos = 0  # last position of input copied
 
         # Look for substitution markers in the template, and replace them with
         # their content.
         for ip in ins.finditer(template):
-            output.write(template[pos:ip.start()])
+            output.write(template[pos : ip.start()])
             pos = ip.end()
 
-            decls = load_file(ip.group('file'))
-            if ip.group('names'):  # pick the selected names, in order
+            decls = load_file(ip.group("file"))
+            if ip.group("names"):  # pick the selected names, in order
                 decls = collections.OrderedDict(
-                    (key, decls[key])
-                    for key in ip.group('names').strip().split())
+                    (key, decls[key]) for key in ip.group("names").strip().split()
+                )
 
             # Render the selected declarations.
             for decl in decls.values():
-                print(decl.markdown(ip.group('file')), file=output)
+                print(decl.markdown(ip.group("file")), file=output)
 
         # Clean up any remaining template bits
         output.write(template[pos:])

diff  --git a/polly/lib/External/isl/isl_test_python.py b/polly/lib/External/isl/isl_test_python.py
index 3894ad1cced91..05bb0c8246421 100755
--- a/polly/lib/External/isl/isl_test_python.py
+++ b/polly/lib/External/isl/isl_test_python.py
@@ -21,39 +21,42 @@
 #  multiple overloaded constructors and overload resolution is tested.
 #
 def test_constructors():
-	zero1 = isl.val("0")
-	assert(zero1.is_zero())
+    zero1 = isl.val("0")
+    assert zero1.is_zero()
 
-	zero2 = isl.val(0)
-	assert(zero2.is_zero())
+    zero2 = isl.val(0)
+    assert zero2.is_zero()
 
-	zero3 = isl.val.zero()
-	assert(zero3.is_zero())
+    zero3 = isl.val.zero()
+    assert zero3.is_zero()
 
-	bs = isl.basic_set("{ [1] }")
-	result = isl.set("{ [1] }")
-	s = isl.set(bs)
-	assert(s.is_equal(result))
+    bs = isl.basic_set("{ [1] }")
+    result = isl.set("{ [1] }")
+    s = isl.set(bs)
+    assert s.is_equal(result)
+
+    us = isl.union_set("{ A[1]; B[2, 3] }")
+    empty = isl.union_set.empty()
+    assert us.is_equal(us.union(empty))
 
-	us = isl.union_set("{ A[1]; B[2, 3] }")
-	empty = isl.union_set.empty()
-	assert(us.is_equal(us.union(empty)))
 
 # Test integer function parameters for a particular integer value.
 #
 def test_int(i):
-	val_int = isl.val(i)
-	val_str = isl.val(str(i))
-	assert(val_int.eq(val_str))
+    val_int = isl.val(i)
+    val_str = isl.val(str(i))
+    assert val_int.eq(val_str)
+
 
 # Test integer function parameters.
 #
 # Verify that extreme values and zero work.
 #
 def test_parameters_int():
-	test_int(sys.maxsize)
-	test_int(-sys.maxsize - 1)
-	test_int(0)
+    test_int(sys.maxsize)
+    test_int(-sys.maxsize - 1)
+    test_int(0)
+
 
 # Test isl objects parameters.
 #
@@ -64,33 +67,35 @@ def test_parameters_int():
 # the method is called.
 #
 def test_parameters_obj():
-	a = isl.set("{ [0] }")
-	b = isl.set("{ [1] }")
-	c = isl.set("{ [2] }")
-	expected = isl.set("{ [i] : 0 <= i <= 2 }")
+    a = isl.set("{ [0] }")
+    b = isl.set("{ [1] }")
+    c = isl.set("{ [2] }")
+    expected = isl.set("{ [i] : 0 <= i <= 2 }")
+
+    tmp = a.union(b)
+    res_lvalue_param = tmp.union(c)
+    assert res_lvalue_param.is_equal(expected)
 
-	tmp = a.union(b)
-	res_lvalue_param = tmp.union(c)
-	assert(res_lvalue_param.is_equal(expected))
+    res_rvalue_param = a.union(b).union(c)
+    assert res_rvalue_param.is_equal(expected)
 
-	res_rvalue_param = a.union(b).union(c)
-	assert(res_rvalue_param.is_equal(expected))
+    a2 = isl.basic_set("{ [0] }")
+    assert a.is_equal(a2)
 
-	a2 = isl.basic_set("{ [0] }")
-	assert(a.is_equal(a2))
+    two = isl.val(2)
+    half = isl.val("1/2")
+    res_only_this_param = two.inv()
+    assert res_only_this_param.eq(half)
 
-	two = isl.val(2)
-	half = isl.val("1/2")
-	res_only_this_param = two.inv()
-	assert(res_only_this_param.eq(half))
 
 # Test 
diff erent kinds of parameters to be passed to functions.
 #
 # This includes integer and isl object parameters.
 #
 def test_parameters():
-	test_parameters_int()
-	test_parameters_obj()
+    test_parameters_int()
+    test_parameters_obj()
+
 
 # Test that isl objects are returned correctly.
 #
@@ -98,55 +103,59 @@ def test_parameters():
 # returned.
 #
 def test_return_obj():
-	one = isl.val("1")
-	two = isl.val("2")
-	three = isl.val("3")
+    one = isl.val("1")
+    two = isl.val("2")
+    three = isl.val("3")
+
+    res = one.add(two)
 
-	res = one.add(two)
+    assert res.eq(three)
 
-	assert(res.eq(three))
 
 # Test that integer values are returned correctly.
 #
 def test_return_int():
-	one = isl.val("1")
-	neg_one = isl.val("-1")
-	zero = isl.val("0")
+    one = isl.val("1")
+    neg_one = isl.val("-1")
+    zero = isl.val("0")
+
+    assert one.sgn() > 0
+    assert neg_one.sgn() < 0
+    assert zero.sgn() == 0
 
-	assert(one.sgn() > 0)
-	assert(neg_one.sgn() < 0)
-	assert(zero.sgn() == 0)
 
 # Test that isl_bool values are returned correctly.
 #
 # In particular, check the conversion to bool in case of true and false.
 #
 def test_return_bool():
-	empty = isl.set("{ : false }")
-	univ = isl.set("{ : }")
+    empty = isl.set("{ : false }")
+    univ = isl.set("{ : }")
 
-	b_true = empty.is_empty()
-	b_false = univ.is_empty()
+    b_true = empty.is_empty()
+    b_false = univ.is_empty()
+
+    assert b_true
+    assert not b_false
 
-	assert(b_true)
-	assert(not b_false)
 
 # Test that strings are returned correctly.
 # Do so by calling overloaded isl.ast_build.from_expr methods.
 #
 def test_return_string():
-	context = isl.set("[n] -> { : }")
-	build = isl.ast_build.from_context(context)
-	pw_aff = isl.pw_aff("[n] -> { [n] }")
-	set = isl.set("[n] -> { : n >= 0 }")
+    context = isl.set("[n] -> { : }")
+    build = isl.ast_build.from_context(context)
+    pw_aff = isl.pw_aff("[n] -> { [n] }")
+    set = isl.set("[n] -> { : n >= 0 }")
+
+    expr = build.expr_from(pw_aff)
+    expected_string = "n"
+    assert expected_string == expr.to_C_str()
 
-	expr = build.expr_from(pw_aff)
-	expected_string = "n"
-	assert(expected_string == expr.to_C_str())
+    expr = build.expr_from(set)
+    expected_string = "n >= 0"
+    assert expected_string == expr.to_C_str()
 
-	expr = build.expr_from(set)
-	expected_string = "n >= 0"
-	assert(expected_string == expr.to_C_str())
 
 # Test that return values are handled correctly.
 #
@@ -154,16 +163,18 @@ def test_return_string():
 # returned correctly.
 #
 def test_return():
-	test_return_obj()
-	test_return_int()
-	test_return_bool()
-	test_return_string()
+    test_return_obj()
+    test_return_int()
+    test_return_bool()
+    test_return_string()
+
 
 # A class that is used to test isl.id.user.
 #
 class S:
-	def __init__(self):
-		self.value = 42
+    def __init__(self):
+        self.value = 42
+
 
 # Test isl.id.user.
 #
@@ -171,14 +182,15 @@ def __init__(self):
 # can be retrieved again.
 #
 def test_user():
-	id = isl.id("test", 5)
-	id2 = isl.id("test2")
-	id3 = isl.id("S", S())
-	assert id.user() == 5, f"unexpected user object {id.user()}"
-	assert id2.user() is None, f"unexpected user object {id2.user()}"
-	s = id3.user()
-	assert isinstance(s, S), f"unexpected user object {s}"
-	assert s.value == 42, f"unexpected user object {s}"
+    id = isl.id("test", 5)
+    id2 = isl.id("test2")
+    id3 = isl.id("S", S())
+    assert id.user() == 5, f"unexpected user object {id.user()}"
+    assert id2.user() is None, f"unexpected user object {id2.user()}"
+    s = id3.user()
+    assert isinstance(s, S), f"unexpected user object {s}"
+    assert s.value == 42, f"unexpected user object {s}"
+
 
 # Test that foreach functions are modeled correctly.
 #
@@ -188,30 +200,33 @@ def test_user():
 # the closure and that it propagates the exception.
 #
 def test_foreach():
-	s = isl.set("{ [0]; [1]; [2] }")
-
-	list = []
-	def add(bs):
-		list.append(bs)
-	s.foreach_basic_set(add)
-
-	assert(len(list) == 3)
-	assert(list[0].is_subset(s))
-	assert(list[1].is_subset(s))
-	assert(list[2].is_subset(s))
-	assert(not list[0].is_equal(list[1]))
-	assert(not list[0].is_equal(list[2]))
-	assert(not list[1].is_equal(list[2]))
-
-	def fail(bs):
-		raise Exception("fail")
-
-	caught = False
-	try:
-		s.foreach_basic_set(fail)
-	except:
-		caught = True
-	assert(caught)
+    s = isl.set("{ [0]; [1]; [2] }")
+
+    list = []
+
+    def add(bs):
+        list.append(bs)
+
+    s.foreach_basic_set(add)
+
+    assert len(list) == 3
+    assert list[0].is_subset(s)
+    assert list[1].is_subset(s)
+    assert list[2].is_subset(s)
+    assert not list[0].is_equal(list[1])
+    assert not list[0].is_equal(list[2])
+    assert not list[1].is_equal(list[2])
+
+    def fail(bs):
+        raise Exception("fail")
+
+    caught = False
+    try:
+        s.foreach_basic_set(fail)
+    except:
+        caught = True
+    assert caught
+
 
 # Test the functionality of "foreach_scc" functions.
 #
@@ -219,29 +234,31 @@ def fail(bs):
 # but where two of the elements ("a" and "b") are incomparable.
 #
 def test_foreach_scc():
-	list = isl.id_list(3)
-	sorted = [isl.id_list(3)]
-	data = {
-		'a' : isl.map("{ [0] -> [1] }"),
-		'b' : isl.map("{ [1] -> [0] }"),
-		'c' : isl.map("{ [i = 0:1] -> [i] }"),
-	}
-	for k, v in data.items():
-		list = list.add(k)
-	id = data['a'].space().domain().identity_multi_pw_aff_on_domain()
-	def follows(a, b):
-		map = data[b.name()].apply_domain(data[a.name()])
-		return not map.lex_ge_at(id).is_empty()
-
-	def add_single(scc):
-		assert(scc.size() == 1)
-		sorted[0] = sorted[0].concat(scc)
-
-	list.foreach_scc(follows, add_single)
-	assert(sorted[0].size() == 3)
-	assert(sorted[0].at(0).name() == "b")
-	assert(sorted[0].at(1).name() == "c")
-	assert(sorted[0].at(2).name() == "a")
+    list = isl.id_list(3)
+    sorted = [isl.id_list(3)]
+    data = {
+        "a": isl.map("{ [0] -> [1] }"),
+        "b": isl.map("{ [1] -> [0] }"),
+        "c": isl.map("{ [i = 0:1] -> [i] }"),
+    }
+    for k, v in data.items():
+        list = list.add(k)
+    id = data["a"].space().domain().identity_multi_pw_aff_on_domain()
+
+    def follows(a, b):
+        map = data[b.name()].apply_domain(data[a.name()])
+        return not map.lex_ge_at(id).is_empty()
+
+    def add_single(scc):
+        assert scc.size() == 1
+        sorted[0] = sorted[0].concat(scc)
+
+    list.foreach_scc(follows, add_single)
+    assert sorted[0].size() == 3
+    assert sorted[0].at(0).name() == "b"
+    assert sorted[0].at(1).name() == "c"
+    assert sorted[0].at(2).name() == "a"
+
 
 # Test the functionality of "every" functions.
 #
@@ -249,74 +266,81 @@ def add_single(scc):
 # test that exceptions are properly propagated.
 #
 def test_every():
-	us = isl.union_set("{ A[i]; B[j] }")
+    us = isl.union_set("{ A[i]; B[j] }")
+
+    def is_empty(s):
+        return s.is_empty()
 
-	def is_empty(s):
-		return s.is_empty()
-	assert(not us.every_set(is_empty))
+    assert not us.every_set(is_empty)
 
-	def is_non_empty(s):
-		return not s.is_empty()
-	assert(us.every_set(is_non_empty))
+    def is_non_empty(s):
+        return not s.is_empty()
 
-	def in_A(s):
-		return s.is_subset(isl.set("{ A[x] }"))
-	assert(not us.every_set(in_A))
+    assert us.every_set(is_non_empty)
 
-	def not_in_A(s):
-		return not s.is_subset(isl.set("{ A[x] }"))
-	assert(not us.every_set(not_in_A))
+    def in_A(s):
+        return s.is_subset(isl.set("{ A[x] }"))
 
-	def fail(s):
-		raise Exception("fail")
+    assert not us.every_set(in_A)
+
+    def not_in_A(s):
+        return not s.is_subset(isl.set("{ A[x] }"))
+
+    assert not us.every_set(not_in_A)
+
+    def fail(s):
+        raise Exception("fail")
+
+    caught = False
+    try:
+        us.ever_set(fail)
+    except:
+        caught = True
+    assert caught
 
-	caught = False
-	try:
-		us.ever_set(fail)
-	except:
-		caught = True
-	assert(caught)
 
 # Check basic construction of spaces.
 #
 def test_space():
-	unit = isl.space.unit()
-	set_space = unit.add_named_tuple("A", 3)
-	map_space = set_space.add_named_tuple("B", 2)
+    unit = isl.space.unit()
+    set_space = unit.add_named_tuple("A", 3)
+    map_space = set_space.add_named_tuple("B", 2)
+
+    set = isl.set.universe(set_space)
+    map = isl.map.universe(map_space)
+    assert set.is_equal(isl.set("{ A[*,*,*] }"))
+    assert map.is_equal(isl.map("{ A[*,*,*] -> B[*,*] }"))
 
-	set = isl.set.universe(set_space)
-	map = isl.map.universe(map_space)
-	assert(set.is_equal(isl.set("{ A[*,*,*] }")))
-	assert(map.is_equal(isl.map("{ A[*,*,*] -> B[*,*] }")))
 
 # Construct a simple schedule tree with an outer sequence node and
 # a single-dimensional band node in each branch, with one of them
 # marked coincident.
 #
 def construct_schedule_tree():
-	A = isl.union_set("{ A[i] : 0 <= i < 10 }")
-	B = isl.union_set("{ B[i] : 0 <= i < 20 }")
+    A = isl.union_set("{ A[i] : 0 <= i < 10 }")
+    B = isl.union_set("{ B[i] : 0 <= i < 20 }")
+
+    node = isl.schedule_node.from_domain(A.union(B))
+    node = node.child(0)
 
-	node = isl.schedule_node.from_domain(A.union(B))
-	node = node.child(0)
+    filters = isl.union_set_list(A).add(B)
+    node = node.insert_sequence(filters)
 
-	filters = isl.union_set_list(A).add(B)
-	node = node.insert_sequence(filters)
+    f_A = isl.multi_union_pw_aff("[ { A[i] -> [i] } ]")
+    node = node.child(0)
+    node = node.child(0)
+    node = node.insert_partial_schedule(f_A)
+    node = node.member_set_coincident(0, True)
+    node = node.ancestor(2)
 
-	f_A = isl.multi_union_pw_aff("[ { A[i] -> [i] } ]")
-	node = node.child(0)
-	node = node.child(0)
-	node = node.insert_partial_schedule(f_A)
-	node = node.member_set_coincident(0, True)
-	node = node.ancestor(2)
+    f_B = isl.multi_union_pw_aff("[ { B[i] -> [i] } ]")
+    node = node.child(1)
+    node = node.child(0)
+    node = node.insert_partial_schedule(f_B)
+    node = node.ancestor(2)
 
-	f_B = isl.multi_union_pw_aff("[ { B[i] -> [i] } ]")
-	node = node.child(1)
-	node = node.child(0)
-	node = node.insert_partial_schedule(f_B)
-	node = node.ancestor(2)
+    return node.schedule()
 
-	return node.schedule()
 
 # Test basic schedule tree functionality.
 #
@@ -327,64 +351,76 @@ def construct_schedule_tree():
 # - test every_descendant
 #
 def test_schedule_tree():
-	schedule = construct_schedule_tree()
-	root = schedule.root()
-
-	assert(type(root) == isl.schedule_node_domain)
-
-	count = [0]
-	def inc_count(node):
-		count[0] += 1
-		return node
-	root = root.map_descendant_bottom_up(inc_count)
-	assert(count[0] == 8)
-
-	def fail_map(node):
-		raise Exception("fail")
-		return node
-	caught = False
-	try:
-		root.map_descendant_bottom_up(fail_map)
-	except:
-		caught = True
-	assert(caught)
-
-	count = [0]
-	def inc_count(node):
-		count[0] += 1
-		return True
-	root.foreach_descendant_top_down(inc_count)
-	assert(count[0] == 8)
-
-	count = [0]
-	def inc_count(node):
-		count[0] += 1
-		return False
-	root.foreach_descendant_top_down(inc_count)
-	assert(count[0] == 1)
-
-	def is_not_domain(node):
-		return type(node) != isl.schedule_node_domain
-	assert(root.child(0).every_descendant(is_not_domain))
-	assert(not root.every_descendant(is_not_domain))
-
-	def fail(node):
-		raise Exception("fail")
-	caught = False
-	try:
-		root.every_descendant(fail)
-	except:
-		caught = True
-	assert(caught)
-
-	domain = root.domain()
-	filters = [isl.union_set("{}")]
-	def collect_filters(node):
-		if type(node) == isl.schedule_node_filter:
-			filters[0] = filters[0].union(node.filter())
-		return True
-	root.every_descendant(collect_filters)
-	assert(domain.is_equal(filters[0]))
+    schedule = construct_schedule_tree()
+    root = schedule.root()
+
+    assert type(root) == isl.schedule_node_domain
+
+    count = [0]
+
+    def inc_count(node):
+        count[0] += 1
+        return node
+
+    root = root.map_descendant_bottom_up(inc_count)
+    assert count[0] == 8
+
+    def fail_map(node):
+        raise Exception("fail")
+        return node
+
+    caught = False
+    try:
+        root.map_descendant_bottom_up(fail_map)
+    except:
+        caught = True
+    assert caught
+
+    count = [0]
+
+    def inc_count(node):
+        count[0] += 1
+        return True
+
+    root.foreach_descendant_top_down(inc_count)
+    assert count[0] == 8
+
+    count = [0]
+
+    def inc_count(node):
+        count[0] += 1
+        return False
+
+    root.foreach_descendant_top_down(inc_count)
+    assert count[0] == 1
+
+    def is_not_domain(node):
+        return type(node) != isl.schedule_node_domain
+
+    assert root.child(0).every_descendant(is_not_domain)
+    assert not root.every_descendant(is_not_domain)
+
+    def fail(node):
+        raise Exception("fail")
+
+    caught = False
+    try:
+        root.every_descendant(fail)
+    except:
+        caught = True
+    assert caught
+
+    domain = root.domain()
+    filters = [isl.union_set("{}")]
+
+    def collect_filters(node):
+        if type(node) == isl.schedule_node_filter:
+            filters[0] = filters[0].union(node.filter())
+        return True
+
+    root.every_descendant(collect_filters)
+    assert domain.is_equal(filters[0])
+
 
 # Test marking band members for unrolling.
 # "schedule" is the schedule created by construct_schedule_tree.
@@ -393,23 +429,27 @@ def collect_filters(node):
 # by the AST generator.
 #
 def test_ast_build_unroll(schedule):
-	root = schedule.root()
-	def mark_unroll(node):
-		if type(node) == isl.schedule_node_band:
-			node = node.member_set_ast_loop_unroll(0)
-		return node
-	root = root.map_descendant_bottom_up(mark_unroll)
-	schedule = root.schedule()
-
-	count_ast = [0]
-	def inc_count_ast(node, build):
-		count_ast[0] += 1
-		return node
-
-	build = isl.ast_build()
-	build = build.set_at_each_domain(inc_count_ast)
-	ast = build.node_from(schedule)
-	assert(count_ast[0] == 30)
+    root = schedule.root()
+
+    def mark_unroll(node):
+        if type(node) == isl.schedule_node_band:
+            node = node.member_set_ast_loop_unroll(0)
+        return node
+
+    root = root.map_descendant_bottom_up(mark_unroll)
+    schedule = root.schedule()
+
+    count_ast = [0]
+
+    def inc_count_ast(node, build):
+        count_ast[0] += 1
+        return node
+
+    build = isl.ast_build()
+    build = build.set_at_each_domain(inc_count_ast)
+    ast = build.node_from(schedule)
+    assert count_ast[0] == 30
+
 
 # Test basic AST generation from a schedule tree.
 #
@@ -419,62 +459,67 @@ def inc_count_ast(node, build):
 # - test unrolling
 #
 def test_ast_build():
-	schedule = construct_schedule_tree()
-
-	count_ast = [0]
-	def inc_count_ast(node, build):
-		count_ast[0] += 1
-		return node
-
-	build = isl.ast_build()
-	build_copy = build.set_at_each_domain(inc_count_ast)
-	ast = build.node_from(schedule)
-	assert(count_ast[0] == 0)
-	count_ast[0] = 0
-	ast = build_copy.node_from(schedule)
-	assert(count_ast[0] == 2)
-	build = build_copy
-	count_ast[0] = 0
-	ast = build.node_from(schedule)
-	assert(count_ast[0] == 2)
-
-	do_fail = True
-	count_ast_fail = [0]
-	def fail_inc_count_ast(node, build):
-		count_ast_fail[0] += 1
-		if do_fail:
-			raise Exception("fail")
-		return node
-	build = isl.ast_build()
-	build = build.set_at_each_domain(fail_inc_count_ast)
-	caught = False
-	try:
-		ast = build.node_from(schedule)
-	except:
-		caught = True
-	assert(caught)
-	assert(count_ast_fail[0] > 0)
-	build_copy = build
-	build_copy = build_copy.set_at_each_domain(inc_count_ast)
-	count_ast[0] = 0
-	ast = build_copy.node_from(schedule)
-	assert(count_ast[0] == 2)
-	count_ast_fail[0] = 0
-	do_fail = False
-	ast = build.node_from(schedule)
-	assert(count_ast_fail[0] == 2)
-
-	test_ast_build_unroll(schedule)
+    schedule = construct_schedule_tree()
+
+    count_ast = [0]
+
+    def inc_count_ast(node, build):
+        count_ast[0] += 1
+        return node
+
+    build = isl.ast_build()
+    build_copy = build.set_at_each_domain(inc_count_ast)
+    ast = build.node_from(schedule)
+    assert count_ast[0] == 0
+    count_ast[0] = 0
+    ast = build_copy.node_from(schedule)
+    assert count_ast[0] == 2
+    build = build_copy
+    count_ast[0] = 0
+    ast = build.node_from(schedule)
+    assert count_ast[0] == 2
+
+    do_fail = True
+    count_ast_fail = [0]
+
+    def fail_inc_count_ast(node, build):
+        count_ast_fail[0] += 1
+        if do_fail:
+            raise Exception("fail")
+        return node
+
+    build = isl.ast_build()
+    build = build.set_at_each_domain(fail_inc_count_ast)
+    caught = False
+    try:
+        ast = build.node_from(schedule)
+    except:
+        caught = True
+    assert caught
+    assert count_ast_fail[0] > 0
+    build_copy = build
+    build_copy = build_copy.set_at_each_domain(inc_count_ast)
+    count_ast[0] = 0
+    ast = build_copy.node_from(schedule)
+    assert count_ast[0] == 2
+    count_ast_fail[0] = 0
+    do_fail = False
+    ast = build.node_from(schedule)
+    assert count_ast_fail[0] == 2
+
+    test_ast_build_unroll(schedule)
+
 
 # Test basic AST expression generation from an affine expression.
 #
 def test_ast_build_expr():
-	pa = isl.pw_aff("[n] -> { [n + 1] }")
-	build = isl.ast_build.from_context(pa.domain())
+    pa = isl.pw_aff("[n] -> { [n + 1] }")
+    build = isl.ast_build.from_context(pa.domain())
+
+    op = build.expr_from(pa)
+    assert type(op) == isl.ast_expr_op_add
+    assert op.n_arg() == 2
 
-	op = build.expr_from(pa)
-	assert(type(op) == isl.ast_expr_op_add)
-	assert(op.n_arg() == 2)
 
 # Test the isl Python interface
 #

diff --git a/polly/lib/External/isl/libisl-gdb.py b/polly/lib/External/isl/libisl-gdb.py
index fd15626c7739f..bf01bc583d15d 100644
--- a/polly/lib/External/isl/libisl-gdb.py
+++ b/polly/lib/External/isl/libisl-gdb.py
@@ -3,98 +3,116 @@
 
 # GDB Pretty Printers for most isl objects
 class IslObjectPrinter:
-	"""Print an isl object"""
-	def __init__ (self, val, type):
-		self.val = val
-		self.type = type
-
-	def to_string (self):
-		# Cast val to a void pointer to stop gdb using this pretty
-		# printer for the pointer which would lead to an infinite loop.
-		void_ptr = gdb.lookup_type('void').pointer()
-		value = str(self.val.cast(void_ptr))
-		printer = gdb.parse_and_eval("isl_printer_to_str(isl_"
-					     + str(self.type)
-					     + "_get_ctx(" + value + "))")
-		printer = gdb.parse_and_eval("isl_printer_print_"
-					     + str(self.type) + "("
-					     + str(printer) + ", "
-					     + value + ")")
-		string = gdb.parse_and_eval("(char*)isl_printer_get_str("
-					    + str(printer) + ")")
-		gdb.parse_and_eval("isl_printer_free(" + str(printer) + ")")
-		return string
-
-	def display_hint (self):
-		return 'string'
+    """Print an isl object"""
+
+    def __init__(self, val, type):
+        self.val = val
+        self.type = type
+
+    def to_string(self):
+        # Cast val to a void pointer to stop gdb using this pretty
+        # printer for the pointer which would lead to an infinite loop.
+        void_ptr = gdb.lookup_type("void").pointer()
+        value = str(self.val.cast(void_ptr))
+        printer = gdb.parse_and_eval(
+            "isl_printer_to_str(isl_" + str(self.type) + "_get_ctx(" + value + "))"
+        )
+        printer = gdb.parse_and_eval(
+            "isl_printer_print_"
+            + str(self.type)
+            + "("
+            + str(printer)
+            + ", "
+            + value
+            + ")"
+        )
+        string = gdb.parse_and_eval("(char*)isl_printer_get_str(" + str(printer) + ")")
+        gdb.parse_and_eval("isl_printer_free(" + str(printer) + ")")
+        return string
+
+    def display_hint(self):
+        return "string"
+
 
 class IslIntPrinter:
-	"""Print an isl_int """
-	def __init__ (self, val):
-		self.val = val
-
-	def to_string (self):
-		# Cast val to a void pointer to stop gdb using this pretty
-		# printer for the pointer which would lead to an infinite loop.
-		void_ptr = gdb.lookup_type('void').pointer()
-		value = str(self.val.cast(void_ptr))
-
-		context = gdb.parse_and_eval("isl_ctx_alloc()")
-		printer = gdb.parse_and_eval("isl_printer_to_str("
-					     + str(context) + ")")
-		printer = gdb.parse_and_eval("isl_printer_print_isl_int("
-					     + str(printer) + ", "
-					     + value + ")")
-		string = gdb.parse_and_eval("(char*)isl_printer_get_str("
-					    + str(printer) + ")")
-		gdb.parse_and_eval("isl_printer_free(" + str(printer) + ")")
-		gdb.parse_and_eval("isl_ctx_free(" + str(context) + ")")
-		return string
-
-	def display_hint (self):
-		return 'string'
-
-class IslPrintCommand (gdb.Command):
-	"""Print an isl value."""
-	def __init__ (self):
-		super (IslPrintCommand, self).__init__ ("islprint",
-							gdb.COMMAND_OBSCURE)
-	def invoke (self, arg, from_tty):
-		arg = gdb.parse_and_eval(arg);
-		printer = str_lookup_function(arg)
-
-		if printer == None:
-			print("No isl printer for this type")
-			return
-
-		print(printer.to_string())
+    """Print an isl_int"""
+
+    def __init__(self, val):
+        self.val = val
+
+    def to_string(self):
+        # Cast val to a void pointer to stop gdb using this pretty
+        # printer for the pointer which would lead to an infinite loop.
+        void_ptr = gdb.lookup_type("void").pointer()
+        value = str(self.val.cast(void_ptr))
+
+        context = gdb.parse_and_eval("isl_ctx_alloc()")
+        printer = gdb.parse_and_eval("isl_printer_to_str(" + str(context) + ")")
+        printer = gdb.parse_and_eval(
+            "isl_printer_print_isl_int(" + str(printer) + ", " + value + ")"
+        )
+        string = gdb.parse_and_eval("(char*)isl_printer_get_str(" + str(printer) + ")")
+        gdb.parse_and_eval("isl_printer_free(" + str(printer) + ")")
+        gdb.parse_and_eval("isl_ctx_free(" + str(context) + ")")
+        return string
+
+    def display_hint(self):
+        return "string"
+
+
+class IslPrintCommand(gdb.Command):
+    """Print an isl value."""
+
+    def __init__(self):
+        super(IslPrintCommand, self).__init__("islprint", gdb.COMMAND_OBSCURE)
+
+    def invoke(self, arg, from_tty):
+        arg = gdb.parse_and_eval(arg)
+        printer = str_lookup_function(arg)
+
+        if printer == None:
+            print("No isl printer for this type")
+            return
+
+        print(printer.to_string())
+
 
 IslPrintCommand()
 
-def str_lookup_function (val):
-	if val.type.code != gdb.TYPE_CODE_PTR:
-		if str(val.type) == "isl_int":
-			return IslIntPrinter(val)
-		else:
-			return None
-
-	lookup_tag = val.type.target()
-	regex = re.compile ("^isl_(.*)$")
-
-	if lookup_tag == None:
-		return None
-
-	m = regex.match (str(lookup_tag))
-
-	if m:
-		# Those types of printers defined in isl.
-		if m.group(1) in ["basic_set", "set", "union_set", "basic_map",
-				  "map", "union_map", "qpolynomial",
-				  "pw_qpolynomial", "pw_qpolynomial_fold",
-				  "union_pw_qpolynomial",
-				  "union_pw_qpolynomial_fold"]:
-			return IslObjectPrinter(val, m.group(1))
-	return None
+
+def str_lookup_function(val):
+    if val.type.code != gdb.TYPE_CODE_PTR:
+        if str(val.type) == "isl_int":
+            return IslIntPrinter(val)
+        else:
+            return None
+
+    lookup_tag = val.type.target()
+    regex = re.compile("^isl_(.*)$")
+
+    if lookup_tag == None:
+        return None
+
+    m = regex.match(str(lookup_tag))
+
+    if m:
+        # Those types of printers defined in isl.
+        if m.group(1) in [
+            "basic_set",
+            "set",
+            "union_set",
+            "basic_map",
+            "map",
+            "union_map",
+            "qpolynomial",
+            "pw_qpolynomial",
+            "pw_qpolynomial_fold",
+            "union_pw_qpolynomial",
+            "union_pw_qpolynomial_fold",
+        ]:
+            return IslObjectPrinter(val, m.group(1))
+    return None
+
 
 # Do not register the pretty printer.
 # gdb.current_objfile().pretty_printers.append(str_lookup_function)

diff --git a/polly/test/update_check.py b/polly/test/update_check.py
index 9890843a68109..88d95c247c063 100644
--- a/polly/test/update_check.py
+++ b/polly/test/update_check.py
@@ -11,19 +11,28 @@
 import re
 
 
-polly_src_dir = '''@POLLY_SOURCE_DIR@'''
-polly_lib_dir = '''@POLLY_LIB_DIR@'''
-shlibext = '''@LLVM_SHLIBEXT@'''
-llvm_tools_dir = '''@LLVM_TOOLS_DIR@'''
-llvm_polly_link_into_tools = not '''@LLVM_POLLY_LINK_INTO_TOOLS@'''.lower() in {'','0','n','no','off','false','notfound','llvm_polly_link_into_tools-notfound'}
-
-runre = re.compile(r'\s*\;\s*RUN\s*\:(?P<tool>.*)')
-filecheckre = re.compile(r'\s*(?P<tool>.*)\|\s*(?P<filecheck>FileCheck\s[^|]*)')
-emptyline = re.compile(r'\s*(\;\s*)?')
-commentline = re.compile(r'\s*(\;.*)?')
-
-
-def ltrim_emptylines(lines,meta=None):
+polly_src_dir = """@POLLY_SOURCE_DIR@"""
+polly_lib_dir = """@POLLY_LIB_DIR@"""
+shlibext = """@LLVM_SHLIBEXT@"""
+llvm_tools_dir = """@LLVM_TOOLS_DIR@"""
+llvm_polly_link_into_tools = not """@LLVM_POLLY_LINK_INTO_TOOLS@""".lower() in {
+    "",
+    "0",
+    "n",
+    "no",
+    "off",
+    "false",
+    "notfound",
+    "llvm_polly_link_into_tools-notfound",
+}
+
+runre = re.compile(r"\s*\;\s*RUN\s*\:(?P<tool>.*)")
+filecheckre = re.compile(r"\s*(?P<tool>.*)\|\s*(?P<filecheck>FileCheck\s[^|]*)")
+emptyline = re.compile(r"\s*(\;\s*)?")
+commentline = re.compile(r"\s*(\;.*)?")
+
+
+def ltrim_emptylines(lines, meta=None):
     while len(lines) and emptyline.fullmatch(lines[0]):
         del lines[0]
         if meta is not None:
@@ -44,14 +53,14 @@ def complete_exename(path, filename):
     complpath = os.path.join(path, filename)
     if os.path.isfile(complpath):
         return complpath
-    elif os.path.isfile(complpath + '.exe'):
-        return complpath + '.exe'
+    elif os.path.isfile(complpath + ".exe"):
+        return complpath + ".exe"
     return filename
 
 
 def indention(line):
-    for i,c in enumerate(line):
-        if c != ' ' and c != '\t':
+    for i, c in enumerate(line):
+        if c != " " and c != "\t":
             return i
     return None
 
@@ -59,127 +68,130 @@ def indention(line):
 def common_indent(lines):
     indentions = (indention(line) for line in lines)
     indentions = (indent for indent in indentions if indent is not None)
-    return min(indentions,default=0)
+    return min(indentions, default=0)
 
 
-funcre = re.compile(r'^    Function: \S*$')
-regionre = re.compile(r'^    Region: \S*$')
-depthre = re.compile(r'^    Max Loop Depth: .*')
-paramre = re.compile(r'    [0-9a-z-A-Z_]+\: .*')
+funcre = re.compile(r"^    Function: \S*$")
+regionre = re.compile(r"^    Region: \S*$")
+depthre = re.compile(r"^    Max Loop Depth: .*")
+paramre = re.compile(r"    [0-9a-z-A-Z_]+\: .*")
+
 
 def classyfier1(lines):
     i = iter(lines)
     line = i.__next__()
     while True:
-        if line.startswith("Printing analysis 'Polly - Calculate dependences' for region: "):
-            yield {'PrintingDependenceInfo'}
+        if line.startswith(
+            "Printing analysis 'Polly - Calculate dependences' for region: "
+        ):
+            yield {"PrintingDependenceInfo"}
         elif line.startswith("remark: "):
-            yield {'Remark'}
+            yield {"Remark"}
         elif funcre.fullmatch(line):
-            yield {'Function'}
+            yield {"Function"}
         elif regionre.fullmatch(line):
-            yield  { 'Region'}
+            yield {"Region"}
         elif depthre.fullmatch(line):
-            yield  {'MaxLoopDepth'}
-        elif line == '    Invariant Accesses: {':
+            yield {"MaxLoopDepth"}
+        elif line == "    Invariant Accesses: {":
             while True:
-                yield { 'InvariantAccesses'}
-                if line == '    }':
+                yield {"InvariantAccesses"}
+                if line == "    }":
                     break
                 line = i.__next__()
-        elif line == '    Context:':
-            yield  {'Context'}
+        elif line == "    Context:":
+            yield {"Context"}
             line = i.__next__()
-            yield  {'Context'}
-        elif line == '    Assumed Context:':
-            yield  {'AssumedContext'}
+            yield {"Context"}
+        elif line == "    Assumed Context:":
+            yield {"AssumedContext"}
             line = i.__next__()
-            yield  {'AssumedContext'}
-        elif line == '    Invalid Context:':
-            yield  {'InvalidContext'}
+            yield {"AssumedContext"}
+        elif line == "    Invalid Context:":
+            yield {"InvalidContext"}
             line = i.__next__()
-            yield  {'InvalidContext'}
-        elif line == '    Boundary Context:':
-            yield  {'BoundaryContext'}
+            yield {"InvalidContext"}
+        elif line == "    Boundary Context:":
+            yield {"BoundaryContext"}
             line = i.__next__()
-            yield  {'BoundaryContext'}
+            yield {"BoundaryContext"}
             line = i.__next__()
             while paramre.fullmatch(line):
-                yield  {'Param'}
+                yield {"Param"}
                 line = i.__next__()
             continue
-        elif line == '    Arrays {':
+        elif line == "    Arrays {":
             while True:
-                yield  {'Arrays'}
-                if line == '    }':
+                yield {"Arrays"}
+                if line == "    }":
                     break
                 line = i.__next__()
-        elif line == '    Arrays (Bounds as pw_affs) {':
+        elif line == "    Arrays (Bounds as pw_affs) {":
             while True:
-                yield  {'PwAffArrays'}
-                if line == '    }':
+                yield {"PwAffArrays"}
+                if line == "    }":
                     break
                 line = i.__next__()
-        elif line.startswith('    Alias Groups ('):
+        elif line.startswith("    Alias Groups ("):
             while True:
-                yield  {'AliasGroups'}
+                yield {"AliasGroups"}
                 line = i.__next__()
-                if not line.startswith('        '):
+                if not line.startswith("        "):
                     break
             continue
-        elif line == '    Statements {':
+        elif line == "    Statements {":
             while True:
-                yield  {'Statements'}
-                if line == '    }':
+                yield {"Statements"}
+                if line == "    }":
                     break
                 line = i.__next__()
-        elif line == '    RAW dependences:':
-            yield {'RAWDep','BasicDep','Dep','DepInfo'}
+        elif line == "    RAW dependences:":
+            yield {"RAWDep", "BasicDep", "Dep", "DepInfo"}
             line = i.__next__()
-            while line.startswith('        '):
-                yield  {'RAWDep','BasicDep','Dep','DepInfo'}
+            while line.startswith("        "):
+                yield {"RAWDep", "BasicDep", "Dep", "DepInfo"}
                 line = i.__next__()
             continue
-        elif line == '    WAR dependences:':
-            yield {'WARDep','BasicDep','Dep','DepInfo'}
+        elif line == "    WAR dependences:":
+            yield {"WARDep", "BasicDep", "Dep", "DepInfo"}
             line = i.__next__()
-            while line.startswith('        '):
-                yield  {'WARDep','BasicDep','Dep','DepInfo'}
+            while line.startswith("        "):
+                yield {"WARDep", "BasicDep", "Dep", "DepInfo"}
                 line = i.__next__()
             continue
-        elif line == '    WAW dependences:':
-            yield {'WAWDep','BasicDep','Dep','DepInfo'}
+        elif line == "    WAW dependences:":
+            yield {"WAWDep", "BasicDep", "Dep", "DepInfo"}
             line = i.__next__()
-            while line.startswith('        '):
-                yield  {'WAWDep','BasicDep','Dep','DepInfo'}
+            while line.startswith("        "):
+                yield {"WAWDep", "BasicDep", "Dep", "DepInfo"}
                 line = i.__next__()
             continue
-        elif line == '    Reduction dependences:':
-            yield {'RedDep','Dep','DepInfo'}
+        elif line == "    Reduction dependences:":
+            yield {"RedDep", "Dep", "DepInfo"}
             line = i.__next__()
-            while line.startswith('        '):
-                yield  {'RedDep','Dep','DepInfo'}
+            while line.startswith("        "):
+                yield {"RedDep", "Dep", "DepInfo"}
                 line = i.__next__()
             continue
-        elif line == '    Transitive closure of reduction dependences:':
-            yield {'TransitiveClosureDep','DepInfo'}
+        elif line == "    Transitive closure of reduction dependences:":
+            yield {"TransitiveClosureDep", "DepInfo"}
             line = i.__next__()
-            while line.startswith('        '):
-                yield  {'TransitiveClosureDep','DepInfo'}
+            while line.startswith("        "):
+                yield {"TransitiveClosureDep", "DepInfo"}
                 line = i.__next__()
             continue
         elif line.startswith("New access function '"):
-            yield {'NewAccessFunction'}
-        elif line == 'Schedule before flattening {':
+            yield {"NewAccessFunction"}
+        elif line == "Schedule before flattening {":
             while True:
-                yield  {'ScheduleBeforeFlattening'}
-                if line == '}':
+                yield {"ScheduleBeforeFlattening"}
+                if line == "}":
                     break
                 line = i.__next__()
-        elif line == 'Schedule after flattening {':
+        elif line == "Schedule after flattening {":
             while True:
-                yield  {'ScheduleAfterFlattening'}
-                if line == '}':
+                yield {"ScheduleAfterFlattening"}
+                if line == "}":
                     break
                 line = i.__next__()
         else:
@@ -192,15 +204,17 @@ def classyfier2(lines):
     line = i.__next__()
     while True:
         if funcre.fullmatch(line):
-            while line.startswith('    '):
-                yield  {'FunctionDetail'}
+            while line.startswith("    "):
+                yield {"FunctionDetail"}
                 line = i.__next__()
             continue
-        elif line.startswith("Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: "):
-            yield {'PrintingIslAst'}
+        elif line.startswith(
+            "Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: "
+        ):
+            yield {"PrintingIslAst"}
             line = i.__next__()
-            while not line.startswith('Printing analysis'):
-                yield  {'AstDetail'}
+            while not line.startswith("Printing analysis"):
+                yield {"AstDetail"}
                 line = i.__next__()
             continue
         else:
@@ -208,22 +222,56 @@ def classyfier2(lines):
         line = i.__next__()
 
 
-replrepl = {'{{':'{{[{][{]}}','}}': '{{[}][}]}}', '[[':'{{\[\[}}',']]': '{{\]\]}}'}
-replre = re.compile('|'.join(re.escape(k) for k in replrepl.keys()))
+replrepl = {"{{": "{{[{][{]}}", "}}": "{{[}][}]}}", "[[": "{{\[\[}}", "]]": "{{\]\]}}"}
+replre = re.compile("|".join(re.escape(k) for k in replrepl.keys()))
+
 
 def main():
     parser = argparse.ArgumentParser(description="Update CHECK lines")
-    parser.add_argument('testfile',help="File to update (absolute or relative to --testdir)")
-    parser.add_argument('--check-style',choices=['CHECK','CHECK-NEXT'],default='CHECK-NEXT',help="What kind of checks lines to generate")
-    parser.add_argument('--check-position',choices=['end','before-content','autodetect'],default='autodetect',help="Where to add the CHECK lines into the file; 'autodetect' searches for the first 'CHECK' line ind inserts it there")
-    parser.add_argument('--check-include',action='append',default=[], help="What parts of the output lines to check; use syntax 'CHECK=include' to apply to one CHECK-prefix only (by default, everything)")
-    parser.add_argument('--check-label-include',action='append',default=[],help="Use CHECK-LABEL for these includes")
-    parser.add_argument('--check-part-newline',action='store_true',help="Add empty line between different check parts")
-    parser.add_argument('--prefix-only',action='append',default=None,help="Update only these prefixes (default: all)")
-    parser.add_argument('--bindir',help="Location of the opt program")
-    parser.add_argument('--testdir',help="Root dir for unit tests")
-    parser.add_argument('--inplace','-i',action='store_true',help="Replace input file")
-    parser.add_argument('--output','-o',help="Write changed input to this file")
+    parser.add_argument(
+        "testfile", help="File to update (absolute or relative to --testdir)"
+    )
+    parser.add_argument(
+        "--check-style",
+        choices=["CHECK", "CHECK-NEXT"],
+        default="CHECK-NEXT",
+        help="What kind of checks lines to generate",
+    )
+    parser.add_argument(
+        "--check-position",
+        choices=["end", "before-content", "autodetect"],
+        default="autodetect",
+        help="Where to add the CHECK lines into the file; 'autodetect' searches for the first 'CHECK' line ind inserts it there",
+    )
+    parser.add_argument(
+        "--check-include",
+        action="append",
+        default=[],
+        help="What parts of the output lines to check; use syntax 'CHECK=include' to apply to one CHECK-prefix only (by default, everything)",
+    )
+    parser.add_argument(
+        "--check-label-include",
+        action="append",
+        default=[],
+        help="Use CHECK-LABEL for these includes",
+    )
+    parser.add_argument(
+        "--check-part-newline",
+        action="store_true",
+        help="Add empty line between different check parts",
+    )
+    parser.add_argument(
+        "--prefix-only",
+        action="append",
+        default=None,
+        help="Update only these prefixes (default: all)",
+    )
+    parser.add_argument("--bindir", help="Location of the opt program")
+    parser.add_argument("--testdir", help="Root dir for unit tests")
+    parser.add_argument(
+        "--inplace", "-i", action="store_true", help="Replace input file"
+    )
+    parser.add_argument("--output", "-o", help="Write changed input to this file")
     known = parser.parse_args()
 
     if not known.inplace and known.output is None:
@@ -236,13 +284,13 @@ def main():
     outfile = known.output
 
     filecheckparser = argparse.ArgumentParser(add_help=False)
-    filecheckparser.add_argument('-check-prefix','--check-prefix',default='CHECK')
+    filecheckparser.add_argument("-check-prefix", "--check-prefix", default="CHECK")
 
     filename = known.testfile
-    for dir in ['.', known.testdir, os.path.join(polly_src_dir,'test'), polly_src_dir]:
+    for dir in [".", known.testdir, os.path.join(polly_src_dir, "test"), polly_src_dir]:
         if not dir:
             continue
-        testfilename = os.path.join(dir,filename)
+        testfilename = os.path.join(dir, filename)
         if os.path.isfile(testfilename):
             filename = testfilename
             break
@@ -253,23 +301,23 @@ def main():
     allchecklines = []
     checkprefixes = []
 
-    with open(filename, 'r') as file:
-        oldlines = [line.rstrip('\r\n') for line in file.readlines()]
+    with open(filename, "r") as file:
+        oldlines = [line.rstrip("\r\n") for line in file.readlines()]
 
     runlines = []
     for line in oldlines:
         m = runre.match(line)
         if m:
-            runlines.append(m.group('tool'))
+            runlines.append(m.group("tool"))
 
-    continuation = ''
+    continuation = ""
     newrunlines = []
     for line in runlines:
-        if line.endswith('\\'):
-            continuation += line[:-2] + ' '
+        if line.endswith("\\"):
+            continuation += line[:-2] + " "
         else:
             newrunlines.append(continuation + line)
-            continuation = ''
+            continuation = ""
     if continuation:
         newrunlines.append(continuation)
 
@@ -278,7 +326,7 @@ def main():
         if not m:
             continue
 
-        tool, filecheck = m.group('tool', 'filecheck')
+        tool, filecheck = m.group("tool", "filecheck")
         filecheck = shlex.split(filecheck)
         tool = shlex.split(tool)
         if known.bindir is not None:
@@ -295,14 +343,17 @@ def main():
         newtool = []
         optstderr = None
         for toolarg in tool:
-            toolarg = toolarg.replace('%s', filename)
-            toolarg = toolarg.replace('%S', os.path.dirname(filename))
-            if toolarg == '%loadPolly':
+            toolarg = toolarg.replace("%s", filename)
+            toolarg = toolarg.replace("%S", os.path.dirname(filename))
+            if toolarg == "%loadPolly":
                 if not llvm_polly_link_into_tools:
-                    newtool += ['-load',os.path.join(polly_lib_dir,'LLVMPolly' + shlibext)]
-                newtool.append('-polly-process-unprofitable')
-                newtool.append('-polly-remarks-minimal')
-            elif toolarg == '2>&1':
+                    newtool += [
+                        "-load",
+                        os.path.join(polly_lib_dir, "LLVMPolly" + shlibext),
+                    ]
+                newtool.append("-polly-process-unprofitable")
+                newtool.append("-polly-remarks-minimal")
+            elif toolarg == "2>&1":
                 optstderr = subprocess.STDOUT
             else:
                 newtool.append(toolarg)
@@ -310,21 +361,25 @@ def main():
 
         inpfile = None
         i = 1
-        while i <  len(tool):
-            if tool[i] == '<':
+        while i < len(tool):
+            if tool[i] == "<":
                 inpfile = tool[i + 1]
-                del tool[i:i + 2]
+                del tool[i : i + 2]
                 continue
             i += 1
         if inpfile:
             with open(inpfile) as inp:
-                retlines = subprocess.check_output(tool,universal_newlines=True,stdin=inp,stderr=optstderr)
+                retlines = subprocess.check_output(
+                    tool, universal_newlines=True, stdin=inp, stderr=optstderr
+                )
         else:
-            retlines = subprocess.check_output(tool,universal_newlines=True,stderr=optstderr)
-        retlines = [line.replace('\t', '    ') for line in retlines.splitlines()]
+            retlines = subprocess.check_output(
+                tool, universal_newlines=True, stderr=optstderr
+            )
+        retlines = [line.replace("\t", "    ") for line in retlines.splitlines()]
         check_include = []
         for checkme in known.check_include + known.check_label_include:
-            parts = checkme.split('=')
+            parts = checkme.split("=")
             if len(parts) == 2:
                 if parts[0] == check_prefix:
                     check_include.append(parts[1])
@@ -335,12 +390,17 @@ def main():
             filtered_retlines = []
             classified_retlines = []
             lastmatch = None
-            for line,kind in ((line,class1.union(class2)) for line,class1,class2 in zip(retlines,classyfier1(retlines), classyfier2(retlines))):
+            for line, kind in (
+                (line, class1.union(class2))
+                for line, class1, class2 in zip(
+                    retlines, classyfier1(retlines), classyfier2(retlines)
+                )
+            ):
                 match = kind.intersection(check_include)
                 if match:
                     if lastmatch != match:
-                        filtered_retlines.append('')
-                        classified_retlines.append({'Separator'})
+                        filtered_retlines.append("")
+                        classified_retlines.append({"Separator"})
                     filtered_retlines.append(line)
                     classified_retlines.append(kind)
                 lastmatch = match
@@ -350,44 +410,50 @@ def main():
             classified_retlines = (set() for line in retlines)
 
         rtrim_emptylines(retlines)
-        ltrim_emptylines(retlines,classified_retlines)
-        retlines = [replre.sub(lambda m: replrepl[m.group(0)], line) for line in retlines]
+        ltrim_emptylines(retlines, classified_retlines)
+        retlines = [
+            replre.sub(lambda m: replrepl[m.group(0)], line) for line in retlines
+        ]
         indent = common_indent(retlines)
         retlines = [line[indent:] for line in retlines]
         checklines = []
         previous_was_empty = True
-        for line,kind in zip(retlines,classified_retlines):
+        for line, kind in zip(retlines, classified_retlines):
             if line:
-                if known.check_style == 'CHECK' and known.check_label_include:
+                if known.check_style == "CHECK" and known.check_label_include:
                     if not kind.isdisjoint(known.check_label_include):
-                        checklines.append('; ' + check_prefix + '-LABEL: ' + line)
+                        checklines.append("; " + check_prefix + "-LABEL: " + line)
                     else:
-                        checklines.append('; ' + check_prefix + ':       ' + line)
-                elif known.check_style == 'CHECK':
-                    checklines.append('; ' + check_prefix + ': ' + line)
+                        checklines.append("; " + check_prefix + ":       " + line)
+                elif known.check_style == "CHECK":
+                    checklines.append("; " + check_prefix + ": " + line)
                 elif known.check_label_include and known.check_label_include:
                     if not kind.isdisjoint(known.check_label_include):
-                        checklines.append('; ' + check_prefix + '-LABEL: ' + line)
+                        checklines.append("; " + check_prefix + "-LABEL: " + line)
                     elif previous_was_empty:
-                        checklines.append('; ' + check_prefix + ':       ' + line)
+                        checklines.append("; " + check_prefix + ":       " + line)
                     else:
-                        checklines.append('; ' + check_prefix + '-NEXT:  ' + line)
+                        checklines.append("; " + check_prefix + "-NEXT:  " + line)
                 else:
                     if previous_was_empty:
-                        checklines.append('; ' + check_prefix + ':      ' + line)
+                        checklines.append("; " + check_prefix + ":      " + line)
                     else:
-                        checklines.append('; ' + check_prefix + '-NEXT: ' + line)
+                        checklines.append("; " + check_prefix + "-NEXT: " + line)
                 previous_was_empty = False
             else:
-                if not 'Separator' in kind or known.check_part_newline:
-                    checklines.append(';')
+                if not "Separator" in kind or known.check_part_newline:
+                    checklines.append(";")
                 previous_was_empty = True
         allchecklines.append(checklines)
 
     if not checkprefixes:
         return
 
-    checkre = re.compile(r'^\s*\;\s*(' + '|'.join([re.escape(s) for s in checkprefixes]) + ')(\-NEXT|\-DAG|\-NOT|\-LABEL|\-SAME)?\s*\:')
+    checkre = re.compile(
+        r"^\s*\;\s*("
+        + "|".join([re.escape(s) for s in checkprefixes])
+        + ")(\-NEXT|\-DAG|\-NOT|\-LABEL|\-SAME)?\s*\:"
+    )
     firstcheckline = None
     firstnoncommentline = None
     headerlines = []
@@ -413,27 +479,30 @@ def main():
             uptonowlines = []
             lastwascheck = False
 
-    for i,line in enumerate(newlines):
+    for i, line in enumerate(newlines):
         if not commentline.fullmatch(line):
             firstnoncommentline = i
             break
 
-    with open(outfile,'w',newline='') as file:
+    with open(outfile, "w", newline="") as file:
+
         def writelines(lines):
             for line in lines:
                 file.write(line)
-                file.write('\n')
+                file.write("\n")
 
-        if firstcheckline is not None and known.check_position == 'autodetect':
+        if firstcheckline is not None and known.check_position == "autodetect":
             writelines(newlines[:firstcheckline])
             writelines(uptonowlines)
-            for i,checklines in enumerate(allchecklines):
+            for i, checklines in enumerate(allchecklines):
                 if i != 0:
-                    file.write('\n')
+                    file.write("\n")
                 writelines(checklines)
             writelines(newlines[firstcheckline:])
             writelines(emptylines)
-        elif firstnoncommentline is not None and known.check_position == 'before-content':
+        elif (
+            firstnoncommentline is not None and known.check_position == "before-content"
+        ):
             headerlines = newlines[:firstnoncommentline]
             rtrim_emptylines(headerlines)
             contentlines = newlines[firstnoncommentline:]
@@ -441,9 +510,9 @@ def writelines(lines):
 
             writelines(headerlines)
             for checklines in allchecklines:
-                file.write('\n')
+                file.write("\n")
                 writelines(checklines)
-            file.write('\n')
+            file.write("\n")
             writelines(contentlines)
             writelines(uptonowlines)
             writelines(emptylines)
@@ -451,9 +520,9 @@ def writelines(lines):
             writelines(newlines)
             rtrim_emptylines(newlines)
             for checklines in allchecklines:
-                file.write('\n\n')
+                file.write("\n\n")
                 writelines(checklines)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/polly/utils/argparse.py b/polly/utils/argparse.py
index a0601290db2cd..434d326951287 100644
--- a/polly/utils/argparse.py
+++ b/polly/utils/argparse.py
@@ -75,17 +75,17 @@
 still considered an implementation detail.)
 """
 
-__version__ = '1.1'
+__version__ = "1.1"
 __all__ = [
-    'ArgumentParser',
-    'ArgumentError',
-    'Namespace',
-    'Action',
-    'FileType',
-    'HelpFormatter',
-    'RawDescriptionHelpFormatter',
-    'RawTextHelpFormatter',
-    'ArgumentDefaultsHelpFormatter',
+    "ArgumentParser",
+    "ArgumentError",
+    "Namespace",
+    "Action",
+    "FileType",
+    "HelpFormatter",
+    "RawDescriptionHelpFormatter",
+    "RawTextHelpFormatter",
+    "ArgumentDefaultsHelpFormatter",
 ]
 
 
@@ -120,30 +120,34 @@ def _sorted(iterable, reverse=False):
 
 
 def _callable(obj):
-    return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
+    return hasattr(obj, "__call__") or hasattr(obj, "__bases__")
+
 
 # silence Python 2.6 buggy warnings about Exception.message
 if _sys.version_info[:2] == (2, 6):
     import warnings
+
     warnings.filterwarnings(
-        action='ignore',
-        message='BaseException.message has been deprecated as of Python 2.6',
+        action="ignore",
+        message="BaseException.message has been deprecated as of Python 2.6",
         category=DeprecationWarning,
-        module='argparse')
+        module="argparse",
+    )
 
 
-SUPPRESS = '==SUPPRESS=='
+SUPPRESS = "==SUPPRESS=="
 
-OPTIONAL = '?'
-ZERO_OR_MORE = '*'
-ONE_OR_MORE = '+'
-PARSER = 'A...'
-REMAINDER = '...'
+OPTIONAL = "?"
+ZERO_OR_MORE = "*"
+ONE_OR_MORE = "+"
+PARSER = "A..."
+REMAINDER = "..."
 
 # =============================
 # Utility functions and classes
 # =============================
 
+
 class _AttributeHolder(object):
     """Abstract base class that provides __repr__.
 
@@ -159,8 +163,8 @@ def __repr__(self):
         for arg in self._get_args():
             arg_strings.append(repr(arg))
         for name, value in self._get_kwargs():
-            arg_strings.append('%s=%r' % (name, value))
-        return '%s(%s)' % (type_name, ', '.join(arg_strings))
+            arg_strings.append("%s=%r" % (name, value))
+        return "%s(%s)" % (type_name, ", ".join(arg_strings))
 
     def _get_kwargs(self):
         return _sorted(self.__dict__.items())
@@ -179,6 +183,7 @@ def _ensure_value(namespace, name, value):
 # Formatting Help
 # ===============
 
+
 class HelpFormatter(object):
     """Formatter for generating usage messages and argument help strings.
 
@@ -186,16 +191,12 @@ class HelpFormatter(object):
     provided by the class are considered an implementation detail.
     """
 
-    def __init__(self,
-                 prog,
-                 indent_increment=2,
-                 max_help_position=24,
-                 width=None):
+    def __init__(self, prog, indent_increment=2, max_help_position=24, width=None):
 
         # default setting for width
         if width is None:
             try:
-                width = int(_os.environ['COLUMNS'])
+                width = int(_os.environ["COLUMNS"])
             except (KeyError, ValueError):
                 width = 80
             width -= 2
@@ -212,8 +213,8 @@ def __init__(self,
         self._root_section = self._Section(self, None)
         self._current_section = self._root_section
 
-        self._whitespace_matcher = _re.compile(r'\s+')
-        self._long_break_matcher = _re.compile(r'\n\n\n+')
+        self._whitespace_matcher = _re.compile(r"\s+")
+        self._long_break_matcher = _re.compile(r"\n\n\n+")
 
     # ===============================
     # Section and indentation methods
@@ -224,11 +225,10 @@ def _indent(self):
 
     def _dedent(self):
         self._current_indent -= self._indent_increment
-        assert self._current_indent >= 0, 'Indent decreased below 0.'
+        assert self._current_indent >= 0, "Indent decreased below 0."
         self._level -= 1
 
     class _Section(object):
-
         def __init__(self, formatter, parent, heading=None):
             self.formatter = formatter
             self.parent = parent
@@ -248,17 +248,17 @@ def format_help(self):
 
             # return nothing if the section was empty
             if not item_help:
-                return ''
+                return ""
 
             # add the heading if the section was non-empty
             if self.heading is not SUPPRESS and self.heading is not None:
                 current_indent = self.formatter._current_indent
-                heading = '%*s%s:\n' % (current_indent, '', self.heading)
+                heading = "%*s%s:\n" % (current_indent, "", self.heading)
             else:
-                heading = ''
+                heading = ""
 
             # join the section-initial newline, the heading and the help
-            return join(['\n', heading, item_help, '\n'])
+            return join(["\n", heading, item_help, "\n"])
 
     def _add_item(self, func, args):
         self._current_section.items.append((func, args))
@@ -297,8 +297,7 @@ def add_argument(self, action):
             # update the maximum item length
             invocation_length = max([len(s) for s in invocations])
             action_length = invocation_length + self._current_indent
-            self._action_max_length = max(self._action_max_length,
-                                          action_length)
+            self._action_max_length = max(self._action_max_length, action_length)
 
             # add the item to the list
             self._add_item(self._format_action, [action])
@@ -313,18 +312,16 @@ def add_arguments(self, actions):
     def format_help(self):
         help = self._root_section.format_help()
         if help:
-            help = self._long_break_matcher.sub('\n\n', help)
-            help = help.strip('\n') + '\n'
+            help = self._long_break_matcher.sub("\n\n", help)
+            help = help.strip("\n") + "\n"
         return help
 
     def _join_parts(self, part_strings):
-        return ''.join([part
-                        for part in part_strings
-                        if part and part is not SUPPRESS])
+        return "".join([part for part in part_strings if part and part is not SUPPRESS])
 
     def _format_usage(self, usage, actions, groups, prefix):
         if prefix is None:
-            prefix = _('usage: ')
+            prefix = _("usage: ")
 
         # if usage is specified, use that
         if usage is not None:
@@ -332,11 +329,11 @@ def _format_usage(self, usage, actions, groups, prefix):
 
         # if no optionals or positionals are available, usage is just prog
         elif usage is None and not actions:
-            usage = '%(prog)s' % dict(prog=self._prog)
+            usage = "%(prog)s" % dict(prog=self._prog)
 
         # if optionals and positionals are available, calculate usage
         elif usage is None:
-            prog = '%(prog)s' % dict(prog=self._prog)
+            prog = "%(prog)s" % dict(prog=self._prog)
 
             # split optionals from positionals
             optionals = []
@@ -350,20 +347,20 @@ def _format_usage(self, usage, actions, groups, prefix):
             # build full usage string
             format = self._format_actions_usage
             action_usage = format(optionals + positionals, groups)
-            usage = ' '.join([s for s in [prog, action_usage] if s])
+            usage = " ".join([s for s in [prog, action_usage] if s])
 
             # wrap the usage parts if it's too long
             text_width = self._width - self._current_indent
             if len(prefix) + len(usage) > text_width:
 
                 # break usage into wrappable parts
-                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+                part_regexp = r"\(.*?\)+|\[.*?\]+|\S+"
                 opt_usage = format(optionals, groups)
                 pos_usage = format(positionals, groups)
                 opt_parts = _re.findall(part_regexp, opt_usage)
                 pos_parts = _re.findall(part_regexp, pos_usage)
-                assert ' '.join(opt_parts) == opt_usage
-                assert ' '.join(pos_parts) == pos_usage
+                assert " ".join(opt_parts) == opt_usage
+                assert " ".join(pos_parts) == pos_usage
 
                 # helper for wrapping lines
                 def get_lines(parts, indent, prefix=None):
@@ -375,20 +372,20 @@ def get_lines(parts, indent, prefix=None):
                         line_len = len(indent) - 1
                     for part in parts:
                         if line_len + 1 + len(part) > text_width:
-                            lines.append(indent + ' '.join(line))
+                            lines.append(indent + " ".join(line))
                             line = []
                             line_len = len(indent) - 1
                         line.append(part)
                         line_len += len(part) + 1
                     if line:
-                        lines.append(indent + ' '.join(line))
+                        lines.append(indent + " ".join(line))
                     if prefix is not None:
-                        lines[0] = lines[0][len(indent):]
+                        lines[0] = lines[0][len(indent) :]
                     return lines
 
                 # if prog is short, follow it with optionals or positionals
                 if len(prefix) + len(prog) <= 0.75 * text_width:
-                    indent = ' ' * (len(prefix) + len(prog) + 1)
+                    indent = " " * (len(prefix) + len(prog) + 1)
                     if opt_parts:
                         lines = get_lines([prog] + opt_parts, indent, prefix)
                         lines.extend(get_lines(pos_parts, indent))
@@ -399,7 +396,7 @@ def get_lines(parts, indent, prefix=None):
 
                 # if prog is long, put it on its own line
                 else:
-                    indent = ' ' * len(prefix)
+                    indent = " " * len(prefix)
                     parts = opt_parts + pos_parts
                     lines = get_lines(parts, indent)
                     if len(lines) > 1:
@@ -409,10 +406,10 @@ def get_lines(parts, indent, prefix=None):
                     lines = [prog] + lines
 
                 # join lines into usage
-                usage = '\n'.join(lines)
+                usage = "\n".join(lines)
 
         # prefix with 'usage:'
-        return '%s%s\n\n' % (prefix, usage)
+        return "%s%s\n\n" % (prefix, usage)
 
     def _format_actions_usage(self, actions, groups):
         # find group indices and identify actions in groups
@@ -429,13 +426,13 @@ def _format_actions_usage(self, actions, groups):
                     for action in group._group_actions:
                         group_actions.add(action)
                     if not group.required:
-                        inserts[start] = '['
-                        inserts[end] = ']'
+                        inserts[start] = "["
+                        inserts[end] = "]"
                     else:
-                        inserts[start] = '('
-                        inserts[end] = ')'
+                        inserts[start] = "("
+                        inserts[end] = ")"
                     for i in range(start + 1, end):
-                        inserts[i] = '|'
+                        inserts[i] = "|"
 
         # collect all actions format strings
         parts = []
@@ -445,9 +442,9 @@ def _format_actions_usage(self, actions, groups):
             # remove | separators for suppressed arguments
             if action.help is SUPPRESS:
                 parts.append(None)
-                if inserts.get(i) == '|':
+                if inserts.get(i) == "|":
                     inserts.pop(i)
-                elif inserts.get(i + 1) == '|':
+                elif inserts.get(i + 1) == "|":
                     inserts.pop(i + 1)
 
             # produce all arg strings
@@ -456,7 +453,7 @@ def _format_actions_usage(self, actions, groups):
 
                 # if it's in a group, strip the outer []
                 if action in group_actions:
-                    if part[0] == '[' and part[-1] == ']':
+                    if part[0] == "[" and part[-1] == "]":
                         part = part[1:-1]
 
                 # add the action string to the list
@@ -469,18 +466,18 @@ def _format_actions_usage(self, actions, groups):
                 # if the Optional doesn't take a value, format is:
                 #    -s or --long
                 if action.nargs == 0:
-                    part = '%s' % option_string
+                    part = "%s" % option_string
 
                 # if the Optional takes a value, format is:
                 #    -s ARGS or --long ARGS
                 else:
                     default = action.dest.upper()
                     args_string = self._format_args(action, default)
-                    part = '%s %s' % (option_string, args_string)
+                    part = "%s %s" % (option_string, args_string)
 
                 # make it look optional if it's not required or in a group
                 if not action.required and action not in group_actions:
-                    part = '[%s]' % part
+                    part = "[%s]" % part
 
                 # add the action string to the list
                 parts.append(part)
@@ -490,50 +487,49 @@ def _format_actions_usage(self, actions, groups):
             parts[i:i] = [inserts[i]]
 
         # join all the action items with spaces
-        text = ' '.join([item for item in parts if item is not None])
+        text = " ".join([item for item in parts if item is not None])
 
         # clean up separators for mutually exclusive groups
-        open = r'[\[(]'
-        close = r'[\])]'
-        text = _re.sub(r'(%s) ' % open, r'\1', text)
-        text = _re.sub(r' (%s)' % close, r'\1', text)
-        text = _re.sub(r'%s *%s' % (open, close), r'', text)
-        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
+        open = r"[\[(]"
+        close = r"[\])]"
+        text = _re.sub(r"(%s) " % open, r"\1", text)
+        text = _re.sub(r" (%s)" % close, r"\1", text)
+        text = _re.sub(r"%s *%s" % (open, close), r"", text)
+        text = _re.sub(r"\(([^|]*)\)", r"\1", text)
         text = text.strip()
 
         # return the text
         return text
 
     def _format_text(self, text):
-        if '%(prog)' in text:
+        if "%(prog)" in text:
             text = text % dict(prog=self._prog)
         text_width = self._width - self._current_indent
-        indent = ' ' * self._current_indent
-        return self._fill_text(text, text_width, indent) + '\n\n'
+        indent = " " * self._current_indent
+        return self._fill_text(text, text_width, indent) + "\n\n"
 
     def _format_action(self, action):
         # determine the required width and the entry label
-        help_position = min(self._action_max_length + 2,
-                            self._max_help_position)
+        help_position = min(self._action_max_length + 2, self._max_help_position)
         help_width = self._width - help_position
         action_width = help_position - self._current_indent - 2
         action_header = self._format_action_invocation(action)
 
         # ho nelp; start on same line and add a final newline
         if not action.help:
-            tup = self._current_indent, '', action_header
-            action_header = '%*s%s\n' % tup
+            tup = self._current_indent, "", action_header
+            action_header = "%*s%s\n" % tup
 
         # short action name; start on the same line and pad two spaces
         elif len(action_header) <= action_width:
-            tup = self._current_indent, '', action_width, action_header
-            action_header = '%*s%-*s  ' % tup
+            tup = self._current_indent, "", action_width, action_header
+            action_header = "%*s%-*s  " % tup
             indent_first = 0
 
         # long action name; start on the next line
         else:
-            tup = self._current_indent, '', action_header
-            action_header = '%*s%s\n' % tup
+            tup = self._current_indent, "", action_header
+            action_header = "%*s%s\n" % tup
             indent_first = help_position
 
         # collect the pieces of the action help
@@ -543,13 +539,13 @@ def _format_action(self, action):
         if action.help:
             help_text = self._expand_help(action)
             help_lines = self._split_lines(help_text, help_width)
-            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+            parts.append("%*s%s\n" % (indent_first, "", help_lines[0]))
             for line in help_lines[1:]:
-                parts.append('%*s%s\n' % (help_position, '', line))
+                parts.append("%*s%s\n" % (help_position, "", line))
 
         # or add a newline if the description doesn't end with one
-        elif not action_header.endswith('\n'):
-            parts.append('\n')
+        elif not action_header.endswith("\n"):
+            parts.append("\n")
 
         # if there are any sub-actions, add their help as well
         for subaction in self._iter_indented_subactions(action):
@@ -560,7 +556,7 @@ def _format_action(self, action):
 
     def _format_action_invocation(self, action):
         if not action.option_strings:
-            metavar, = self._metavar_formatter(action, action.dest)(1)
+            (metavar,) = self._metavar_formatter(action, action.dest)(1)
             return metavar
 
         else:
@@ -577,16 +573,16 @@ def _format_action_invocation(self, action):
                 default = action.dest.upper()
                 args_string = self._format_args(action, default)
                 for option_string in action.option_strings:
-                    parts.append('%s %s' % (option_string, args_string))
+                    parts.append("%s %s" % (option_string, args_string))
 
-            return ', '.join(parts)
+            return ", ".join(parts)
 
     def _metavar_formatter(self, action, default_metavar):
         if action.metavar is not None:
             result = action.metavar
         elif action.choices is not None:
             choice_strs = [str(choice) for choice in action.choices]
-            result = '{%s}' % ','.join(choice_strs)
+            result = "{%s}" % ",".join(choice_strs)
         else:
             result = default_metavar
 
@@ -594,26 +590,27 @@ def format(tuple_size):
             if isinstance(result, tuple):
                 return result
             else:
-                return (result, ) * tuple_size
+                return (result,) * tuple_size
+
         return format
 
     def _format_args(self, action, default_metavar):
         get_metavar = self._metavar_formatter(action, default_metavar)
         if action.nargs is None:
-            result = '%s' % get_metavar(1)
+            result = "%s" % get_metavar(1)
         elif action.nargs == OPTIONAL:
-            result = '[%s]' % get_metavar(1)
+            result = "[%s]" % get_metavar(1)
         elif action.nargs == ZERO_OR_MORE:
-            result = '[%s [%s ...]]' % get_metavar(2)
+            result = "[%s [%s ...]]" % get_metavar(2)
         elif action.nargs == ONE_OR_MORE:
-            result = '%s [%s ...]' % get_metavar(2)
+            result = "%s [%s ...]" % get_metavar(2)
         elif action.nargs == REMAINDER:
-            result = '...'
+            result = "..."
         elif action.nargs == PARSER:
-            result = '%s ...' % get_metavar(1)
+            result = "%s ..." % get_metavar(1)
         else:
-            formats = ['%s' for _ in range(action.nargs)]
-            result = ' '.join(formats) % get_metavar(action.nargs)
+            formats = ["%s" for _ in range(action.nargs)]
+            result = " ".join(formats) % get_metavar(action.nargs)
         return result
 
     def _expand_help(self, action):
@@ -622,11 +619,11 @@ def _expand_help(self, action):
             if params[name] is SUPPRESS:
                 del params[name]
         for name in list(params):
-            if hasattr(params[name], '__name__'):
+            if hasattr(params[name], "__name__"):
                 params[name] = params[name].__name__
-        if params.get('choices') is not None:
-            choices_str = ', '.join([str(c) for c in params['choices']])
-            params['choices'] = choices_str
+        if params.get("choices") is not None:
+            choices_str = ", ".join([str(c) for c in params["choices"]])
+            params["choices"] = choices_str
         return self._get_help_string(action) % params
 
     def _iter_indented_subactions(self, action):
@@ -641,13 +638,14 @@ def _iter_indented_subactions(self, action):
             self._dedent()
 
     def _split_lines(self, text, width):
-        text = self._whitespace_matcher.sub(' ', text).strip()
+        text = self._whitespace_matcher.sub(" ", text).strip()
         return _textwrap.wrap(text, width)
 
     def _fill_text(self, text, width, indent):
-        text = self._whitespace_matcher.sub(' ', text).strip()
-        return _textwrap.fill(text, width, initial_indent=indent,
-                                           subsequent_indent=indent)
+        text = self._whitespace_matcher.sub(" ", text).strip()
+        return _textwrap.fill(
+            text, width, initial_indent=indent, subsequent_indent=indent
+        )
 
     def _get_help_string(self, action):
         return action.help
@@ -661,7 +659,7 @@ class RawDescriptionHelpFormatter(HelpFormatter):
     """
 
     def _fill_text(self, text, width, indent):
-        return ''.join([indent + line for line in text.splitlines(True)])
+        return "".join([indent + line for line in text.splitlines(True)])
 
 
 class RawTextHelpFormatter(RawDescriptionHelpFormatter):
@@ -684,11 +682,11 @@ class ArgumentDefaultsHelpFormatter(HelpFormatter):
 
     def _get_help_string(self, action):
         help = action.help
-        if '%(default)' not in action.help:
+        if "%(default)" not in action.help:
             if action.default is not SUPPRESS:
                 defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
                 if action.option_strings or action.nargs in defaulting_nargs:
-                    help += ' (default: %(default)s)'
+                    help += " (default: %(default)s)"
         return help
 
 
@@ -696,11 +694,12 @@ def _get_help_string(self, action):
 # Options and Arguments
 # =====================
 
+
 def _get_action_name(argument):
     if argument is None:
         return None
     elif argument.option_strings:
-        return  '/'.join(argument.option_strings)
+        return "/".join(argument.option_strings)
     elif argument.metavar not in (None, SUPPRESS):
         return argument.metavar
     elif argument.dest not in (None, SUPPRESS):
@@ -722,15 +721,15 @@ def __init__(self, argument, message):
 
     def __str__(self):
         if self.argument_name is None:
-            format = '%(message)s'
+            format = "%(message)s"
         else:
-            format = 'argument %(argument_name)s: %(message)s'
-        return format % dict(message=self.message,
-                             argument_name=self.argument_name)
+            format = "argument %(argument_name)s: %(message)s"
+        return format % dict(message=self.message, argument_name=self.argument_name)
 
 
 class ArgumentTypeError(Exception):
     """An error from trying to convert a command line string to a type."""
+
     pass
 
 
@@ -738,6 +737,7 @@ class ArgumentTypeError(Exception):
 # Action classes
 # ==============
 
+
 class Action(_AttributeHolder):
     """Information about how to convert command line strings to Python objects.
 
@@ -789,17 +789,19 @@ class Action(_AttributeHolder):
             help string. If None, the 'dest' value will be used as the name.
     """
 
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 nargs=None,
-                 const=None,
-                 default=None,
-                 type=None,
-                 choices=None,
-                 required=False,
-                 help=None,
-                 metavar=None):
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        nargs=None,
+        const=None,
+        default=None,
+        type=None,
+        choices=None,
+        required=False,
+        help=None,
+        metavar=None,
+    ):
         self.option_strings = option_strings
         self.dest = dest
         self.nargs = nargs
@@ -813,41 +815,44 @@ def __init__(self,
 
     def _get_kwargs(self):
         names = [
-            'option_strings',
-            'dest',
-            'nargs',
-            'const',
-            'default',
-            'type',
-            'choices',
-            'help',
-            'metavar',
+            "option_strings",
+            "dest",
+            "nargs",
+            "const",
+            "default",
+            "type",
+            "choices",
+            "help",
+            "metavar",
         ]
         return [(name, getattr(self, name)) for name in names]
 
     def __call__(self, parser, namespace, values, option_string=None):
-        raise NotImplementedError(_('.__call__() not defined'))
+        raise NotImplementedError(_(".__call__() not defined"))
 
 
 class _StoreAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 nargs=None,
-                 const=None,
-                 default=None,
-                 type=None,
-                 choices=None,
-                 required=False,
-                 help=None,
-                 metavar=None):
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        nargs=None,
+        const=None,
+        default=None,
+        type=None,
+        choices=None,
+        required=False,
+        help=None,
+        metavar=None,
+    ):
         if nargs == 0:
-            raise ValueError('nargs for store actions must be > 0; if you '
-                             'have nothing to store, actions such as store '
-                             'true or store const may be more appropriate')
+            raise ValueError(
+                "nargs for store actions must be > 0; if you "
+                "have nothing to store, actions such as store "
+                "true or store const may be more appropriate"
+            )
         if const is not None and nargs != OPTIONAL:
-            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+            raise ValueError("nargs must be %r to supply const" % OPTIONAL)
         super(_StoreAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
@@ -858,22 +863,24 @@ def __init__(self,
             choices=choices,
             required=required,
             help=help,
-            metavar=metavar)
+            metavar=metavar,
+        )
 
     def __call__(self, parser, namespace, values, option_string=None):
         setattr(namespace, self.dest, values)
 
 
 class _StoreConstAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 const,
-                 default=None,
-                 required=False,
-                 help=None,
-                 metavar=None):
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        const,
+        default=None,
+        required=False,
+        help=None,
+        metavar=None,
+    ):
         super(_StoreConstAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
@@ -881,65 +888,59 @@ def __init__(self,
             const=const,
             default=default,
             required=required,
-            help=help)
+            help=help,
+        )
 
     def __call__(self, parser, namespace, values, option_string=None):
         setattr(namespace, self.dest, self.const)
 
 
 class _StoreTrueAction(_StoreConstAction):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 default=False,
-                 required=False,
-                 help=None):
+    def __init__(self, option_strings, dest, default=False, required=False, help=None):
         super(_StoreTrueAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
             const=True,
             default=default,
             required=required,
-            help=help)
+            help=help,
+        )
 
 
 class _StoreFalseAction(_StoreConstAction):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 default=True,
-                 required=False,
-                 help=None):
+    def __init__(self, option_strings, dest, default=True, required=False, help=None):
         super(_StoreFalseAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
             const=False,
             default=default,
             required=required,
-            help=help)
+            help=help,
+        )
 
 
 class _AppendAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 nargs=None,
-                 const=None,
-                 default=None,
-                 type=None,
-                 choices=None,
-                 required=False,
-                 help=None,
-                 metavar=None):
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        nargs=None,
+        const=None,
+        default=None,
+        type=None,
+        choices=None,
+        required=False,
+        help=None,
+        metavar=None,
+    ):
         if nargs == 0:
-            raise ValueError('nargs for append actions must be > 0; if arg '
-                             'strings are not supplying the value to append, '
-                             'the append const action may be more appropriate')
+            raise ValueError(
+                "nargs for append actions must be > 0; if arg "
+                "strings are not supplying the value to append, "
+                "the append const action may be more appropriate"
+            )
         if const is not None and nargs != OPTIONAL:
-            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+            raise ValueError("nargs must be %r to supply const" % OPTIONAL)
         super(_AppendAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
@@ -950,7 +951,8 @@ def __init__(self,
             choices=choices,
             required=required,
             help=help,
-            metavar=metavar)
+            metavar=metavar,
+        )
 
     def __call__(self, parser, namespace, values, option_string=None):
         items = _copy.copy(_ensure_value(namespace, self.dest, []))
@@ -959,15 +961,16 @@ def __call__(self, parser, namespace, values, option_string=None):
 
 
 class _AppendConstAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 const,
-                 default=None,
-                 required=False,
-                 help=None,
-                 metavar=None):
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        const,
+        default=None,
+        required=False,
+        help=None,
+        metavar=None,
+    ):
         super(_AppendConstAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
@@ -976,7 +979,8 @@ def __init__(self,
             default=default,
             required=required,
             help=help,
-            metavar=metavar)
+            metavar=metavar,
+        )
 
     def __call__(self, parser, namespace, values, option_string=None):
         items = _copy.copy(_ensure_value(namespace, self.dest, []))
@@ -985,20 +989,15 @@ def __call__(self, parser, namespace, values, option_string=None):
 
 
 class _CountAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 dest,
-                 default=None,
-                 required=False,
-                 help=None):
+    def __init__(self, option_strings, dest, default=None, required=False, help=None):
         super(_CountAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
             nargs=0,
             default=default,
             required=required,
-            help=help)
+            help=help,
+        )
 
     def __call__(self, parser, namespace, values, option_string=None):
         new_count = _ensure_value(namespace, self.dest, 0) + 1
@@ -1006,18 +1005,14 @@ def __call__(self, parser, namespace, values, option_string=None):
 
 
 class _HelpAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 dest=SUPPRESS,
-                 default=SUPPRESS,
-                 help=None):
+    def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None):
         super(_HelpAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
             default=default,
             nargs=0,
-            help=help)
+            help=help,
+        )
 
     def __call__(self, parser, namespace, values, option_string=None):
         parser.print_help()
@@ -1025,19 +1020,16 @@ def __call__(self, parser, namespace, values, option_string=None):
 
 
 class _VersionAction(Action):
-
-    def __init__(self,
-                 option_strings,
-                 version=None,
-                 dest=SUPPRESS,
-                 default=SUPPRESS,
-                 help=None):
+    def __init__(
+        self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, help=None
+    ):
         super(_VersionAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
             default=default,
             nargs=0,
-            help=help)
+            help=help,
+        )
         self.version = version
 
     def __call__(self, parser, namespace, values, option_string=None):
@@ -1050,20 +1042,14 @@ def __call__(self, parser, namespace, values, option_string=None):
 
 
 class _SubParsersAction(Action):
-
     class _ChoicesPseudoAction(Action):
-
         def __init__(self, name, help):
             sup = super(_SubParsersAction._ChoicesPseudoAction, self)
             sup.__init__(option_strings=[], dest=name, help=help)
 
-    def __init__(self,
-                 option_strings,
-                 prog,
-                 parser_class,
-                 dest=SUPPRESS,
-                 help=None,
-                 metavar=None):
+    def __init__(
+        self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None
+    ):
 
         self._prog_prefix = prog
         self._parser_class = parser_class
@@ -1076,16 +1062,17 @@ def __init__(self,
             nargs=PARSER,
             choices=self._name_parser_map,
             help=help,
-            metavar=metavar)
+            metavar=metavar,
+        )
 
     def add_parser(self, name, **kwargs):
         # set prog from the existing prefix
-        if kwargs.get('prog') is None:
-            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
+        if kwargs.get("prog") is None:
+            kwargs["prog"] = "%s %s" % (self._prog_prefix, name)
 
         # create a pseudo-action to hold the choice help
-        if 'help' in kwargs:
-            help = kwargs.pop('help')
+        if "help" in kwargs:
+            help = kwargs.pop("help")
             choice_action = self._ChoicesPseudoAction(name, help)
             self._choices_actions.append(choice_action)
 
@@ -1109,8 +1096,8 @@ def __call__(self, parser, namespace, values, option_string=None):
         try:
             parser = self._name_parser_map[parser_name]
         except KeyError:
-            tup = parser_name, ', '.join(self._name_parser_map)
-            msg = _('unknown parser %r (choices: %s)' % tup)
+            tup = parser_name, ", ".join(self._name_parser_map)
+            msg = _("unknown parser %r (choices: %s)" % tup)
             raise ArgumentError(self, msg)
 
         # parse all the remaining options into the namespace
@@ -1121,6 +1108,7 @@ def __call__(self, parser, namespace, values, option_string=None):
 # Type classes
 # ==============
 
+
 class FileType(object):
     """Factory for creating file object types
 
@@ -1134,16 +1122,16 @@ class FileType(object):
             the builtin open() function.
     """
 
-    def __init__(self, mode='r', bufsize=None):
+    def __init__(self, mode="r", bufsize=None):
         self._mode = mode
         self._bufsize = bufsize
 
     def __call__(self, string):
         # the special argument "-" means sys.std{in,out}
-        if string == '-':
-            if 'r' in self._mode:
+        if string == "-":
+            if "r" in self._mode:
                 return _sys.stdin
-            elif 'w' in self._mode:
+            elif "w" in self._mode:
                 return _sys.stdout
             else:
                 msg = _('argument "-" with mode %r' % self._mode)
@@ -1157,13 +1145,15 @@ def __call__(self, string):
 
     def __repr__(self):
         args = [self._mode, self._bufsize]
-        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
-        return '%s(%s)' % (type(self).__name__, args_str)
+        args_str = ", ".join([repr(arg) for arg in args if arg is not None])
+        return "%s(%s)" % (type(self).__name__, args_str)
+
 
 # ===========================
 # Optional and Positional Parsing
 # ===========================
 
+
 class Namespace(_AttributeHolder):
     """Simple object for storing attributes.
 
@@ -1186,12 +1176,7 @@ def __contains__(self, key):
 
 
 class _ActionsContainer(object):
-
-    def __init__(self,
-                 description,
-                 prefix_chars,
-                 argument_default,
-                 conflict_handler):
+    def __init__(self, description, prefix_chars, argument_default, conflict_handler):
         super(_ActionsContainer, self).__init__()
 
         self.description = description
@@ -1203,17 +1188,17 @@ def __init__(self,
         self._registries = {}
 
         # register actions
-        self.register('action', None, _StoreAction)
-        self.register('action', 'store', _StoreAction)
-        self.register('action', 'store_const', _StoreConstAction)
-        self.register('action', 'store_true', _StoreTrueAction)
-        self.register('action', 'store_false', _StoreFalseAction)
-        self.register('action', 'append', _AppendAction)
-        self.register('action', 'append_const', _AppendConstAction)
-        self.register('action', 'count', _CountAction)
-        self.register('action', 'help', _HelpAction)
-        self.register('action', 'version', _VersionAction)
-        self.register('action', 'parsers', _SubParsersAction)
+        self.register("action", None, _StoreAction)
+        self.register("action", "store", _StoreAction)
+        self.register("action", "store_const", _StoreConstAction)
+        self.register("action", "store_true", _StoreTrueAction)
+        self.register("action", "store_false", _StoreFalseAction)
+        self.register("action", "append", _AppendAction)
+        self.register("action", "append_const", _AppendConstAction)
+        self.register("action", "count", _CountAction)
+        self.register("action", "help", _HelpAction)
+        self.register("action", "version", _VersionAction)
+        self.register("action", "parsers", _SubParsersAction)
 
         # raise an exception if the conflict handler is invalid
         self._get_handler()
@@ -1230,7 +1215,7 @@ def __init__(self,
         self._defaults = {}
 
         # determines whether an "option" looks like a negative number
-        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
+        self._negative_number_matcher = _re.compile(r"^-\d+$|^-\d*\.\d+$")
 
         # whether or not there are any optionals that look like negative
         # numbers -- uses a list so it can be shared and edited
@@ -1264,7 +1249,6 @@ def get_default(self, dest):
                 return action.default
         return self._defaults.get(dest, None)
 
-
     # =======================
     # Adding argument actions
     # =======================
@@ -1279,8 +1263,8 @@ def add_argument(self, *args, **kwargs):
         # argument
         chars = self.prefix_chars
         if not args or len(args) == 1 and args[0][0] not in chars:
-            if args and 'dest' in kwargs:
-                raise ValueError('dest supplied twice for positional argument')
+            if args and "dest" in kwargs:
+                raise ValueError("dest supplied twice for positional argument")
             kwargs = self._get_positional_kwargs(*args, **kwargs)
 
         # otherwise, we're adding an optional argument
@@ -1288,12 +1272,12 @@ def add_argument(self, *args, **kwargs):
             kwargs = self._get_optional_kwargs(*args, **kwargs)
 
         # if no default was supplied, use the parser-level default
-        if 'default' not in kwargs:
-            dest = kwargs['dest']
+        if "default" not in kwargs:
+            dest = kwargs["dest"]
             if dest in self._defaults:
-                kwargs['default'] = self._defaults[dest]
+                kwargs["default"] = self._defaults[dest]
             elif self.argument_default is not None:
-                kwargs['default'] = self.argument_default
+                kwargs["default"] = self.argument_default
 
         # create the action object, and add it to the parser
         action_class = self._pop_action_class(kwargs)
@@ -1302,9 +1286,9 @@ def add_argument(self, *args, **kwargs):
         action = action_class(**kwargs)
 
         # raise an error if the action type is not callable
-        type_func = self._registry_get('type', action.type, action.type)
+        type_func = self._registry_get("type", action.type, action.type)
         if not _callable(type_func):
-            raise ValueError('%r is not callable' % type_func)
+            raise ValueError("%r is not callable" % type_func)
 
         return self._add_action(action)
 
@@ -1347,7 +1331,7 @@ def _add_container_actions(self, container):
         title_group_map = {}
         for group in self._action_groups:
             if group.title in title_group_map:
-                msg = _('cannot merge actions - two groups are named %r')
+                msg = _("cannot merge actions - two groups are named %r")
                 raise ValueError(msg % (group.title))
             title_group_map[group.title] = group
 
@@ -1361,7 +1345,8 @@ def _add_container_actions(self, container):
                 title_group_map[group.title] = self.add_argument_group(
                     title=group.title,
                     description=group.description,
-                    conflict_handler=group.conflict_handler)
+                    conflict_handler=group.conflict_handler,
+                )
 
             # map the actions to their new group
             for action in group._group_actions:
@@ -1371,8 +1356,7 @@ def _add_container_actions(self, container):
         # NOTE: if add_mutually_exclusive_group ever gains title= and
         # description= then this code will need to be expanded as above
         for group in container._mutually_exclusive_groups:
-            mutex_group = self.add_mutually_exclusive_group(
-                required=group.required)
+            mutex_group = self.add_mutually_exclusive_group(required=group.required)
 
             # map the actions to their new mutex group
             for action in group._group_actions:
@@ -1384,16 +1368,16 @@ def _add_container_actions(self, container):
 
     def _get_positional_kwargs(self, dest, **kwargs):
         # make sure required is not specified
-        if 'required' in kwargs:
+        if "required" in kwargs:
             msg = _("'required' is an invalid argument for positionals")
             raise TypeError(msg)
 
         # mark positional arguments as required if at least one is
         # always required
-        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
-            kwargs['required'] = True
-        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
-            kwargs['required'] = True
+        if kwargs.get("nargs") not in [OPTIONAL, ZERO_OR_MORE]:
+            kwargs["required"] = True
+        if kwargs.get("nargs") == ZERO_OR_MORE and "default" not in kwargs:
+            kwargs["required"] = True
 
         # return the keyword arguments with no option strings
         return dict(kwargs, dest=dest, option_strings=[])
@@ -1405,8 +1389,7 @@ def _get_optional_kwargs(self, *args, **kwargs):
         for option_string in args:
             # error on strings that don't start with an appropriate prefix
             if not option_string[0] in self.prefix_chars:
-                msg = _('invalid option string %r: '
-                        'must start with a character %r')
+                msg = _("invalid option string %r: " "must start with a character %r")
                 tup = option_string, self.prefix_chars
                 raise ValueError(msg % tup)
 
@@ -1418,7 +1401,7 @@ def _get_optional_kwargs(self, *args, **kwargs):
                         long_option_strings.append(option_string)
 
         # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
-        dest = kwargs.pop('dest', None)
+        dest = kwargs.pop("dest", None)
         if dest is None:
             if long_option_strings:
                 dest_option_string = long_option_strings[0]
@@ -1426,24 +1409,24 @@ def _get_optional_kwargs(self, *args, **kwargs):
                 dest_option_string = option_strings[0]
             dest = dest_option_string.lstrip(self.prefix_chars)
             if not dest:
-                msg = _('dest= is required for options like %r')
+                msg = _("dest= is required for options like %r")
                 raise ValueError(msg % option_string)
-            dest = dest.replace('-', '_')
+            dest = dest.replace("-", "_")
 
         # return the updated keyword arguments
         return dict(kwargs, dest=dest, option_strings=option_strings)
 
     def _pop_action_class(self, kwargs, default=None):
-        action = kwargs.pop('action', default)
-        return self._registry_get('action', action, action)
+        action = kwargs.pop("action", default)
+        return self._registry_get("action", action, action)
 
     def _get_handler(self):
         # determine function from conflict handler string
-        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
+        handler_func_name = "_handle_conflict_%s" % self.conflict_handler
         try:
             return getattr(self, handler_func_name)
         except AttributeError:
-            msg = _('invalid conflict_resolution value: %r')
+            msg = _("invalid conflict_resolution value: %r")
             raise ValueError(msg % self.conflict_handler)
 
     def _check_conflict(self, action):
@@ -1461,10 +1444,10 @@ def _check_conflict(self, action):
             conflict_handler(action, confl_optionals)
 
     def _handle_conflict_error(self, action, conflicting_actions):
-        message = _('conflicting option string(s): %s')
-        conflict_string = ', '.join([option_string
-                                     for option_string, action
-                                     in conflicting_actions])
+        message = _("conflicting option string(s): %s")
+        conflict_string = ", ".join(
+            [option_string for option_string, action in conflicting_actions]
+        )
         raise ArgumentError(action, message % conflict_string)
 
     def _handle_conflict_resolve(self, action, conflicting_actions):
@@ -1483,13 +1466,12 @@ def _handle_conflict_resolve(self, action, conflicting_actions):
 
 
 class _ArgumentGroup(_ActionsContainer):
-
     def __init__(self, container, title=None, description=None, **kwargs):
         # add any missing keyword arguments by checking the container
         update = kwargs.setdefault
-        update('conflict_handler', container.conflict_handler)
-        update('prefix_chars', container.prefix_chars)
-        update('argument_default', container.argument_default)
+        update("conflict_handler", container.conflict_handler)
+        update("prefix_chars", container.prefix_chars)
+        update("argument_default", container.argument_default)
         super_init = super(_ArgumentGroup, self).__init__
         super_init(description=description, **kwargs)
 
@@ -1502,8 +1484,7 @@ def __init__(self, container, title=None, description=None, **kwargs):
         self._actions = container._actions
         self._option_string_actions = container._option_string_actions
         self._defaults = container._defaults
-        self._has_negative_number_optionals = \
-            container._has_negative_number_optionals
+        self._has_negative_number_optionals = container._has_negative_number_optionals
 
     def _add_action(self, action):
         action = super(_ArgumentGroup, self)._add_action(action)
@@ -1516,7 +1497,6 @@ def _remove_action(self, action):
 
 
 class _MutuallyExclusiveGroup(_ArgumentGroup):
-
     def __init__(self, container, required=False):
         super(_MutuallyExclusiveGroup, self).__init__(container)
         self.required = required
@@ -1524,7 +1504,7 @@ def __init__(self, container, required=False):
 
     def _add_action(self, action):
         if action.required:
-            msg = _('mutually exclusive arguments must be optional')
+            msg = _("mutually exclusive arguments must be optional")
             raise ValueError(msg)
         action = self._container._add_action(action)
         self._group_actions.append(action)
@@ -1553,33 +1533,40 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
         - add_help -- Add a -h/-help option
     """
 
-    def __init__(self,
-                 prog=None,
-                 usage=None,
-                 description=None,
-                 epilog=None,
-                 version=None,
-                 parents=[],
-                 formatter_class=HelpFormatter,
-                 prefix_chars='-',
-                 fromfile_prefix_chars=None,
-                 argument_default=None,
-                 conflict_handler='error',
-                 add_help=True):
+    def __init__(
+        self,
+        prog=None,
+        usage=None,
+        description=None,
+        epilog=None,
+        version=None,
+        parents=[],
+        formatter_class=HelpFormatter,
+        prefix_chars="-",
+        fromfile_prefix_chars=None,
+        argument_default=None,
+        conflict_handler="error",
+        add_help=True,
+    ):
 
         if version is not None:
             import warnings
+
             warnings.warn(
                 """The "version" argument to ArgumentParser is deprecated. """
                 """Please use """
                 """"add_argument(..., action='version', version="N", ...)" """
-                """instead""", DeprecationWarning)
+                """instead""",
+                DeprecationWarning,
+            )
 
         superinit = super(ArgumentParser, self).__init__
-        superinit(description=description,
-                  prefix_chars=prefix_chars,
-                  argument_default=argument_default,
-                  conflict_handler=conflict_handler)
+        superinit(
+            description=description,
+            prefix_chars=prefix_chars,
+            argument_default=argument_default,
+            conflict_handler=conflict_handler,
+        )
 
         # default setting for prog
         if prog is None:
@@ -1594,26 +1581,35 @@ def __init__(self,
         self.add_help = add_help
 
         add_group = self.add_argument_group
-        self._positionals = add_group(_('positional arguments'))
-        self._optionals = add_group(_('optional arguments'))
+        self._positionals = add_group(_("positional arguments"))
+        self._optionals = add_group(_("optional arguments"))
         self._subparsers = None
 
         # register types
         def identity(string):
             return string
-        self.register('type', None, identity)
+
+        self.register("type", None, identity)
 
         # add help and version arguments if necessary
         # (using explicit default to override global argument_default)
         if self.add_help:
             self.add_argument(
-                '-h', '--help', action='help', default=SUPPRESS,
-                help=_('show this help message and exit'))
+                "-h",
+                "--help",
+                action="help",
+                default=SUPPRESS,
+                help=_("show this help message and exit"),
+            )
         if self.version:
             self.add_argument(
-                '-v', '--version', action='version', default=SUPPRESS,
+                "-v",
+                "--version",
+                action="version",
+                default=SUPPRESS,
                 version=self.version,
-                help=_("show program's version number and exit"))
+                help=_("show program's version number and exit"),
+            )
 
         # add parent arguments and defaults
         for parent in parents:
@@ -1630,13 +1626,13 @@ def identity(string):
     # =======================
     def _get_kwargs(self):
         names = [
-            'prog',
-            'usage',
-            'description',
-            'version',
-            'formatter_class',
-            'conflict_handler',
-            'add_help',
+            "prog",
+            "usage",
+            "description",
+            "version",
+            "formatter_class",
+            "conflict_handler",
+            "add_help",
         ]
         return [(name, getattr(self, name)) for name in names]
 
@@ -1645,29 +1641,29 @@ def _get_kwargs(self):
     # ==================================
     def add_subparsers(self, **kwargs):
         if self._subparsers is not None:
-            self.error(_('cannot have multiple subparser arguments'))
+            self.error(_("cannot have multiple subparser arguments"))
 
         # add the parser class to the arguments if it's not present
-        kwargs.setdefault('parser_class', type(self))
+        kwargs.setdefault("parser_class", type(self))
 
-        if 'title' in kwargs or 'description' in kwargs:
-            title = _(kwargs.pop('title', 'subcommands'))
-            description = _(kwargs.pop('description', None))
+        if "title" in kwargs or "description" in kwargs:
+            title = _(kwargs.pop("title", "subcommands"))
+            description = _(kwargs.pop("description", None))
             self._subparsers = self.add_argument_group(title, description)
         else:
             self._subparsers = self._positionals
 
         # prog defaults to the usage message of this parser, skipping
         # optional arguments and with no "usage:" prefix
-        if kwargs.get('prog') is None:
+        if kwargs.get("prog") is None:
             formatter = self._get_formatter()
             positionals = self._get_positional_actions()
             groups = self._mutually_exclusive_groups
-            formatter.add_usage(self.usage, positionals, groups, '')
-            kwargs['prog'] = formatter.format_help().strip()
+            formatter.add_usage(self.usage, positionals, groups, "")
+            kwargs["prog"] = formatter.format_help().strip()
 
         # create the parsers action and add it to the positionals list
-        parsers_class = self._pop_action_class(kwargs, 'parsers')
+        parsers_class = self._pop_action_class(kwargs, "parsers")
         action = parsers_class(option_strings=[], **kwargs)
         self._subparsers._add_action(action)
 
@@ -1682,14 +1678,10 @@ def _add_action(self, action):
         return action
 
     def _get_optional_actions(self):
-        return [action
-                for action in self._actions
-                if action.option_strings]
+        return [action for action in self._actions if action.option_strings]
 
     def _get_positional_actions(self):
-        return [action
-                for action in self._actions
-                if not action.option_strings]
+        return [action for action in self._actions if not action.option_strings]
 
     # =====================================
     # Command line argument parsing methods
@@ -1697,8 +1689,8 @@ def _get_positional_actions(self):
     def parse_args(self, args=None, namespace=None):
         args, argv = self.parse_known_args(args, namespace)
         if argv:
-            msg = _('unrecognized arguments: %s')
-            self.error(msg % ' '.join(argv))
+            msg = _("unrecognized arguments: %s")
+            self.error(msg % " ".join(argv))
         return args
 
     def parse_known_args(self, args=None, namespace=None):
@@ -1745,7 +1737,7 @@ def _parse_known_args(self, arg_strings, namespace):
             for i, mutex_action in enumerate(mutex_group._group_actions):
                 conflicts = action_conflicts.setdefault(mutex_action, [])
                 conflicts.extend(group_actions[:i])
-                conflicts.extend(group_actions[i + 1:])
+                conflicts.extend(group_actions[i + 1 :])
 
         # find all option indices, and determine the arg_string_pattern
         # which has an 'O' if there is an option at an index,
@@ -1756,24 +1748,24 @@ def _parse_known_args(self, arg_strings, namespace):
         for i, arg_string in enumerate(arg_strings_iter):
 
             # all args after -- are non-options
-            if arg_string == '--':
-                arg_string_pattern_parts.append('-')
+            if arg_string == "--":
+                arg_string_pattern_parts.append("-")
                 for arg_string in arg_strings_iter:
-                    arg_string_pattern_parts.append('A')
+                    arg_string_pattern_parts.append("A")
 
             # otherwise, add the arg to the arg strings
             # and note the index if it was an option
             else:
                 option_tuple = self._parse_optional(arg_string)
                 if option_tuple is None:
-                    pattern = 'A'
+                    pattern = "A"
                 else:
                     option_string_indices[i] = option_tuple
-                    pattern = 'O'
+                    pattern = "O"
                 arg_string_pattern_parts.append(pattern)
 
         # join the pieces together to form the pattern
-        arg_strings_pattern = ''.join(arg_string_pattern_parts)
+        arg_strings_pattern = "".join(arg_string_pattern_parts)
 
         # converts arg strings to the appropriate and then takes the action
         seen_actions = _set()
@@ -1790,7 +1782,7 @@ def take_action(action, argument_strings, option_string=None):
                 seen_non_default_actions.add(action)
                 for conflict_action in action_conflicts.get(action, []):
                     if conflict_action in seen_non_default_actions:
-                        msg = _('not allowed with argument %s')
+                        msg = _("not allowed with argument %s")
                         action_name = _get_action_name(conflict_action)
                         raise ArgumentError(action, msg % action_name)
 
@@ -1820,7 +1812,7 @@ def consume_optional(start_index):
                 # if there is an explicit argument, try to match the
                 # optional's string arguments to only this
                 if explicit_arg is not None:
-                    arg_count = match_argument(action, 'A')
+                    arg_count = match_argument(action, "A")
 
                     # if the action is a single-dash option and takes no
                     # arguments, try to parse more single-dash options out
@@ -1836,7 +1828,7 @@ def consume_optional(start_index):
                                 action = optionals_map[option_string]
                                 break
                         else:
-                            msg = _('ignored explicit argument %r')
+                            msg = _("ignored explicit argument %r")
                             raise ArgumentError(action, msg % explicit_arg)
 
                     # if the action expect exactly one argument, we've
@@ -1850,7 +1842,7 @@ def consume_optional(start_index):
                     # error if a double-dash option did not use the
                     # explicit argument
                     else:
-                        msg = _('ignored explicit argument %r')
+                        msg = _("ignored explicit argument %r")
                         raise ArgumentError(action, msg % explicit_arg)
 
                 # if there is no explicit argument, try to match the
@@ -1886,13 +1878,13 @@ def consume_positionals(start_index):
             # slice off the appropriate arg strings for each Positional
             # and add the Positional and its args to the list
             for action, arg_count in zip(positionals, arg_counts):
-                args = arg_strings[start_index: start_index + arg_count]
+                args = arg_strings[start_index : start_index + arg_count]
                 start_index += arg_count
                 take_action(action, args)
 
             # slice off the Positionals that we just parsed and return the
             # index at which the Positionals' string args stopped
-            positionals[:] = positionals[len(arg_counts):]
+            positionals[:] = positionals[len(arg_counts) :]
             return start_index
 
         # consume Positionals and Optionals alternately, until we have
@@ -1906,10 +1898,9 @@ def consume_positionals(start_index):
         while start_index <= max_option_string_index:
 
             # consume any Positionals preceding the next option
-            next_option_string_index = min([
-                index
-                for index in option_string_indices
-                if index >= start_index])
+            next_option_string_index = min(
+                [index for index in option_string_indices if index >= start_index]
+            )
             if start_index != next_option_string_index:
                 positionals_end_index = consume_positionals(start_index)
 
@@ -1940,14 +1931,14 @@ def consume_positionals(start_index):
         # if we didn't use all the Positional objects, there were too few
         # arg strings supplied.
         if positionals:
-            self.error(_('too few arguments'))
+            self.error(_("too few arguments"))
 
         # make sure all required actions were present
         for action in self._actions:
             if action.required:
                 if action not in seen_actions:
                     name = _get_action_name(action)
-                    self.error(_('argument %s is required') % name)
+                    self.error(_("argument %s is required") % name)
 
         # make sure all required groups had one option present
         for group in self._mutually_exclusive_groups:
@@ -1958,11 +1949,13 @@ def consume_positionals(start_index):
 
                 # if no actions were used, report the error
                 else:
-                    names = [_get_action_name(action)
-                             for action in group._group_actions
-                             if action.help is not SUPPRESS]
-                    msg = _('one of the arguments %s is required')
-                    self.error(msg % ' '.join(names))
+                    names = [
+                        _get_action_name(action)
+                        for action in group._group_actions
+                        if action.help is not SUPPRESS
+                    ]
+                    msg = _("one of the arguments %s is required")
+                    self.error(msg % " ".join(names))
 
         # return the updated namespace and the extra arguments
         return namespace, extras
@@ -2007,11 +2000,11 @@ def _match_argument(self, action, arg_strings_pattern):
         # raise an exception if we weren't able to find a match
         if match is None:
             nargs_errors = {
-                None: _('expected one argument'),
-                OPTIONAL: _('expected at most one argument'),
-                ONE_OR_MORE: _('expected at least one argument'),
+                None: _("expected one argument"),
+                OPTIONAL: _("expected at most one argument"),
+                ONE_OR_MORE: _("expected at least one argument"),
             }
-            default = _('expected %s argument(s)') % action.nargs
+            default = _("expected %s argument(s)") % action.nargs
             msg = nargs_errors.get(action.nargs, default)
             raise ArgumentError(action, msg)
 
@@ -2024,8 +2017,9 @@ def _match_arguments_partial(self, actions, arg_strings_pattern):
         result = []
         for i in range(len(actions), 0, -1):
             actions_slice = actions[:i]
-            pattern = ''.join([self._get_nargs_pattern(action)
-                               for action in actions_slice])
+            pattern = "".join(
+                [self._get_nargs_pattern(action) for action in actions_slice]
+            )
             match = _re.match(pattern, arg_strings_pattern)
             if match is not None:
                 result.extend([len(string) for string in match.groups()])
@@ -2053,8 +2047,8 @@ def _parse_optional(self, arg_string):
             return None
 
         # if the option string before the "=" is present, return the action
-        if '=' in arg_string:
-            option_string, explicit_arg = arg_string.split('=', 1)
+        if "=" in arg_string:
+            option_string, explicit_arg = arg_string.split("=", 1)
             if option_string in self._option_string_actions:
                 action = self._option_string_actions[option_string]
                 return action, option_string, explicit_arg
@@ -2065,15 +2059,16 @@ def _parse_optional(self, arg_string):
 
         # if multiple actions match, the option string was ambiguous
         if len(option_tuples) > 1:
-            options = ', '.join([option_string
-                for action, option_string, explicit_arg in option_tuples])
+            options = ", ".join(
+                [option_string for action, option_string, explicit_arg in option_tuples]
+            )
             tup = arg_string, options
-            self.error(_('ambiguous option: %s could match %s') % tup)
+            self.error(_("ambiguous option: %s could match %s") % tup)
 
         # if exactly one action matched, this segmentation is good,
         # so return the parsed action
         elif len(option_tuples) == 1:
-            option_tuple, = option_tuples
+            (option_tuple,) = option_tuples
             return option_tuple
 
         # if it was not found as an option, but it looks like a negative
@@ -2084,7 +2079,7 @@ def _parse_optional(self, arg_string):
                 return None
 
         # if it contains a space, it was meant to be a positional
-        if ' ' in arg_string:
+        if " " in arg_string:
             return None
 
         # it was meant to be an optional but there is no such option
@@ -2098,8 +2093,8 @@ def _get_option_tuples(self, option_string):
         # split at the '='
         chars = self.prefix_chars
         if option_string[0] in chars and option_string[1] in chars:
-            if '=' in option_string:
-                option_prefix, explicit_arg = option_string.split('=', 1)
+            if "=" in option_string:
+                option_prefix, explicit_arg = option_string.split("=", 1)
             else:
                 option_prefix = option_string
                 explicit_arg = None
@@ -2130,7 +2125,7 @@ def _get_option_tuples(self, option_string):
 
         # shouldn't ever get here
         else:
-            self.error(_('unexpected option string: %s') % option_string)
+            self.error(_("unexpected option string: %s") % option_string)
 
         # return the collected option tuples
         return result
@@ -2142,36 +2137,36 @@ def _get_nargs_pattern(self, action):
 
         # the default (None) is assumed to be a single argument
         if nargs is None:
-            nargs_pattern = '(-*A-*)'
+            nargs_pattern = "(-*A-*)"
 
         # allow zero or one arguments
         elif nargs == OPTIONAL:
-            nargs_pattern = '(-*A?-*)'
+            nargs_pattern = "(-*A?-*)"
 
         # allow zero or more arguments
         elif nargs == ZERO_OR_MORE:
-            nargs_pattern = '(-*[A-]*)'
+            nargs_pattern = "(-*[A-]*)"
 
         # allow one or more arguments
         elif nargs == ONE_OR_MORE:
-            nargs_pattern = '(-*A[A-]*)'
+            nargs_pattern = "(-*A[A-]*)"
 
         # allow any number of options or arguments
         elif nargs == REMAINDER:
-            nargs_pattern = '([-AO]*)'
+            nargs_pattern = "([-AO]*)"
 
         # allow one argument followed by any number of options or arguments
         elif nargs == PARSER:
-            nargs_pattern = '(-*A[-AO]*)'
+            nargs_pattern = "(-*A[-AO]*)"
 
         # all others should be integers
         else:
-            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
+            nargs_pattern = "(-*%s-*)" % "-*".join("A" * nargs)
 
         # if this is an optional action, -- is not allowed
         if action.option_strings:
-            nargs_pattern = nargs_pattern.replace('-*', '')
-            nargs_pattern = nargs_pattern.replace('-', '')
+            nargs_pattern = nargs_pattern.replace("-*", "")
+            nargs_pattern = nargs_pattern.replace("-", "")
 
         # return the pattern
         return nargs_pattern
@@ -2182,7 +2177,7 @@ def _get_nargs_pattern(self, action):
     def _get_values(self, action, arg_strings):
         # for everything but PARSER args, strip out '--'
         if action.nargs not in [PARSER, REMAINDER]:
-            arg_strings = [s for s in arg_strings if s != '--']
+            arg_strings = [s for s in arg_strings if s != "--"]
 
         # optional argument produces a default when not present
         if not arg_strings and action.nargs == OPTIONAL:
@@ -2196,8 +2191,11 @@ def _get_values(self, action, arg_strings):
 
         # when nargs='*' on a positional, if there were no command-line
         # args, use the default if it is anything other than None
-        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
-              not action.option_strings):
+        elif (
+            not arg_strings
+            and action.nargs == ZERO_OR_MORE
+            and not action.option_strings
+        ):
             if action.default is not None:
                 value = action.default
             else:
@@ -2206,7 +2204,7 @@ def _get_values(self, action, arg_strings):
 
         # single argument or optional argument produces a single value
         elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
-            arg_string, = arg_strings
+            (arg_string,) = arg_strings
             value = self._get_value(action, arg_string)
             self._check_value(action, value)
 
@@ -2229,9 +2227,9 @@ def _get_values(self, action, arg_strings):
         return value
 
     def _get_value(self, action, arg_string):
-        type_func = self._registry_get('type', action.type, action.type)
+        type_func = self._registry_get("type", action.type, action.type)
         if not _callable(type_func):
-            msg = _('%r is not callable')
+            msg = _("%r is not callable")
             raise ArgumentError(action, msg % type_func)
 
         # convert the value to the appropriate type
@@ -2240,14 +2238,14 @@ def _get_value(self, action, arg_string):
 
         # ArgumentTypeErrors indicate errors
         except ArgumentTypeError:
-            name = getattr(action.type, '__name__', repr(action.type))
+            name = getattr(action.type, "__name__", repr(action.type))
             msg = str(_sys.exc_info()[1])
             raise ArgumentError(action, msg)
 
         # TypeErrors or ValueErrors also indicate errors
         except (TypeError, ValueError):
-            name = getattr(action.type, '__name__', repr(action.type))
-            msg = _('invalid %s value: %r')
+            name = getattr(action.type, "__name__", repr(action.type))
+            msg = _("invalid %s value: %r")
             raise ArgumentError(action, msg % (name, arg_string))
 
         # return the converted value
@@ -2256,8 +2254,8 @@ def _get_value(self, action, arg_string):
     def _check_value(self, action, value):
         # converted value must be one of the choices (if specified)
         if action.choices is not None and value not in action.choices:
-            tup = value, ', '.join(map(repr, action.choices))
-            msg = _('invalid choice: %r (choose from %s)') % tup
+            tup = value, ", ".join(map(repr, action.choices))
+            msg = _("invalid choice: %r (choose from %s)") % tup
             raise ArgumentError(action, msg)
 
     # =======================
@@ -2265,16 +2263,14 @@ def _check_value(self, action, value):
     # =======================
     def format_usage(self):
         formatter = self._get_formatter()
-        formatter.add_usage(self.usage, self._actions,
-                            self._mutually_exclusive_groups)
+        formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)
         return formatter.format_help()
 
     def format_help(self):
         formatter = self._get_formatter()
 
         # usage
-        formatter.add_usage(self.usage, self._actions,
-                            self._mutually_exclusive_groups)
+        formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)
 
         # description
         formatter.add_text(self.description)
@@ -2294,10 +2290,12 @@ def format_help(self):
 
     def format_version(self):
         import warnings
+
         warnings.warn(
             'The format_version method is deprecated -- the "version" '
-            'argument to ArgumentParser is no longer supported.',
-            DeprecationWarning)
+            "argument to ArgumentParser is no longer supported.",
+            DeprecationWarning,
+        )
         formatter = self._get_formatter()
         formatter.add_text(self.version)
         return formatter.format_help()
@@ -2320,10 +2318,12 @@ def print_help(self, file=None):
 
     def print_version(self, file=None):
         import warnings
+
         warnings.warn(
             'The print_version method is deprecated -- the "version" '
-            'argument to ArgumentParser is no longer supported.',
-            DeprecationWarning)
+            "argument to ArgumentParser is no longer supported.",
+            DeprecationWarning,
+        )
         self._print_message(self.format_version(), file)
 
     def _print_message(self, message, file=None):
@@ -2350,4 +2350,4 @@ def error(self, message):
         should either exit or raise an exception.
         """
         self.print_usage(_sys.stderr)
-        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
+        self.exit(2, _("%s: error: %s\n") % (self.prog, message))

diff  --git a/polly/utils/pyscop/isl.py b/polly/utils/pyscop/isl.py
index 0c8de2f59a8d6..5eaf7798e20b9 100644
--- a/polly/utils/pyscop/isl.py
+++ b/polly/utils/pyscop/isl.py
@@ -2,495 +2,485 @@
 
 isl = cdll.LoadLibrary("libisl.so")
 
+
 class Context:
-  defaultInstance = None
-  instances = {}
+    defaultInstance = None
+    instances = {}
+
+    def __init__(self):
+        ptr = isl.isl_ctx_alloc()
+        self.ptr = ptr
+        Context.instances[ptr] = self
 
-  def __init__(self):
-    ptr = isl.isl_ctx_alloc()
-    self.ptr = ptr
-    Context.instances[ptr] = self
+    def __del__(self):
+        isl.isl_ctx_free(self)
 
-  def __del__(self):
-    isl.isl_ctx_free(self)
+    def from_param(self):
+        return self.ptr
 
-  def from_param(self):
-    return self.ptr
+    @staticmethod
+    def from_ptr(ptr):
+        return Context.instances[ptr]
 
-  @staticmethod
-  def from_ptr(ptr):
-    return Context.instances[ptr]
+    @staticmethod
+    def getDefaultInstance():
+        if Context.defaultInstance == None:
+            Context.defaultInstance = Context()
 
-  @staticmethod
-  def getDefaultInstance():
-    if Context.defaultInstance == None:
-      Context.defaultInstance = Context()
+        return Context.defaultInstance
 
-    return Context.defaultInstance
 
 class IslObject:
-  def __init__(self, string = "", ctx = None, ptr = None):
-    self.initialize_isl_methods()
-    if ptr != None:
-      self.ptr = ptr
-      self.ctx = self.get_isl_method("get_ctx")(self)
-      return
-
-    if ctx == None:
-      ctx = Context.getDefaultInstance()
-
-    self.ctx = ctx
-    self.ptr = self.get_isl_method("read_from_str")(ctx, string, -1)
-
-  def __del__(self):
-    self.get_isl_method("free")(self)
-
-  def from_param(self):
-    return self.ptr
-
-  @property
-  def context(self):
-    return self.ctx
-
-  def __repr__(self):
-    p = Printer(self.ctx)
-    self.to_printer(p)
-    return p.getString();
-
-  def __str__(self):
-    p = Printer(self.ctx)
-    self.to_printer(p)
-    return p.getString();
-
-  @staticmethod
-  def isl_name():
-    return "No isl name available"
-
-  def initialize_isl_methods(self):
-    if hasattr(self.__class__, "initialized"):
-      return
-
-    self.__class__.initalized = True
-    self.get_isl_method("read_from_str").argtypes = [Context, c_char_p, c_int]
-    self.get_isl_method("copy").argtypes = [self.__class__]
-    self.get_isl_method("copy").restype = c_int
-    self.get_isl_method("free").argtypes = [self.__class__]
-    self.get_isl_method("get_ctx").argtypes = [self.__class__]
-    self.get_isl_method("get_ctx").restype = Context.from_ptr 
-    getattr(isl, "isl_printer_print_" + self.isl_name()).argtypes = [Printer, self.__class__]
-
-  def get_isl_method(self, name):
-    return getattr(isl, "isl_" + self.isl_name() + "_" + name)
-
-  def to_printer(self, printer):
-    getattr(isl, "isl_printer_print_" + self.isl_name())(printer, self)
+    def __init__(self, string="", ctx=None, ptr=None):
+        self.initialize_isl_methods()
+        if ptr != None:
+            self.ptr = ptr
+            self.ctx = self.get_isl_method("get_ctx")(self)
+            return
+
+        if ctx == None:
+            ctx = Context.getDefaultInstance()
+
+        self.ctx = ctx
+        self.ptr = self.get_isl_method("read_from_str")(ctx, string, -1)
+
+    def __del__(self):
+        self.get_isl_method("free")(self)
+
+    def from_param(self):
+        return self.ptr
+
+    @property
+    def context(self):
+        return self.ctx
+
+    def __repr__(self):
+        p = Printer(self.ctx)
+        self.to_printer(p)
+        return p.getString()
+
+    def __str__(self):
+        p = Printer(self.ctx)
+        self.to_printer(p)
+        return p.getString()
+
+    @staticmethod
+    def isl_name():
+        return "No isl name available"
+
+    def initialize_isl_methods(self):
+        if hasattr(self.__class__, "initialized"):
+            return
+
+        self.__class__.initalized = True
+        self.get_isl_method("read_from_str").argtypes = [Context, c_char_p, c_int]
+        self.get_isl_method("copy").argtypes = [self.__class__]
+        self.get_isl_method("copy").restype = c_int
+        self.get_isl_method("free").argtypes = [self.__class__]
+        self.get_isl_method("get_ctx").argtypes = [self.__class__]
+        self.get_isl_method("get_ctx").restype = Context.from_ptr
+        getattr(isl, "isl_printer_print_" + self.isl_name()).argtypes = [
+            Printer,
+            self.__class__,
+        ]
+
+    def get_isl_method(self, name):
+        return getattr(isl, "isl_" + self.isl_name() + "_" + name)
+
+    def to_printer(self, printer):
+        getattr(isl, "isl_printer_print_" + self.isl_name())(printer, self)
+
 
 class BSet(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return BSet(ptr = ptr)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return BSet(ptr=ptr)
+
+    @staticmethod
+    def isl_name():
+        return "basic_set"
 
-  @staticmethod
-  def isl_name():
-    return "basic_set"
 
 class Set(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return Set(ptr = ptr)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return Set(ptr=ptr)
+
+    @staticmethod
+    def isl_name():
+        return "set"
 
-  @staticmethod
-  def isl_name():
-    return "set"
 
 class USet(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return USet(ptr = ptr)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return USet(ptr=ptr)
 
-  @staticmethod
-  def isl_name():
-    return "union_set"
+    @staticmethod
+    def isl_name():
+        return "union_set"
 
 
 class BMap(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return BMap(ptr = ptr)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return BMap(ptr=ptr)
+
+    def __mul__(self, set):
+        return self.intersect_domain(set)
 
-  def __mul__(self, set):
-    return self.intersect_domain(set)
+    @staticmethod
+    def isl_name():
+        return "basic_map"
 
-  @staticmethod
-  def isl_name():
-    return "basic_map"
 
 class Map(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return Map(ptr = ptr)
-
-  def __mul__(self, set):
-    return self.intersect_domain(set)
-
-  @staticmethod
-  def isl_name():
-    return "map"
-
-  @staticmethod
-  def lex_lt(dim):
-    dim = isl.isl_dim_copy(dim)
-    return isl.isl_map_lex_lt(dim)
-
-  @staticmethod
-  def lex_le(dim):
-    dim = isl.isl_dim_copy(dim)
-    return isl.isl_map_lex_le(dim)
-
-  @staticmethod
-  def lex_gt(dim):
-    dim = isl.isl_dim_copy(dim)
-    return isl.isl_map_lex_gt(dim)
-
-  @staticmethod
-  def lex_ge(dim):
-    dim = isl.isl_dim_copy(dim)
-    return isl.isl_map_lex_ge(dim)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return Map(ptr=ptr)
+
+    def __mul__(self, set):
+        return self.intersect_domain(set)
+
+    @staticmethod
+    def isl_name():
+        return "map"
+
+    @staticmethod
+    def lex_lt(dim):
+        dim = isl.isl_dim_copy(dim)
+        return isl.isl_map_lex_lt(dim)
+
+    @staticmethod
+    def lex_le(dim):
+        dim = isl.isl_dim_copy(dim)
+        return isl.isl_map_lex_le(dim)
+
+    @staticmethod
+    def lex_gt(dim):
+        dim = isl.isl_dim_copy(dim)
+        return isl.isl_map_lex_gt(dim)
+
+    @staticmethod
+    def lex_ge(dim):
+        dim = isl.isl_dim_copy(dim)
+        return isl.isl_map_lex_ge(dim)
+
 
 class UMap(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return UMap(ptr = ptr)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return UMap(ptr=ptr)
+
+    @staticmethod
+    def isl_name():
+        return "union_map"
 
-  @staticmethod
-  def isl_name():
-    return "union_map"
 
 class Dim(IslObject):
-  @staticmethod
-  def from_ptr(ptr):
-    if not ptr:
-      return
-    return Dim(ptr = ptr)
+    @staticmethod
+    def from_ptr(ptr):
+        if not ptr:
+            return
+        return Dim(ptr=ptr)
 
-  @staticmethod
-  def isl_name():
-    return "dim"
+    @staticmethod
+    def isl_name():
+        return "dim"
 
-  def initialize_isl_methods(self):
-    if hasattr(self.__class__, "initialized"):
-      return
+    def initialize_isl_methods(self):
+        if hasattr(self.__class__, "initialized"):
+            return
 
-    self.__class__.initalized = True
-    self.get_isl_method("copy").argtypes = [self.__class__]
-    self.get_isl_method("copy").restype = c_int
-    self.get_isl_method("free").argtypes = [self.__class__]
-    self.get_isl_method("get_ctx").argtypes = [self.__class__]
-    self.get_isl_method("get_ctx").restype = Context.from_ptr 
+        self.__class__.initalized = True
+        self.get_isl_method("copy").argtypes = [self.__class__]
+        self.get_isl_method("copy").restype = c_int
+        self.get_isl_method("free").argtypes = [self.__class__]
+        self.get_isl_method("get_ctx").argtypes = [self.__class__]
+        self.get_isl_method("get_ctx").restype = Context.from_ptr
 
-  def __repr__(self):
-    return str(self)
+    def __repr__(self):
+        return str(self)
 
-  def __str__(self):
+    def __str__(self):
 
-    dimParam = isl.isl_dim_size(self, 1)
-    dimIn = isl.isl_dim_size(self, 2)
-    dimOut = isl.isl_dim_size(self, 3)
+        dimParam = isl.isl_dim_size(self, 1)
+        dimIn = isl.isl_dim_size(self, 2)
+        dimOut = isl.isl_dim_size(self, 3)
 
-    if dimIn:
-      return "<dim In:%s, Out:%s, Param:%s>" % (dimIn, dimOut, dimParam)
+        if dimIn:
+            return "<dim In:%s, Out:%s, Param:%s>" % (dimIn, dimOut, dimParam)
+
+        return "<dim Set:%s, Param:%s>" % (dimOut, dimParam)
 
-    return "<dim Set:%s, Param:%s>" % (dimOut, dimParam)
 
 class Printer:
-  FORMAT_ISL = 0
-  FORMAT_POLYLIB = 1
-  FORMAT_POLYLIB_CONSTRAINTS = 2
-  FORMAT_OMEGA = 3
-  FORMAT_C = 4
-  FORMAT_LATEX = 5
-  FORMAT_EXT_POLYLIB = 6
+    FORMAT_ISL = 0
+    FORMAT_POLYLIB = 1
+    FORMAT_POLYLIB_CONSTRAINTS = 2
+    FORMAT_OMEGA = 3
+    FORMAT_C = 4
+    FORMAT_LATEX = 5
+    FORMAT_EXT_POLYLIB = 6
 
-  def __init__(self, ctx = None):
-    if ctx == None:
-      ctx = Context.getDefaultInstance()
+    def __init__(self, ctx=None):
+        if ctx == None:
+            ctx = Context.getDefaultInstance()
 
-    self.ctx = ctx
-    self.ptr = isl.isl_printer_to_str(ctx)
+        self.ctx = ctx
+        self.ptr = isl.isl_printer_to_str(ctx)
 
-  def setFormat(self, format):
-    self.ptr = isl.isl_printer_set_output_format(self, format);
+    def setFormat(self, format):
+        self.ptr = isl.isl_printer_set_output_format(self, format)
 
-  def from_param(self):
-    return self.ptr
+    def from_param(self):
+        return self.ptr
 
-  def __del__(self):
-    isl.isl_printer_free(self)
+    def __del__(self):
+        isl.isl_printer_free(self)
 
-  def getString(self):
-    return isl.isl_printer_get_str(self)
+    def getString(self):
+        return isl.isl_printer_get_str(self)
 
-functions = [
-             # Unary properties
-             ("is_empty", BSet, [BSet], c_int),
-             ("is_empty", Set, [Set], c_int),
-             ("is_empty", USet, [USet], c_int),
-             ("is_empty", BMap, [BMap], c_int),
-             ("is_empty", Map, [Map], c_int),
-             ("is_empty", UMap, [UMap], c_int),
 
+functions = [
+    # Unary properties
+    ("is_empty", BSet, [BSet], c_int),
+    ("is_empty", Set, [Set], c_int),
+    ("is_empty", USet, [USet], c_int),
+    ("is_empty", BMap, [BMap], c_int),
+    ("is_empty", Map, [Map], c_int),
+    ("is_empty", UMap, [UMap], c_int),
     #         ("is_universe", Set, [Set], c_int),
     #         ("is_universe", Map, [Map], c_int),
-
-             ("is_single_valued", Map, [Map], c_int),
-
-             ("is_bijective", Map, [Map], c_int),
-
-             ("is_wrapping", BSet, [BSet], c_int),
-             ("is_wrapping", Set, [Set], c_int),
-
-             # Binary properties
-             ("is_equal", BSet, [BSet, BSet], c_int),
-             ("is_equal", Set, [Set, Set], c_int),
-             ("is_equal", USet, [USet, USet], c_int),
-             ("is_equal", BMap, [BMap, BMap], c_int),
-             ("is_equal", Map, [Map, Map], c_int),
-             ("is_equal", UMap, [UMap, UMap], c_int),
-
-             # is_disjoint missing
-
-             # ("is_subset", BSet, [BSet, BSet], c_int),
-             ("is_subset", Set, [Set, Set], c_int),
-             ("is_subset", USet, [USet, USet], c_int),
-             ("is_subset", BMap, [BMap, BMap], c_int),
-             ("is_subset", Map, [Map, Map], c_int),
-             ("is_subset", UMap, [UMap, UMap], c_int),
-             #("is_strict_subset", BSet, [BSet, BSet], c_int),
-             ("is_strict_subset", Set, [Set, Set], c_int),
-             ("is_strict_subset", USet, [USet, USet], c_int),
-             ("is_strict_subset", BMap, [BMap, BMap], c_int),
-             ("is_strict_subset", Map, [Map, Map], c_int),
-             ("is_strict_subset", UMap, [UMap, UMap], c_int),
-
-             # Unary Operations
-             ("complement", Set, [Set], Set),
-             ("reverse", BMap, [BMap], BMap),
-             ("reverse", Map, [Map], Map),
-             ("reverse", UMap, [UMap], UMap),
-
-             # Projection missing
-             ("range", BMap, [BMap], BSet),
-             ("range", Map, [Map], Set),
-             ("range", UMap, [UMap], USet),
-             ("domain", BMap, [BMap], BSet),
-             ("domain", Map, [Map], Set),
-             ("domain", UMap, [UMap], USet),
-
-             ("identity", Set, [Set], Map),
-             ("identity", USet, [USet], UMap),
-
-             ("deltas", BMap, [BMap], BSet),
-             ("deltas", Map, [Map], Set),
-             ("deltas", UMap, [UMap], USet),
-
-             ("coalesce", Set, [Set], Set),
-             ("coalesce", USet, [USet], USet),
-             ("coalesce", Map, [Map], Map),
-             ("coalesce", UMap, [UMap], UMap),
-
-             ("detect_equalities", BSet, [BSet], BSet),
-             ("detect_equalities", Set, [Set], Set),
-             ("detect_equalities", USet, [USet], USet),
-             ("detect_equalities", BMap, [BMap], BMap),
-             ("detect_equalities", Map, [Map], Map),
-             ("detect_equalities", UMap, [UMap], UMap),
-
-             ("convex_hull", Set, [Set], Set),
-             ("convex_hull", Map, [Map], Map),
-
-             ("simple_hull", Set, [Set], Set),
-             ("simple_hull", Map, [Map], Map),
-
-             ("affine_hull", BSet, [BSet], BSet),
-             ("affine_hull", Set, [Set], BSet),
-             ("affine_hull", USet, [USet], USet),
-             ("affine_hull", BMap, [BMap], BMap),
-             ("affine_hull", Map, [Map], BMap),
-             ("affine_hull", UMap, [UMap], UMap),
-
-             ("polyhedral_hull", Set, [Set], Set),
-             ("polyhedral_hull", USet, [USet], USet),
-             ("polyhedral_hull", Map, [Map], Map),
-             ("polyhedral_hull", UMap, [UMap], UMap),
-
-             # Power missing
-             # Transitive closure missing
-             # Reaching path lengths missing
-
-             ("wrap", BMap, [BMap], BSet),
-             ("wrap", Map, [Map], Set),
-             ("wrap", UMap, [UMap], USet),
-             ("unwrap", BSet, [BMap], BMap),
-             ("unwrap", Set, [Map], Map),
-             ("unwrap", USet, [UMap], UMap),
-
-             ("flatten", Set, [Set], Set),
-             ("flatten", Map, [Map], Map),
-             ("flatten_map", Set, [Set], Map),
-
-             # Dimension manipulation missing
-
-             # Binary Operations
-             ("intersect", BSet, [BSet, BSet], BSet),
-             ("intersect", Set, [Set, Set], Set),
-             ("intersect", USet, [USet, USet], USet),
-             ("intersect", BMap, [BMap, BMap], BMap),
-             ("intersect", Map, [Map, Map], Map),
-             ("intersect", UMap, [UMap, UMap], UMap),
-             ("intersect_domain", BMap, [BMap, BSet], BMap),
-             ("intersect_domain", Map, [Map, Set], Map),
-             ("intersect_domain", UMap, [UMap, USet], UMap),
-             ("intersect_range", BMap, [BMap, BSet], BMap),
-             ("intersect_range", Map, [Map, Set], Map),
-             ("intersect_range", UMap, [UMap, USet], UMap),
-
-             ("union", BSet, [BSet, BSet], Set),
-             ("union", Set, [Set, Set], Set),
-             ("union", USet, [USet, USet], USet),
-             ("union", BMap, [BMap, BMap], Map),
-             ("union", Map, [Map, Map], Map),
-             ("union", UMap, [UMap, UMap], UMap),
-
-             ("subtract", Set, [Set, Set], Set),
-             ("subtract", Map, [Map, Map], Map),
-             ("subtract", USet, [USet, USet], USet),
-             ("subtract", UMap, [UMap, UMap], UMap),
-
-             ("apply", BSet, [BSet, BMap], BSet),
-             ("apply", Set, [Set, Map], Set),
-             ("apply", USet, [USet, UMap], USet),
-             ("apply_domain", BMap, [BMap, BMap], BMap),
-             ("apply_domain", Map, [Map, Map], Map),
-             ("apply_domain", UMap, [UMap, UMap], UMap),
-             ("apply_range", BMap, [BMap, BMap], BMap),
-             ("apply_range", Map, [Map, Map], Map),
-             ("apply_range", UMap, [UMap, UMap], UMap),
-
-             ("gist", BSet, [BSet, BSet], BSet),
-             ("gist", Set, [Set, Set], Set),
-             ("gist", USet, [USet, USet], USet),
-             ("gist", BMap, [BMap, BMap], BMap),
-             ("gist", Map, [Map, Map], Map),
-             ("gist", UMap, [UMap, UMap], UMap),
-
-             # Lexicographic Optimizations
-             # partial_lexmin missing
-             ("lexmin", BSet, [BSet], BSet),
-             ("lexmin", Set, [Set], Set),
-             ("lexmin", USet, [USet], USet),
-             ("lexmin", BMap, [BMap], BMap),
-             ("lexmin", Map, [Map], Map),
-             ("lexmin", UMap, [UMap], UMap),
-
-             ("lexmax", BSet, [BSet], BSet),
-             ("lexmax", Set, [Set], Set),
-             ("lexmax", USet, [USet], USet),
-             ("lexmax", BMap, [BMap], BMap),
-             ("lexmax", Map, [Map], Map),
-             ("lexmax", UMap, [UMap], UMap),
-
-              # Undocumented
-             ("lex_lt_union_set", USet, [USet, USet], UMap),
-             ("lex_le_union_set", USet, [USet, USet], UMap),
-             ("lex_gt_union_set", USet, [USet, USet], UMap),
-             ("lex_ge_union_set", USet, [USet, USet], UMap),
-
-             ]
+    ("is_single_valued", Map, [Map], c_int),
+    ("is_bijective", Map, [Map], c_int),
+    ("is_wrapping", BSet, [BSet], c_int),
+    ("is_wrapping", Set, [Set], c_int),
+    # Binary properties
+    ("is_equal", BSet, [BSet, BSet], c_int),
+    ("is_equal", Set, [Set, Set], c_int),
+    ("is_equal", USet, [USet, USet], c_int),
+    ("is_equal", BMap, [BMap, BMap], c_int),
+    ("is_equal", Map, [Map, Map], c_int),
+    ("is_equal", UMap, [UMap, UMap], c_int),
+    # is_disjoint missing
+    # ("is_subset", BSet, [BSet, BSet], c_int),
+    ("is_subset", Set, [Set, Set], c_int),
+    ("is_subset", USet, [USet, USet], c_int),
+    ("is_subset", BMap, [BMap, BMap], c_int),
+    ("is_subset", Map, [Map, Map], c_int),
+    ("is_subset", UMap, [UMap, UMap], c_int),
+    # ("is_strict_subset", BSet, [BSet, BSet], c_int),
+    ("is_strict_subset", Set, [Set, Set], c_int),
+    ("is_strict_subset", USet, [USet, USet], c_int),
+    ("is_strict_subset", BMap, [BMap, BMap], c_int),
+    ("is_strict_subset", Map, [Map, Map], c_int),
+    ("is_strict_subset", UMap, [UMap, UMap], c_int),
+    # Unary Operations
+    ("complement", Set, [Set], Set),
+    ("reverse", BMap, [BMap], BMap),
+    ("reverse", Map, [Map], Map),
+    ("reverse", UMap, [UMap], UMap),
+    # Projection missing
+    ("range", BMap, [BMap], BSet),
+    ("range", Map, [Map], Set),
+    ("range", UMap, [UMap], USet),
+    ("domain", BMap, [BMap], BSet),
+    ("domain", Map, [Map], Set),
+    ("domain", UMap, [UMap], USet),
+    ("identity", Set, [Set], Map),
+    ("identity", USet, [USet], UMap),
+    ("deltas", BMap, [BMap], BSet),
+    ("deltas", Map, [Map], Set),
+    ("deltas", UMap, [UMap], USet),
+    ("coalesce", Set, [Set], Set),
+    ("coalesce", USet, [USet], USet),
+    ("coalesce", Map, [Map], Map),
+    ("coalesce", UMap, [UMap], UMap),
+    ("detect_equalities", BSet, [BSet], BSet),
+    ("detect_equalities", Set, [Set], Set),
+    ("detect_equalities", USet, [USet], USet),
+    ("detect_equalities", BMap, [BMap], BMap),
+    ("detect_equalities", Map, [Map], Map),
+    ("detect_equalities", UMap, [UMap], UMap),
+    ("convex_hull", Set, [Set], Set),
+    ("convex_hull", Map, [Map], Map),
+    ("simple_hull", Set, [Set], Set),
+    ("simple_hull", Map, [Map], Map),
+    ("affine_hull", BSet, [BSet], BSet),
+    ("affine_hull", Set, [Set], BSet),
+    ("affine_hull", USet, [USet], USet),
+    ("affine_hull", BMap, [BMap], BMap),
+    ("affine_hull", Map, [Map], BMap),
+    ("affine_hull", UMap, [UMap], UMap),
+    ("polyhedral_hull", Set, [Set], Set),
+    ("polyhedral_hull", USet, [USet], USet),
+    ("polyhedral_hull", Map, [Map], Map),
+    ("polyhedral_hull", UMap, [UMap], UMap),
+    # Power missing
+    # Transitive closure missing
+    # Reaching path lengths missing
+    ("wrap", BMap, [BMap], BSet),
+    ("wrap", Map, [Map], Set),
+    ("wrap", UMap, [UMap], USet),
+    ("unwrap", BSet, [BMap], BMap),
+    ("unwrap", Set, [Map], Map),
+    ("unwrap", USet, [UMap], UMap),
+    ("flatten", Set, [Set], Set),
+    ("flatten", Map, [Map], Map),
+    ("flatten_map", Set, [Set], Map),
+    # Dimension manipulation missing
+    # Binary Operations
+    ("intersect", BSet, [BSet, BSet], BSet),
+    ("intersect", Set, [Set, Set], Set),
+    ("intersect", USet, [USet, USet], USet),
+    ("intersect", BMap, [BMap, BMap], BMap),
+    ("intersect", Map, [Map, Map], Map),
+    ("intersect", UMap, [UMap, UMap], UMap),
+    ("intersect_domain", BMap, [BMap, BSet], BMap),
+    ("intersect_domain", Map, [Map, Set], Map),
+    ("intersect_domain", UMap, [UMap, USet], UMap),
+    ("intersect_range", BMap, [BMap, BSet], BMap),
+    ("intersect_range", Map, [Map, Set], Map),
+    ("intersect_range", UMap, [UMap, USet], UMap),
+    ("union", BSet, [BSet, BSet], Set),
+    ("union", Set, [Set, Set], Set),
+    ("union", USet, [USet, USet], USet),
+    ("union", BMap, [BMap, BMap], Map),
+    ("union", Map, [Map, Map], Map),
+    ("union", UMap, [UMap, UMap], UMap),
+    ("subtract", Set, [Set, Set], Set),
+    ("subtract", Map, [Map, Map], Map),
+    ("subtract", USet, [USet, USet], USet),
+    ("subtract", UMap, [UMap, UMap], UMap),
+    ("apply", BSet, [BSet, BMap], BSet),
+    ("apply", Set, [Set, Map], Set),
+    ("apply", USet, [USet, UMap], USet),
+    ("apply_domain", BMap, [BMap, BMap], BMap),
+    ("apply_domain", Map, [Map, Map], Map),
+    ("apply_domain", UMap, [UMap, UMap], UMap),
+    ("apply_range", BMap, [BMap, BMap], BMap),
+    ("apply_range", Map, [Map, Map], Map),
+    ("apply_range", UMap, [UMap, UMap], UMap),
+    ("gist", BSet, [BSet, BSet], BSet),
+    ("gist", Set, [Set, Set], Set),
+    ("gist", USet, [USet, USet], USet),
+    ("gist", BMap, [BMap, BMap], BMap),
+    ("gist", Map, [Map, Map], Map),
+    ("gist", UMap, [UMap, UMap], UMap),
+    # Lexicographic Optimizations
+    # partial_lexmin missing
+    ("lexmin", BSet, [BSet], BSet),
+    ("lexmin", Set, [Set], Set),
+    ("lexmin", USet, [USet], USet),
+    ("lexmin", BMap, [BMap], BMap),
+    ("lexmin", Map, [Map], Map),
+    ("lexmin", UMap, [UMap], UMap),
+    ("lexmax", BSet, [BSet], BSet),
+    ("lexmax", Set, [Set], Set),
+    ("lexmax", USet, [USet], USet),
+    ("lexmax", BMap, [BMap], BMap),
+    ("lexmax", Map, [Map], Map),
+    ("lexmax", UMap, [UMap], UMap),
+    # Undocumented
+    ("lex_lt_union_set", USet, [USet, USet], UMap),
+    ("lex_le_union_set", USet, [USet, USet], UMap),
+    ("lex_gt_union_set", USet, [USet, USet], UMap),
+    ("lex_ge_union_set", USet, [USet, USet], UMap),
+]
 keep_functions = [
-             # Unary properties
-             ("get_dim", BSet, [BSet], Dim),
-             ("get_dim", Set, [Set], Dim),
-             ("get_dim", USet, [USet], Dim),
-             ("get_dim", BMap, [BMap], Dim),
-             ("get_dim", Map, [Map], Dim),
-             ("get_dim", UMap, [UMap], Dim)
-             ]
+    # Unary properties
+    ("get_dim", BSet, [BSet], Dim),
+    ("get_dim", Set, [Set], Dim),
+    ("get_dim", USet, [USet], Dim),
+    ("get_dim", BMap, [BMap], Dim),
+    ("get_dim", Map, [Map], Dim),
+    ("get_dim", UMap, [UMap], Dim),
+]
+
 
 def addIslFunction(object, name):
     functionName = "isl_" + object.isl_name() + "_" + name
     islFunction = getattr(isl, functionName)
     if len(islFunction.argtypes) == 1:
-      f = lambda a: islFunctionOneOp(islFunction, a)
+        f = lambda a: islFunctionOneOp(islFunction, a)
     elif len(islFunction.argtypes) == 2:
-      f = lambda a, b: islFunctionTwoOp(islFunction, a, b)
+        f = lambda a, b: islFunctionTwoOp(islFunction, a, b)
     object.__dict__[name] = f
 
 
 def islFunctionOneOp(islFunction, ops):
-  ops = getattr(isl, "isl_" + ops.isl_name() + "_copy")(ops)
-  return islFunction(ops)
+    ops = getattr(isl, "isl_" + ops.isl_name() + "_copy")(ops)
+    return islFunction(ops)
+
 
 def islFunctionTwoOp(islFunction, opOne, opTwo):
-  opOne = getattr(isl, "isl_" + opOne.isl_name() + "_copy")(opOne)
-  opTwo = getattr(isl, "isl_" + opTwo.isl_name() + "_copy")(opTwo)
-  return islFunction(opOne, opTwo)
+    opOne = getattr(isl, "isl_" + opOne.isl_name() + "_copy")(opOne)
+    opTwo = getattr(isl, "isl_" + opTwo.isl_name() + "_copy")(opTwo)
+    return islFunction(opOne, opTwo)
+
 
 for (operation, base, operands, ret) in functions:
-  functionName = "isl_" + base.isl_name() + "_" + operation
-  islFunction = getattr(isl, functionName)
-  if len(operands) == 1:
-    islFunction.argtypes = [c_int]
-  elif len(operands) == 2:
-    islFunction.argtypes = [c_int, c_int]
+    functionName = "isl_" + base.isl_name() + "_" + operation
+    islFunction = getattr(isl, functionName)
+    if len(operands) == 1:
+        islFunction.argtypes = [c_int]
+    elif len(operands) == 2:
+        islFunction.argtypes = [c_int, c_int]
+
+    if ret == c_int:
+        islFunction.restype = ret
+    else:
+        islFunction.restype = ret.from_ptr
 
-  if ret == c_int:
-    islFunction.restype = ret
-  else:
-    islFunction.restype = ret.from_ptr
+    addIslFunction(base, operation)
 
-  addIslFunction(base, operation)
 
 def addIslFunctionKeep(object, name):
     functionName = "isl_" + object.isl_name() + "_" + name
     islFunction = getattr(isl, functionName)
     if len(islFunction.argtypes) == 1:
-      f = lambda a: islFunctionOneOpKeep(islFunction, a)
+        f = lambda a: islFunctionOneOpKeep(islFunction, a)
     elif len(islFunction.argtypes) == 2:
-      f = lambda a, b: islFunctionTwoOpKeep(islFunction, a, b)
+        f = lambda a, b: islFunctionTwoOpKeep(islFunction, a, b)
     object.__dict__[name] = f
 
+
 def islFunctionOneOpKeep(islFunction, ops):
-  return islFunction(ops)
+    return islFunction(ops)
+
 
 def islFunctionTwoOpKeep(islFunction, opOne, opTwo):
-  return islFunction(opOne, opTwo)
+    return islFunction(opOne, opTwo)
+
 
 for (operation, base, operands, ret) in keep_functions:
-  functionName = "isl_" + base.isl_name() + "_" + operation
-  islFunction = getattr(isl, functionName)
-  if len(operands) == 1:
-    islFunction.argtypes = [c_int]
-  elif len(operands) == 2:
-    islFunction.argtypes = [c_int, c_int]
+    functionName = "isl_" + base.isl_name() + "_" + operation
+    islFunction = getattr(isl, functionName)
+    if len(operands) == 1:
+        islFunction.argtypes = [c_int]
+    elif len(operands) == 2:
+        islFunction.argtypes = [c_int, c_int]
 
-  if ret == c_int:
-    islFunction.restype = ret
-  else:
-    islFunction.restype = ret.from_ptr
+    if ret == c_int:
+        islFunction.restype = ret
+    else:
+        islFunction.restype = ret.from_ptr
 
-  addIslFunctionKeep(base, operation)
+    addIslFunctionKeep(base, operation)
 
 isl.isl_ctx_free.argtypes = [Context]
 isl.isl_basic_set_read_from_str.argtypes = [Context, c_char_p, c_int]
@@ -554,25 +544,44 @@ def islFunctionTwoOpKeep(islFunction, opOne, opTwo):
 isl.isl_map_lex_ge.argtypes = [c_int]
 isl.isl_map_lex_ge.restype = Map.from_ptr
 
-isl.isl_union_map_compute_flow.argtypes = [c_int, c_int, c_int, c_int, c_void_p,
-                                           c_void_p, c_void_p, c_void_p]
+isl.isl_union_map_compute_flow.argtypes = [
+    c_int,
+    c_int,
+    c_int,
+    c_int,
+    c_void_p,
+    c_void_p,
+    c_void_p,
+    c_void_p,
+]
+
 
 def dependences(sink, must_source, may_source, schedule):
-  sink = getattr(isl, "isl_" + sink.isl_name() + "_copy")(sink)
-  must_source = getattr(isl, "isl_" + must_source.isl_name() + "_copy")(must_source)
-  may_source = getattr(isl, "isl_" + may_source.isl_name() + "_copy")(may_source)
-  schedule = getattr(isl, "isl_" + schedule.isl_name() + "_copy")(schedule)
-  must_dep = c_int()
-  may_dep = c_int()
-  must_no_source = c_int()
-  may_no_source = c_int()
-  isl.isl_union_map_compute_flow(sink, must_source, may_source, schedule, \
-                                 byref(must_dep), byref(may_dep),
-                                 byref(must_no_source),
-                                 byref(may_no_source))
-
-  return (UMap.from_ptr(must_dep), UMap.from_ptr(may_dep), \
-          USet.from_ptr(must_no_source), USet.from_ptr(may_no_source))
-
-
-__all__ = ['Set', 'Map', 'Printer', 'Context']
+    sink = getattr(isl, "isl_" + sink.isl_name() + "_copy")(sink)
+    must_source = getattr(isl, "isl_" + must_source.isl_name() + "_copy")(must_source)
+    may_source = getattr(isl, "isl_" + may_source.isl_name() + "_copy")(may_source)
+    schedule = getattr(isl, "isl_" + schedule.isl_name() + "_copy")(schedule)
+    must_dep = c_int()
+    may_dep = c_int()
+    must_no_source = c_int()
+    may_no_source = c_int()
+    isl.isl_union_map_compute_flow(
+        sink,
+        must_source,
+        may_source,
+        schedule,
+        byref(must_dep),
+        byref(may_dep),
+        byref(must_no_source),
+        byref(may_no_source),
+    )
+
+    return (
+        UMap.from_ptr(must_dep),
+        UMap.from_ptr(may_dep),
+        USet.from_ptr(must_no_source),
+        USet.from_ptr(may_no_source),
+    )
+
+
+__all__ = ["Set", "Map", "Printer", "Context"]

diff  --git a/polly/utils/pyscop/pyscop.py b/polly/utils/pyscop/pyscop.py
index a7c34815a21bf..382e23d8791b6 100644
--- a/polly/utils/pyscop/pyscop.py
+++ b/polly/utils/pyscop/pyscop.py
@@ -1,49 +1,52 @@
 import json
 from isl import *
 
+
 class Scop:
-  def __init__(self, filename):
-    f = open(filename, 'r')
-    self.json = json.load(f)
-    return
+    def __init__(self, filename):
+        f = open(filename, "r")
+        self.json = json.load(f)
+        return
+
+    def __str__(self):
+        return json.dumps(self.json, indent=2)
 
-  def __str__(self):
-    return json.dumps(self.json, indent=2)
+    def __repr__(self):
+        return str(self)
 
-  def __repr__(self):
-    return str(self)
+    @property
+    def statements(self):
+        return self.json["statements"]
 
-  @property
-  def statements(self):
-    return self.json['statements']
 
 class Transforms:
-  """
-  Create a map that interchanges two dimensions 'A' and 'B'
+    """
+    Create a map that interchanges two dimensions 'A' and 'B'
 
-  numberDimensions: The overall number of dimensions
-  dimensionA: The dimension of dimension 'A'
-  dimensionB: The dimension of dimension 'B'
+    numberDimensions: The overall number of dimensions
+    dimensionA: The dimension of dimension 'A'
+    dimensionB: The dimension of dimension 'B'
 
-  getInterchange(2, 0, 1):
-  {[d0, d1] -> [d1, d0]}
-  """
-  @staticmethod
-  def getInterchange(numberDimensions, dimensionA, dimensionB):
+    getInterchange(2, 0, 1):
+    {[d0, d1] -> [d1, d0]}
+    """
 
-    dims = ['d' + str(i) for i in range(numberDimensions)]
-    dimString = ",".join(dims)
+    @staticmethod
+    def getInterchange(numberDimensions, dimensionA, dimensionB):
 
-    changedDims = dims
-    first = dims[dimensionA]
-    second = dims[dimensionB]
-    changedDims[dimensionA] = second
-    changedDims[dimensionB] = first
-    changedDimString = ",".join(changedDims)
+        dims = ["d" + str(i) for i in range(numberDimensions)]
+        dimString = ",".join(dims)
 
-    return Map("{[%s] -> [%s]}" % (dimString, changedDimString))
+        changedDims = dims
+        first = dims[dimensionA]
+        second = dims[dimensionB]
+        changedDims[dimensionA] = second
+        changedDims[dimensionB] = first
+        changedDimString = ",".join(changedDims)
 
-  """
+        return Map("{[%s] -> [%s]}" % (dimString, changedDimString))
+
+    """
   Create a map that strip mines one dimension
 
   numberDimensions: The overall number of dimensions
@@ -53,16 +56,22 @@ def getInterchange(numberDimensions, dimensionA, dimensionB):
   getStripMine(2, 1, 64):
   {[d0, d1] -> [d0, o, d1] : o % 64 = 0 and o <= d1 <= d1 + 63}
   """
-  @staticmethod
-  def getStripMine(numberDimensions, stripMineDim, factor):
-
-    dims = ['d' + str(i) for i in range(numberDimensions)]
-    dimString = ",".join(dims)
-
-    changedDims = dims
-    smd = dims[stripMineDim]
-    changedDims[stripMineDim] = "o,%s" % smd
-    changedDimString = ",".join(changedDims)
-    string = "{[%s] -> [%s]: o %% %i = 0 and o <= %s <= o + %i}" % \
-          (dimString, changedDimString, factor, smd, factor - 1)
-    return Map(string)
+
+    @staticmethod
+    def getStripMine(numberDimensions, stripMineDim, factor):
+
+        dims = ["d" + str(i) for i in range(numberDimensions)]
+        dimString = ",".join(dims)
+
+        changedDims = dims
+        smd = dims[stripMineDim]
+        changedDims[stripMineDim] = "o,%s" % smd
+        changedDimString = ",".join(changedDims)
+        string = "{[%s] -> [%s]: o %% %i = 0 and o <= %s <= o + %i}" % (
+            dimString,
+            changedDimString,
+            factor,
+            smd,
+            factor - 1,
+        )
+        return Map(string)

diff  --git a/pstl/test/std/lit.local.cfg b/pstl/test/std/lit.local.cfg
index 6b1e2c6579062..f259d8f62c936 100644
--- a/pstl/test/std/lit.local.cfg
+++ b/pstl/test/std/lit.local.cfg
@@ -1,2 +1,2 @@
-if 'parallel-algorithms' not in config.available_features:
+if "parallel-algorithms" not in config.available_features:
     config.unsupported = True

diff  --git a/third-party/benchmark/.ycm_extra_conf.py b/third-party/benchmark/.ycm_extra_conf.py
index 5649ddcc749f0..1482c7b00202e 100644
--- a/third-party/benchmark/.ycm_extra_conf.py
+++ b/third-party/benchmark/.ycm_extra_conf.py
@@ -5,21 +5,25 @@
 # compilation database set (by default, one is not set).
 # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
 flags = [
-'-Wall',
-'-Werror',
-'-pedantic-errors',
-'-std=c++0x',
-'-fno-strict-aliasing',
-'-O3',
-'-DNDEBUG',
-# ...and the same thing goes for the magic -x option which specifies the
-# language that the files to be compiled are written in. This is mostly
-# relevant for c++ headers.
-# For a C project, you would set this to 'c' instead of 'c++'.
-'-x', 'c++',
-'-I', 'include',
-'-isystem', '/usr/include',
-'-isystem', '/usr/local/include',
+    "-Wall",
+    "-Werror",
+    "-pedantic-errors",
+    "-std=c++0x",
+    "-fno-strict-aliasing",
+    "-O3",
+    "-DNDEBUG",
+    # ...and the same thing goes for the magic -x option which specifies the
+    # language that the files to be compiled are written in. This is mostly
+    # relevant for c++ headers.
+    # For a C project, you would set this to 'c' instead of 'c++'.
+    "-x",
+    "c++",
+    "-I",
+    "include",
+    "-isystem",
+    "/usr/include",
+    "-isystem",
+    "/usr/local/include",
 ]
 
 
@@ -29,87 +33,84 @@
 #
 # Most projects will NOT need to set this to anything; you can just change the
 # 'flags' list of compilation flags. Notice that YCM itself uses that approach.
-compilation_database_folder = ''
+compilation_database_folder = ""
 
-if os.path.exists( compilation_database_folder ):
-  database = ycm_core.CompilationDatabase( compilation_database_folder )
+if os.path.exists(compilation_database_folder):
+    database = ycm_core.CompilationDatabase(compilation_database_folder)
 else:
-  database = None
+    database = None
+
+SOURCE_EXTENSIONS = [".cc"]
 
-SOURCE_EXTENSIONS = [ '.cc' ]
 
 def DirectoryOfThisScript():
-  return os.path.dirname( os.path.abspath( __file__ ) )
-
-
-def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
-  if not working_directory:
-    return list( flags )
-  new_flags = []
-  make_next_absolute = False
-  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
-  for flag in flags:
-    new_flag = flag
-
-    if make_next_absolute:
-      make_next_absolute = False
-      if not flag.startswith( '/' ):
-        new_flag = os.path.join( working_directory, flag )
-
-    for path_flag in path_flags:
-      if flag == path_flag:
-        make_next_absolute = True
-        break
-
-      if flag.startswith( path_flag ):
-        path = flag[ len( path_flag ): ]
-        new_flag = path_flag + os.path.join( working_directory, path )
-        break
-
-    if new_flag:
-      new_flags.append( new_flag )
-  return new_flags
-
-
-def IsHeaderFile( filename ):
-  extension = os.path.splitext( filename )[ 1 ]
-  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
-
-
-def GetCompilationInfoForFile( filename ):
-  # The compilation_commands.json file generated by CMake does not have entries
-  # for header files. So we do our best by asking the db for flags for a
-  # corresponding source file, if any. If one exists, the flags for that file
-  # should be good enough.
-  if IsHeaderFile( filename ):
-    basename = os.path.splitext( filename )[ 0 ]
-    for extension in SOURCE_EXTENSIONS:
-      replacement_file = basename + extension
-      if os.path.exists( replacement_file ):
-        compilation_info = database.GetCompilationInfoForFile(
-          replacement_file )
-        if compilation_info.compiler_flags_:
-          return compilation_info
-    return None
-  return database.GetCompilationInfoForFile( filename )
-
-
-def FlagsForFile( filename, **kwargs ):
-  if database:
-    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
-    # python list, but a "list-like" StringVec object
-    compilation_info = GetCompilationInfoForFile( filename )
-    if not compilation_info:
-      return None
-
-    final_flags = MakeRelativePathsInFlagsAbsolute(
-      compilation_info.compiler_flags_,
-      compilation_info.compiler_working_dir_ )
-  else:
-    relative_to = DirectoryOfThisScript()
-    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
-
-  return {
-    'flags': final_flags,
-    'do_cache': True
-  }
+    return os.path.dirname(os.path.abspath(__file__))
+
+
+def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
+    if not working_directory:
+        return list(flags)
+    new_flags = []
+    make_next_absolute = False
+    path_flags = ["-isystem", "-I", "-iquote", "--sysroot="]
+    for flag in flags:
+        new_flag = flag
+
+        if make_next_absolute:
+            make_next_absolute = False
+            if not flag.startswith("/"):
+                new_flag = os.path.join(working_directory, flag)
+
+        for path_flag in path_flags:
+            if flag == path_flag:
+                make_next_absolute = True
+                break
+
+            if flag.startswith(path_flag):
+                path = flag[len(path_flag) :]
+                new_flag = path_flag + os.path.join(working_directory, path)
+                break
+
+        if new_flag:
+            new_flags.append(new_flag)
+    return new_flags
+
+
+def IsHeaderFile(filename):
+    extension = os.path.splitext(filename)[1]
+    return extension in [".h", ".hxx", ".hpp", ".hh"]
+
+
+def GetCompilationInfoForFile(filename):
+    # The compilation_commands.json file generated by CMake does not have entries
+    # for header files. So we do our best by asking the db for flags for a
+    # corresponding source file, if any. If one exists, the flags for that file
+    # should be good enough.
+    if IsHeaderFile(filename):
+        basename = os.path.splitext(filename)[0]
+        for extension in SOURCE_EXTENSIONS:
+            replacement_file = basename + extension
+            if os.path.exists(replacement_file):
+                compilation_info = database.GetCompilationInfoForFile(replacement_file)
+                if compilation_info.compiler_flags_:
+                    return compilation_info
+        return None
+    return database.GetCompilationInfoForFile(filename)
+
+
+def FlagsForFile(filename, **kwargs):
+    if database:
+        # Bear in mind that compilation_info.compiler_flags_ does NOT return a
+        # python list, but a "list-like" StringVec object
+        compilation_info = GetCompilationInfoForFile(filename)
+        if not compilation_info:
+            return None
+
+        final_flags = MakeRelativePathsInFlagsAbsolute(
+            compilation_info.compiler_flags_, compilation_info.compiler_working_dir_
+        )
+    else:
+        relative_to = DirectoryOfThisScript()
+        final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
+
+    return {"flags": final_flags, "do_cache": True}

diff  --git a/third-party/benchmark/bindings/python/google_benchmark/example.py b/third-party/benchmark/bindings/python/google_benchmark/example.py
index 487acc9f1e092..fb0234b8fd7e3 100644
--- a/third-party/benchmark/bindings/python/google_benchmark/example.py
+++ b/third-party/benchmark/bindings/python/google_benchmark/example.py
@@ -38,6 +38,7 @@ def sum_million(state):
     while state:
         sum(range(1_000_000))
 
+
 @benchmark.register
 def pause_timing(state):
     """Pause timing every iteration."""

diff  --git a/third-party/benchmark/setup.py b/third-party/benchmark/setup.py
index 4eaccf849801e..83069e5668397 100644
--- a/third-party/benchmark/setup.py
+++ b/third-party/benchmark/setup.py
@@ -95,10 +95,13 @@ def bazel_build(self, ext):
 
         self.spawn(bazel_argv)
 
-        shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
+        shared_lib_suffix = ".dll" if IS_WINDOWS else ".so"
         ext_bazel_bin_path = os.path.join(
-            self.build_temp, 'bazel-bin',
-            ext.relpath, ext.target_name + shared_lib_suffix)
+            self.build_temp,
+            "bazel-bin",
+            ext.relpath,
+            ext.target_name + shared_lib_suffix,
+        )
 
         ext_dest_path = self.get_ext_fullpath(ext.name)
         ext_dest_dir = os.path.dirname(ext_dest_path)

diff  --git a/third-party/benchmark/tools/compare.py b/third-party/benchmark/tools/compare.py
index 01d2c89f50fbb..f1504c96fa2ba 100755
--- a/third-party/benchmark/tools/compare.py
+++ b/third-party/benchmark/tools/compare.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 import unittest
+
 """
 compare.py - versatile benchmark output compare tool
 """
@@ -20,160 +21,194 @@ def check_inputs(in1, in2, flags):
     """
     in1_kind, in1_err = classify_input_file(in1)
     in2_kind, in2_err = classify_input_file(in2)
-    output_file = find_benchmark_flag('--benchmark_out=', flags)
-    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
+    output_file = find_benchmark_flag("--benchmark_out=", flags)
+    output_type = find_benchmark_flag("--benchmark_out_format=", flags)
     if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
-        print(("WARNING: '--benchmark_out=%s' will be passed to both "
-               "benchmarks causing it to be overwritten") % output_file)
+        print(
+            (
+                "WARNING: '--benchmark_out=%s' will be passed to both "
+                "benchmarks causing it to be overwritten"
+            )
+            % output_file
+        )
     if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
-        print("WARNING: passing optional flags has no effect since both "
-              "inputs are JSON")
-    if output_type is not None and output_type != 'json':
-        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
-               " is not supported.") % output_type)
+        print(
+            "WARNING: passing optional flags has no effect since both "
+            "inputs are JSON"
+        )
+    if output_type is not None and output_type != "json":
+        print(
+            (
+                "ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
+                " is not supported."
+            )
+            % output_type
+        )
         sys.exit(1)
 
 
 def create_parser():
-    parser = ArgumentParser(
-        description='versatile benchmark output compare tool')
+    parser = ArgumentParser(description="versatile benchmark output compare tool")
 
     parser.add_argument(
-        '-a',
-        '--display_aggregates_only',
-        dest='display_aggregates_only',
+        "-a",
+        "--display_aggregates_only",
+        dest="display_aggregates_only",
         action="store_true",
         help="If there are repetitions, by default, we display everything - the"
-             " actual runs, and the aggregates computed. Sometimes, it is "
-             "desirable to only view the aggregates. E.g. when there are a lot "
-             "of repetitions. Do note that only the display is affected. "
-             "Internally, all the actual runs are still used, e.g. for U test.")
+        " actual runs, and the aggregates computed. Sometimes, it is "
+        "desirable to only view the aggregates. E.g. when there are a lot "
+        "of repetitions. Do note that only the display is affected. "
+        "Internally, all the actual runs are still used, e.g. for U test.",
+    )
 
     parser.add_argument(
-        '--no-color',
-        dest='color',
+        "--no-color",
+        dest="color",
         default=True,
         action="store_false",
-        help="Do not use colors in the terminal output"
+        help="Do not use colors in the terminal output",
     )
 
     parser.add_argument(
-        '-d',
-        '--dump_to_json',
-        dest='dump_to_json',
-        help="Additionally, dump benchmark comparison output to this file in JSON format.")
+        "-d",
+        "--dump_to_json",
+        dest="dump_to_json",
+        help="Additionally, dump benchmark comparison output to this file in JSON format.",
+    )
 
     utest = parser.add_argument_group()
     utest.add_argument(
-        '--no-utest',
-        dest='utest',
+        "--no-utest",
+        dest="utest",
         default=True,
         action="store_false",
-        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
+        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(
+            report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS
+        ),
+    )
     alpha_default = 0.05
     utest.add_argument(
         "--alpha",
-        dest='utest_alpha',
+        dest="utest_alpha",
         default=alpha_default,
         type=float,
-        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
-        alpha_default)
+        help=(
+            "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
+        )
+        % alpha_default,
+    )
 
     subparsers = parser.add_subparsers(
-        help='This tool has multiple modes of operation:',
-        dest='mode')
+        help="This tool has multiple modes of operation:", dest="mode"
+    )
 
     parser_a = subparsers.add_parser(
-        'benchmarks',
-        help='The most simple use-case, compare all the output of these two benchmarks')
-    baseline = parser_a.add_argument_group(
-        'baseline', 'The benchmark baseline')
+        "benchmarks",
+        help="The most simple use-case, compare all the output of these two benchmarks",
+    )
+    baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
-        'test_baseline',
-        metavar='test_baseline',
-        type=argparse.FileType('r'),
+        "test_baseline",
+        metavar="test_baseline",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     contender = parser_a.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
+        "contender", "The benchmark that will be compared against the baseline"
+    )
     contender.add_argument(
-        'test_contender',
-        metavar='test_contender',
-        type=argparse.FileType('r'),
+        "test_contender",
+        metavar="test_contender",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     parser_a.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
+        "benchmark_options",
+        metavar="benchmark_options",
         nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
+        help="Arguments to pass when running benchmark executables",
+    )
 
     parser_b = subparsers.add_parser(
-        'filters', help='Compare filter one with the filter two of benchmark')
-    baseline = parser_b.add_argument_group(
-        'baseline', 'The benchmark baseline')
+        "filters", help="Compare filter one with the filter two of benchmark"
+    )
+    baseline = parser_b.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
-        'test',
-        metavar='test',
-        type=argparse.FileType('r'),
+        "test",
+        metavar="test",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     baseline.add_argument(
-        'filter_baseline',
-        metavar='filter_baseline',
+        "filter_baseline",
+        metavar="filter_baseline",
         type=str,
         nargs=1,
-        help='The first filter, that will be used as baseline')
+        help="The first filter, that will be used as baseline",
+    )
     contender = parser_b.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
+        "contender", "The benchmark that will be compared against the baseline"
+    )
     contender.add_argument(
-        'filter_contender',
-        metavar='filter_contender',
+        "filter_contender",
+        metavar="filter_contender",
         type=str,
         nargs=1,
-        help='The second filter, that will be compared against the baseline')
+        help="The second filter, that will be compared against the baseline",
+    )
     parser_b.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
+        "benchmark_options",
+        metavar="benchmark_options",
         nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
+        help="Arguments to pass when running benchmark executables",
+    )
 
     parser_c = subparsers.add_parser(
-        'benchmarksfiltered',
-        help='Compare filter one of first benchmark with filter two of the second benchmark')
-    baseline = parser_c.add_argument_group(
-        'baseline', 'The benchmark baseline')
+        "benchmarksfiltered",
+        help="Compare filter one of first benchmark with filter two of the second benchmark",
+    )
+    baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
-        'test_baseline',
-        metavar='test_baseline',
-        type=argparse.FileType('r'),
+        "test_baseline",
+        metavar="test_baseline",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     baseline.add_argument(
-        'filter_baseline',
-        metavar='filter_baseline',
+        "filter_baseline",
+        metavar="filter_baseline",
         type=str,
         nargs=1,
-        help='The first filter, that will be used as baseline')
+        help="The first filter, that will be used as baseline",
+    )
     contender = parser_c.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
+        "contender", "The benchmark that will be compared against the baseline"
+    )
     contender.add_argument(
-        'test_contender',
-        metavar='test_contender',
-        type=argparse.FileType('r'),
+        "test_contender",
+        metavar="test_contender",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
+        help="The second benchmark executable or JSON output file, that will be compared against the baseline",
+    )
     contender.add_argument(
-        'filter_contender',
-        metavar='filter_contender',
+        "filter_contender",
+        metavar="filter_contender",
         type=str,
         nargs=1,
-        help='The second filter, that will be compared against the baseline')
+        help="The second filter, that will be compared against the baseline",
+    )
     parser_c.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
+        "benchmark_options",
+        metavar="benchmark_options",
         nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
+        help="Arguments to pass when running benchmark executables",
+    )
 
     return parser
 
@@ -188,16 +223,16 @@ def main():
     assert not unknown_args
     benchmark_options = args.benchmark_options
 
-    if args.mode == 'benchmarks':
+    if args.mode == "benchmarks":
         test_baseline = args.test_baseline[0].name
         test_contender = args.test_contender[0].name
-        filter_baseline = ''
-        filter_contender = ''
+        filter_baseline = ""
+        filter_contender = ""
 
         # NOTE: if test_baseline == test_contender, you are analyzing the stdev
 
-        description = 'Comparing %s to %s' % (test_baseline, test_contender)
-    elif args.mode == 'filters':
+        description = "Comparing %s to %s" % (test_baseline, test_contender)
+    elif args.mode == "filters":
         test_baseline = args.test[0].name
         test_contender = args.test[0].name
         filter_baseline = args.filter_baseline[0]
@@ -206,9 +241,12 @@ def main():
         # NOTE: if filter_baseline == filter_contender, you are analyzing the
         # stdev
 
-        description = 'Comparing %s to %s (from %s)' % (
-            filter_baseline, filter_contender, args.test[0].name)
-    elif args.mode == 'benchmarksfiltered':
+        description = "Comparing %s to %s (from %s)" % (
+            filter_baseline,
+            filter_contender,
+            args.test[0].name,
+        )
+    elif args.mode == "benchmarksfiltered":
         test_baseline = args.test_baseline[0].name
         test_contender = args.test_contender[0].name
         filter_baseline = args.filter_baseline[0]
@@ -217,8 +255,12 @@ def main():
         # NOTE: if test_baseline == test_contender and
         # filter_baseline == filter_contender, you are analyzing the stdev
 
-        description = 'Comparing %s (from %s) to %s (from %s)' % (
-            filter_baseline, test_baseline, filter_contender, test_contender)
+        description = "Comparing %s (from %s) to %s (from %s)" % (
+            filter_baseline,
+            test_baseline,
+            filter_contender,
+            test_contender,
+        )
     else:
         # should never happen
         print("Unrecognized mode of operation: '%s'" % args.mode)
@@ -228,199 +270,229 @@ def main():
     check_inputs(test_baseline, test_contender, benchmark_options)
 
     if args.display_aggregates_only:
-        benchmark_options += ['--benchmark_display_aggregates_only=true']
+        benchmark_options += ["--benchmark_display_aggregates_only=true"]
 
     options_baseline = []
     options_contender = []
 
     if filter_baseline and filter_contender:
-        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
-        options_contender = ['--benchmark_filter=%s' % filter_contender]
+        options_baseline = ["--benchmark_filter=%s" % filter_baseline]
+        options_contender = ["--benchmark_filter=%s" % filter_contender]
 
     # Run the benchmarks and report the results
-    json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
-        test_baseline, benchmark_options + options_baseline))
-    json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
-        test_contender, benchmark_options + options_contender))
+    json1 = json1_orig = gbench.util.sort_benchmark_results(
+        gbench.util.run_or_load_benchmark(
+            test_baseline, benchmark_options + options_baseline
+        )
+    )
+    json2 = json2_orig = gbench.util.sort_benchmark_results(
+        gbench.util.run_or_load_benchmark(
+            test_contender, benchmark_options + options_contender
+        )
+    )
 
     # Now, filter the benchmarks so that the difference report can work
     if filter_baseline and filter_contender:
-        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
-        json1 = gbench.report.filter_benchmark(
-            json1_orig, filter_baseline, replacement)
+        replacement = "[%s vs. %s]" % (filter_baseline, filter_contender)
+        json1 = gbench.report.filter_benchmark(json1_orig, filter_baseline, replacement)
         json2 = gbench.report.filter_benchmark(
-            json2_orig, filter_contender, replacement)
+            json2_orig, filter_contender, replacement
+        )
 
-    diff_report = gbench.report.get_difference_report(
-        json1, json2, args.utest)
+    diff_report = gbench.report.get_difference_report(json1, json2, args.utest)
     output_lines = gbench.report.print_difference_report(
         diff_report,
         args.display_aggregates_only,
-        args.utest, args.utest_alpha, args.color)
+        args.utest,
+        args.utest_alpha,
+        args.color,
+    )
     print(description)
     for ln in output_lines:
         print(ln)
 
     # Optionally, diff and output to JSON
     if args.dump_to_json is not None:
-        with open(args.dump_to_json, 'w') as f_json:
+        with open(args.dump_to_json, "w") as f_json:
             json.dump(diff_report, f_json)
 
+
 class TestParser(unittest.TestCase):
     def setUp(self):
         self.parser = create_parser()
         testInputs = os.path.join(
-            os.path.dirname(
-                os.path.realpath(__file__)),
-            'gbench',
-            'Inputs')
-        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
-        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
+            os.path.dirname(os.path.realpath(__file__)), "gbench", "Inputs"
+        )
+        self.testInput0 = os.path.join(testInputs, "test1_run1.json")
+        self.testInput1 = os.path.join(testInputs, "test1_run2.json")
 
     def test_benchmarks_basic(self):
         parsed = self.parser.parse_args(
-            ['benchmarks', self.testInput0, self.testInput1])
+            ["benchmarks", self.testInput0, self.testInput1]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
         self.assertFalse(parsed.benchmark_options)
 
     def test_benchmarks_basic_without_utest(self):
         parsed = self.parser.parse_args(
-            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
+            ["--no-utest", "benchmarks", self.testInput0, self.testInput1]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertFalse(parsed.utest)
         self.assertEqual(parsed.utest_alpha, 0.05)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
         self.assertFalse(parsed.benchmark_options)
 
     def test_benchmarks_basic_display_aggregates_only(self):
         parsed = self.parser.parse_args(
-            ['-a', 'benchmarks', self.testInput0, self.testInput1])
+            ["-a", "benchmarks", self.testInput0, self.testInput1]
+        )
         self.assertTrue(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
         self.assertFalse(parsed.benchmark_options)
 
     def test_benchmarks_basic_with_utest_alpha(self):
         parsed = self.parser.parse_args(
-            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
+            ["--alpha=0.314", "benchmarks", self.testInput0, self.testInput1]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
         self.assertEqual(parsed.utest_alpha, 0.314)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
         self.assertFalse(parsed.benchmark_options)
 
     def test_benchmarks_basic_without_utest_with_utest_alpha(self):
         parsed = self.parser.parse_args(
-            ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
+            [
+                "--no-utest",
+                "--alpha=0.314",
+                "benchmarks",
+                self.testInput0,
+                self.testInput1,
+            ]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertFalse(parsed.utest)
         self.assertEqual(parsed.utest_alpha, 0.314)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
         self.assertFalse(parsed.benchmark_options)
 
     def test_benchmarks_with_remainder(self):
         parsed = self.parser.parse_args(
-            ['benchmarks', self.testInput0, self.testInput1, 'd'])
+            ["benchmarks", self.testInput0, self.testInput1, "d"]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
-        self.assertEqual(parsed.benchmark_options, ['d'])
+        self.assertEqual(parsed.benchmark_options, ["d"])
 
     def test_benchmarks_with_remainder_after_doubleminus(self):
         parsed = self.parser.parse_args(
-            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
+            ["benchmarks", self.testInput0, self.testInput1, "--", "e"]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarks')
+        self.assertEqual(parsed.mode, "benchmarks")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
-        self.assertEqual(parsed.benchmark_options, ['e'])
+        self.assertEqual(parsed.benchmark_options, ["e"])
 
     def test_filters_basic(self):
-        parsed = self.parser.parse_args(
-            ['filters', self.testInput0, 'c', 'd'])
+        parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d"])
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'filters')
+        self.assertEqual(parsed.mode, "filters")
         self.assertEqual(parsed.test[0].name, self.testInput0)
-        self.assertEqual(parsed.filter_baseline[0], 'c')
-        self.assertEqual(parsed.filter_contender[0], 'd')
+        self.assertEqual(parsed.filter_baseline[0], "c")
+        self.assertEqual(parsed.filter_contender[0], "d")
         self.assertFalse(parsed.benchmark_options)
 
     def test_filters_with_remainder(self):
-        parsed = self.parser.parse_args(
-            ['filters', self.testInput0, 'c', 'd', 'e'])
+        parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d", "e"])
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'filters')
+        self.assertEqual(parsed.mode, "filters")
         self.assertEqual(parsed.test[0].name, self.testInput0)
-        self.assertEqual(parsed.filter_baseline[0], 'c')
-        self.assertEqual(parsed.filter_contender[0], 'd')
-        self.assertEqual(parsed.benchmark_options, ['e'])
+        self.assertEqual(parsed.filter_baseline[0], "c")
+        self.assertEqual(parsed.filter_contender[0], "d")
+        self.assertEqual(parsed.benchmark_options, ["e"])
 
     def test_filters_with_remainder_after_doubleminus(self):
         parsed = self.parser.parse_args(
-            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
+            ["filters", self.testInput0, "c", "d", "--", "f"]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'filters')
+        self.assertEqual(parsed.mode, "filters")
         self.assertEqual(parsed.test[0].name, self.testInput0)
-        self.assertEqual(parsed.filter_baseline[0], 'c')
-        self.assertEqual(parsed.filter_contender[0], 'd')
-        self.assertEqual(parsed.benchmark_options, ['f'])
+        self.assertEqual(parsed.filter_baseline[0], "c")
+        self.assertEqual(parsed.filter_contender[0], "d")
+        self.assertEqual(parsed.benchmark_options, ["f"])
 
     def test_benchmarksfiltered_basic(self):
         parsed = self.parser.parse_args(
-            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
+            ["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e"]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarksfiltered')
+        self.assertEqual(parsed.mode, "benchmarksfiltered")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
-        self.assertEqual(parsed.filter_baseline[0], 'c')
+        self.assertEqual(parsed.filter_baseline[0], "c")
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
-        self.assertEqual(parsed.filter_contender[0], 'e')
+        self.assertEqual(parsed.filter_contender[0], "e")
         self.assertFalse(parsed.benchmark_options)
 
     def test_benchmarksfiltered_with_remainder(self):
         parsed = self.parser.parse_args(
-            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
+            ["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e", "f"]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarksfiltered')
+        self.assertEqual(parsed.mode, "benchmarksfiltered")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
-        self.assertEqual(parsed.filter_baseline[0], 'c')
+        self.assertEqual(parsed.filter_baseline[0], "c")
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
-        self.assertEqual(parsed.filter_contender[0], 'e')
-        self.assertEqual(parsed.benchmark_options[0], 'f')
+        self.assertEqual(parsed.filter_contender[0], "e")
+        self.assertEqual(parsed.benchmark_options[0], "f")
 
     def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
         parsed = self.parser.parse_args(
-            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
+            [
+                "benchmarksfiltered",
+                self.testInput0,
+                "c",
+                self.testInput1,
+                "e",
+                "--",
+                "g",
+            ]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
-        self.assertEqual(parsed.mode, 'benchmarksfiltered')
+        self.assertEqual(parsed.mode, "benchmarksfiltered")
         self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
-        self.assertEqual(parsed.filter_baseline[0], 'c')
+        self.assertEqual(parsed.filter_baseline[0], "c")
         self.assertEqual(parsed.test_contender[0].name, self.testInput1)
-        self.assertEqual(parsed.filter_contender[0], 'e')
-        self.assertEqual(parsed.benchmark_options[0], 'g')
+        self.assertEqual(parsed.filter_contender[0], "e")
+        self.assertEqual(parsed.benchmark_options[0], "g")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     # unittest.main()
     main()
 

diff --git a/third-party/benchmark/tools/gbench/__init__.py b/third-party/benchmark/tools/gbench/__init__.py
index fce1a1acfbb33..ffca396b4c3f2 100644
--- a/third-party/benchmark/tools/gbench/__init__.py
+++ b/third-party/benchmark/tools/gbench/__init__.py
@@ -1,8 +1,8 @@
 """Google Benchmark tooling"""
 
-__author__ = 'Eric Fiselier'
-__email__ = 'eric at efcs.ca'
+__author__ = "Eric Fiselier"
+__email__ = "eric at efcs.ca"
 __versioninfo__ = (0, 5, 0)
-__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
+__version__ = ".".join(str(v) for v in __versioninfo__) + "dev"
 
 __all__ = []

diff --git a/third-party/benchmark/tools/gbench/report.py b/third-party/benchmark/tools/gbench/report.py
index 4c798baf69f2d..5092b0bf1469c 100644
--- a/third-party/benchmark/tools/gbench/report.py
+++ b/third-party/benchmark/tools/gbench/report.py
@@ -18,26 +18,25 @@ def __init__(self, name, code):
         self.code = code
 
     def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.name, self.code))
+        return "%s%r" % (self.__class__.__name__, (self.name, self.code))
 
     def __format__(self, format):
         return self.code
 
 
 # Benchmark Colors Enumeration
-BC_NONE = BenchmarkColor('NONE', '')
-BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
-BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
-BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
-BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
-BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
-BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
-BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
-BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
-BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
-BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
-BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
+BC_NONE = BenchmarkColor("NONE", "")
+BC_MAGENTA = BenchmarkColor("MAGENTA", "\033[95m")
+BC_CYAN = BenchmarkColor("CYAN", "\033[96m")
+BC_OKBLUE = BenchmarkColor("OKBLUE", "\033[94m")
+BC_OKGREEN = BenchmarkColor("OKGREEN", "\033[32m")
+BC_HEADER = BenchmarkColor("HEADER", "\033[92m")
+BC_WARNING = BenchmarkColor("WARNING", "\033[93m")
+BC_WHITE = BenchmarkColor("WHITE", "\033[97m")
+BC_FAIL = BenchmarkColor("FAIL", "\033[91m")
+BC_ENDC = BenchmarkColor("ENDC", "\033[0m")
+BC_BOLD = BenchmarkColor("BOLD", "\033[1m")
+BC_UNDERLINE = BenchmarkColor("UNDERLINE", "\033[4m")
 
 UTEST_MIN_REPETITIONS = 2
 UTEST_OPTIMAL_REPETITIONS = 9  # Lowest reasonable number, More is better.
@@ -53,10 +52,11 @@ def color_format(use_color, fmt_str, *args, **kwargs):
     """
     assert use_color is True or use_color is False
     if not use_color:
-        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
-                for arg in args]
-        kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
-                  for key, arg in kwargs.items()}
+        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args]
+        kwargs = {
+            key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
+            for key, arg in kwargs.items()
+        }
     return fmt_str.format(*args, **kwargs)
 
 
@@ -67,8 +67,8 @@ def find_longest_name(benchmark_list):
     """
     longest_name = 1
     for bc in benchmark_list:
-        if len(bc['name']) > longest_name:
-            longest_name = len(bc['name'])
+        if len(bc["name"]) > longest_name:
+            longest_name = len(bc["name"])
     return longest_name
 
 
@@ -89,13 +89,13 @@ def filter_benchmark(json_orig, family, replacement=""):
     """
     regex = re.compile(family)
     filtered = {}
-    filtered['benchmarks'] = []
-    for be in json_orig['benchmarks']:
-        if not regex.search(be['name']):
+    filtered["benchmarks"] = []
+    for be in json_orig["benchmarks"]:
+        if not regex.search(be["name"]):
             continue
         filteredbench = copy.deepcopy(be)  # Do NOT modify the old name!
-        filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
-        filtered['benchmarks'].append(filteredbench)
+        filteredbench["name"] = regex.sub(replacement, filteredbench["name"])
+        filtered["benchmarks"].append(filteredbench)
     return filtered
 
 
@@ -104,9 +104,11 @@ def get_unique_benchmark_names(json):
     While *keeping* the order, give all the unique 'names' used for benchmarks.
     """
     seen = set()
-    uniqued = [x['name'] for x in json['benchmarks']
-               if x['name'] not in seen and
-               (seen.add(x['name']) or True)]
+    uniqued = [
+        x["name"]
+        for x in json["benchmarks"]
+        if x["name"] not in seen and (seen.add(x["name"]) or True)
+    ]
     return uniqued
 
 
@@ -119,7 +121,7 @@ def intersect(list1, list2):
 
 
 def is_potentially_comparable_benchmark(x):
-    return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x)
+    return "time_unit" in x and "real_time" in x and "cpu_time" in x
 
 
 def partition_benchmarks(json1, json2):
@@ -136,18 +138,24 @@ def partition_benchmarks(json1, json2):
         time_unit = None
         # Pick the time unit from the first entry of the lhs benchmark.
         # We should be careful not to crash with unexpected input.
-        for x in json1['benchmarks']:
-            if (x['name'] == name and is_potentially_comparable_benchmark(x)):
-                time_unit = x['time_unit']
+        for x in json1["benchmarks"]:
+            if x["name"] == name and is_potentially_comparable_benchmark(x):
+                time_unit = x["time_unit"]
                 break
         if time_unit is None:
             continue
         # Filter by name and time unit.
         # All the repetitions are assumed to be comparable.
-        lhs = [x for x in json1['benchmarks'] if x['name'] == name and
-               x['time_unit'] == time_unit]
-        rhs = [x for x in json2['benchmarks'] if x['name'] == name and
-               x['time_unit'] == time_unit]
+        lhs = [
+            x
+            for x in json1["benchmarks"]
+            if x["name"] == name and x["time_unit"] == time_unit
+        ]
+        rhs = [
+            x
+            for x in json2["benchmarks"]
+            if x["name"] == name and x["time_unit"] == time_unit
+        ]
         partitions.append([lhs, rhs])
     return partitions
 
@@ -157,9 +165,9 @@ def get_timedelta_field_as_seconds(benchmark, field_name):
     Get value of field_name field of benchmark, which is time with time unit
     time_unit, as time in seconds.
     """
-    time_unit = benchmark['time_unit'] if 'time_unit' in benchmark else 's'
+    time_unit = benchmark["time_unit"] if "time_unit" in benchmark else "s"
     dt = Timedelta(benchmark[field_name], time_unit)
-    return dt / Timedelta(1, 's')
+    return dt / Timedelta(1, "s")
 
 
 def calculate_geomean(json):
@@ -168,11 +176,15 @@ def calculate_geomean(json):
     and calculate their geomean.
     """
     times = []
-    for benchmark in json['benchmarks']:
-        if 'run_type' in benchmark and benchmark['run_type'] == 'aggregate':
+    for benchmark in json["benchmarks"]:
+        if "run_type" in benchmark and benchmark["run_type"] == "aggregate":
             continue
-        times.append([get_timedelta_field_as_seconds(benchmark, 'real_time'),
-                      get_timedelta_field_as_seconds(benchmark, 'cpu_time')])
+        times.append(
+            [
+                get_timedelta_field_as_seconds(benchmark, "real_time"),
+                get_timedelta_field_as_seconds(benchmark, "cpu_time"),
+            ]
+        )
     return gmean(times) if times else array([])
 
 
@@ -184,19 +196,23 @@ def extract_field(partition, field_name):
 
 
 def calc_utest(timings_cpu, timings_time):
-    min_rep_cnt = min(len(timings_time[0]),
-                      len(timings_time[1]),
-                      len(timings_cpu[0]),
-                      len(timings_cpu[1]))
+    min_rep_cnt = min(
+        len(timings_time[0]),
+        len(timings_time[1]),
+        len(timings_cpu[0]),
+        len(timings_cpu[1]),
+    )
 
     # Does *everything* has at least UTEST_MIN_REPETITIONS repetitions?
     if min_rep_cnt < UTEST_MIN_REPETITIONS:
         return False, None, None
 
     time_pvalue = mannwhitneyu(
-        timings_time[0], timings_time[1], alternative='two-sided').pvalue
+        timings_time[0], timings_time[1], alternative="two-sided"
+    ).pvalue
     cpu_pvalue = mannwhitneyu(
-        timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
+        timings_cpu[0], timings_cpu[1], alternative="two-sided"
+    ).pvalue
 
     return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue
 
@@ -206,38 +222,46 @@ def get_utest_color(pval):
         return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
 
     # Check if we failed miserably with minimum required repetitions for utest
-    if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None:
+    if (
+        not utest["have_optimal_repetitions"]
+        and utest["cpu_pvalue"] is None
+        and utest["time_pvalue"] is None
+    ):
         return []
 
     dsc = "U Test, Repetitions: {} vs {}".format(
-        utest['nr_of_repetitions'], utest['nr_of_repetitions_other'])
+        utest["nr_of_repetitions"], utest["nr_of_repetitions_other"]
+    )
     dsc_color = BC_OKGREEN
 
     # We still got some results to show but issue a warning about it.
-    if not utest['have_optimal_repetitions']:
+    if not utest["have_optimal_repetitions"]:
         dsc_color = BC_WARNING
         dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
-            UTEST_OPTIMAL_REPETITIONS)
+            UTEST_OPTIMAL_REPETITIONS
+        )
 
     special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{}      {}"
 
-    return [color_format(use_color,
-                         special_str,
-                         BC_HEADER,
-                         "{}{}".format(bc_name, UTEST_COL_NAME),
-                         first_col_width,
-                         get_utest_color(
-                             utest['time_pvalue']), utest['time_pvalue'],
-                         get_utest_color(
-                             utest['cpu_pvalue']), utest['cpu_pvalue'],
-                         dsc_color, dsc,
-                         endc=BC_ENDC)]
-
-
-def get_difference_report(
-        json1,
-        json2,
-        utest=False):
+    return [
+        color_format(
+            use_color,
+            special_str,
+            BC_HEADER,
+            "{}{}".format(bc_name, UTEST_COL_NAME),
+            first_col_width,
+            get_utest_color(utest["time_pvalue"]),
+            utest["time_pvalue"],
+            get_utest_color(utest["cpu_pvalue"]),
+            utest["cpu_pvalue"],
+            dsc_color,
+            dsc,
+            endc=BC_ENDC,
+        )
+    ]
+
+
+def get_difference_report(json1, json2, utest=False):
     """
     Calculate and report the difference between each test of two benchmarks
     runs specified as 'json1' and 'json2'. Output is another json containing
@@ -248,36 +272,39 @@ def get_difference_report(
     diff_report = []
     partitions = partition_benchmarks(json1, json2)
     for partition in partitions:
-        benchmark_name = partition[0][0]['name']
-        time_unit = partition[0][0]['time_unit']
+        benchmark_name = partition[0][0]["name"]
+        time_unit = partition[0][0]["time_unit"]
         measurements = []
         utest_results = {}
         # Careful, we may have different repetition count.
         for i in range(min(len(partition[0]), len(partition[1]))):
             bn = partition[0][i]
             other_bench = partition[1][i]
-            measurements.append({
-                'real_time': bn['real_time'],
-                'cpu_time': bn['cpu_time'],
-                'real_time_other': other_bench['real_time'],
-                'cpu_time_other': other_bench['cpu_time'],
-                'time': calculate_change(bn['real_time'], other_bench['real_time']),
-                'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time'])
-            })
+            measurements.append(
+                {
+                    "real_time": bn["real_time"],
+                    "cpu_time": bn["cpu_time"],
+                    "real_time_other": other_bench["real_time"],
+                    "cpu_time_other": other_bench["cpu_time"],
+                    "time": calculate_change(bn["real_time"], other_bench["real_time"]),
+                    "cpu": calculate_change(bn["cpu_time"], other_bench["cpu_time"]),
+                }
+            )
 
         # After processing the whole partition, if requested, do the U test.
         if utest:
-            timings_cpu = extract_field(partition, 'cpu_time')
-            timings_time = extract_field(partition, 'real_time')
+            timings_cpu = extract_field(partition, "cpu_time")
+            timings_time = extract_field(partition, "real_time")
             have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(
-                timings_cpu, timings_time)
+                timings_cpu, timings_time
+            )
             if cpu_pvalue and time_pvalue:
                 utest_results = {
-                    'have_optimal_repetitions': have_optimal_repetitions,
-                    'cpu_pvalue': cpu_pvalue,
-                    'time_pvalue': time_pvalue,
-                    'nr_of_repetitions': len(timings_cpu[0]),
-                    'nr_of_repetitions_other': len(timings_cpu[1])
+                    "have_optimal_repetitions": have_optimal_repetitions,
+                    "cpu_pvalue": cpu_pvalue,
+                    "time_pvalue": time_pvalue,
+                    "nr_of_repetitions": len(timings_cpu[0]),
+                    "nr_of_repetitions_other": len(timings_cpu[1]),
                 }
 
         # Store only if we had any measurements for given benchmark.
@@ -285,45 +312,58 @@ def get_difference_report(
         # time units which are not compatible with other time units in the
         # benchmark suite.
         if measurements:
-            run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else ''
-            aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
-            diff_report.append({
-                'name': benchmark_name,
-                'measurements': measurements,
-                'time_unit': time_unit,
-                'run_type': run_type,
-                'aggregate_name': aggregate_name,
-                'utest': utest_results
-            })
+            run_type = (
+                partition[0][0]["run_type"] if "run_type" in partition[0][0] else ""
+            )
+            aggregate_name = (
+                partition[0][0]["aggregate_name"]
+                if run_type == "aggregate" and "aggregate_name" in partition[0][0]
+                else ""
+            )
+            diff_report.append(
+                {
+                    "name": benchmark_name,
+                    "measurements": measurements,
+                    "time_unit": time_unit,
+                    "run_type": run_type,
+                    "aggregate_name": aggregate_name,
+                    "utest": utest_results,
+                }
+            )
 
     lhs_gmean = calculate_geomean(json1)
     rhs_gmean = calculate_geomean(json2)
     if lhs_gmean.any() and rhs_gmean.any():
-        diff_report.append({
-            'name': 'OVERALL_GEOMEAN',
-            'measurements': [{
-                'real_time': lhs_gmean[0],
-                'cpu_time': lhs_gmean[1],
-                'real_time_other': rhs_gmean[0],
-                'cpu_time_other': rhs_gmean[1],
-                'time': calculate_change(lhs_gmean[0], rhs_gmean[0]),
-                'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1])
-            }],
-            'time_unit': 's',
-            'run_type': 'aggregate',
-            'aggregate_name': 'geomean',
-            'utest': {}
-        })
+        diff_report.append(
+            {
+                "name": "OVERALL_GEOMEAN",
+                "measurements": [
+                    {
+                        "real_time": lhs_gmean[0],
+                        "cpu_time": lhs_gmean[1],
+                        "real_time_other": rhs_gmean[0],
+                        "cpu_time_other": rhs_gmean[1],
+                        "time": calculate_change(lhs_gmean[0], rhs_gmean[0]),
+                        "cpu": calculate_change(lhs_gmean[1], rhs_gmean[1]),
+                    }
+                ],
+                "time_unit": "s",
+                "run_type": "aggregate",
+                "aggregate_name": "geomean",
+                "utest": {},
+            }
+        )
 
     return diff_report
 
 
 def print_difference_report(
-        json_diff_report,
-        include_aggregates_only=False,
-        utest=False,
-        utest_alpha=0.05,
-        use_color=True):
+    json_diff_report,
+    include_aggregates_only=False,
+    utest=False,
+    utest_alpha=0.05,
+    use_color=True,
+):
     """
     Calculate and report the difference between each test of two benchmarks
     runs specified as 'json1' and 'json2'.
@@ -339,44 +379,53 @@ def get_color(res):
             return BC_CYAN
 
     first_col_width = find_longest_name(json_diff_report)
-    first_col_width = max(
-        first_col_width,
-        len('Benchmark'))
+    first_col_width = max(first_col_width, len("Benchmark"))
     first_col_width += len(UTEST_COL_NAME)
     first_line = "{:<{}s}Time             CPU      Time Old      Time New       CPU Old       CPU New".format(
-        'Benchmark', 12 + first_col_width)
-    output_strs = [first_line, '-' * len(first_line)]
+        "Benchmark", 12 + first_col_width
+    )
+    output_strs = [first_line, "-" * len(first_line)]
 
     fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
     for benchmark in json_diff_report:
         # *If* we were asked to only include aggregates,
         # and if it is non-aggregate, then don't print it.
-        if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate':
-            for measurement in benchmark['measurements']:
-                output_strs += [color_format(use_color,
-                                             fmt_str,
-                                             BC_HEADER,
-                                             benchmark['name'],
-                                             first_col_width,
-                                             get_color(measurement['time']),
-                                             measurement['time'],
-                                             get_color(measurement['cpu']),
-                                             measurement['cpu'],
-                                             measurement['real_time'],
-                                             measurement['real_time_other'],
-                                             measurement['cpu_time'],
-                                             measurement['cpu_time_other'],
-                                             endc=BC_ENDC)]
+        if (
+            not include_aggregates_only
+            or not "run_type" in benchmark
+            or benchmark["run_type"] == "aggregate"
+        ):
+            for measurement in benchmark["measurements"]:
+                output_strs += [
+                    color_format(
+                        use_color,
+                        fmt_str,
+                        BC_HEADER,
+                        benchmark["name"],
+                        first_col_width,
+                        get_color(measurement["time"]),
+                        measurement["time"],
+                        get_color(measurement["cpu"]),
+                        measurement["cpu"],
+                        measurement["real_time"],
+                        measurement["real_time_other"],
+                        measurement["cpu_time"],
+                        measurement["cpu_time_other"],
+                        endc=BC_ENDC,
+                    )
+                ]
 
         # After processing the measurements, if requested and
         # if applicable (e.g. u-test exists for given benchmark),
         # print the U test.
-        if utest and benchmark['utest']:
-            output_strs += print_utest(benchmark['name'],
-                                       benchmark['utest'],
-                                       utest_alpha=utest_alpha,
-                                       first_col_width=first_col_width,
-                                       use_color=use_color)
+        if utest and benchmark["utest"]:
+            output_strs += print_utest(
+                benchmark["name"],
+                benchmark["utest"],
+                utest_alpha=utest_alpha,
+                first_col_width=first_col_width,
+                use_color=use_color,
+            )
 
     return output_strs
 
@@ -388,21 +437,19 @@ def get_color(res):
 class TestGetUniqueBenchmarkNames(unittest.TestCase):
     def load_results(self):
         import json
-        testInputs = os.path.join(
-            os.path.dirname(
-                os.path.realpath(__file__)),
-            'Inputs')
-        testOutput = os.path.join(testInputs, 'test3_run0.json')
-        with open(testOutput, 'r') as f:
+
+        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Inputs")
+        testOutput = os.path.join(testInputs, "test3_run0.json")
+        with open(testOutput, "r") as f:
             json = json.load(f)
         return json
 
     def test_basic(self):
         expect_lines = [
-            'BM_One',
-            'BM_Two',
-            'short',  # These two are not sorted
-            'medium',  # These two are not sorted
+            "BM_One",
+            "BM_Two",
+            "short",  # These two are not sorted
+            "medium",  # These two are not sorted
         ]
         json = self.load_results()
         output_lines = get_unique_benchmark_names(json)
@@ -418,15 +465,15 @@ class TestReportDifference(unittest.TestCase):
     def setUpClass(cls):
         def load_results():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput1 = os.path.join(testInputs, 'test1_run1.json')
-            testOutput2 = os.path.join(testInputs, 'test1_run2.json')
-            with open(testOutput1, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput1 = os.path.join(testInputs, "test1_run1.json")
+            testOutput2 = os.path.join(testInputs, "test1_run2.json")
+            with open(testOutput1, "r") as f:
                 json1 = json.load(f)
-            with open(testOutput2, 'r') as f:
+            with open(testOutput2, "r") as f:
                 json2 = json.load(f)
             return json1, json2
 
@@ -435,123 +482,236 @@ def load_results():
 
     def test_json_diff_report_pretty_printing(self):
         expect_lines = [
-            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
-            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
-            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
-            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
-            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
-            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
-            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
-            ['BM_100xSlower', '+99.0000', '+99.0000',
-                '100', '10000', '100', '10000'],
-            ['BM_100xFaster', '-0.9900', '-0.9900',
-                '10000', '100', '10000', '100'],
-            ['BM_10PercentCPUToTime', '+0.1000',
-                '-0.1000', '100', '110', '100', '90'],
-            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
-            ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
-            ['OVERALL_GEOMEAN', '-0.8344', '-0.8026', '0', '0', '0', '0']
+            ["BM_SameTimes", "+0.0000", "+0.0000", "10", "10", "10", "10"],
+            ["BM_2xFaster", "-0.5000", "-0.5000", "50", "25", "50", "25"],
+            ["BM_2xSlower", "+1.0000", "+1.0000", "50", "100", "50", "100"],
+            ["BM_1PercentFaster", "-0.0100", "-0.0100", "100", "99", "100", "99"],
+            ["BM_1PercentSlower", "+0.0100", "+0.0100", "100", "101", "100", "101"],
+            ["BM_10PercentFaster", "-0.1000", "-0.1000", "100", "90", "100", "90"],
+            ["BM_10PercentSlower", "+0.1000", "+0.1000", "100", "110", "100", "110"],
+            ["BM_100xSlower", "+99.0000", "+99.0000", "100", "10000", "100", "10000"],
+            ["BM_100xFaster", "-0.9900", "-0.9900", "10000", "100", "10000", "100"],
+            ["BM_10PercentCPUToTime", "+0.1000", "-0.1000", "100", "110", "100", "90"],
+            ["BM_ThirdFaster", "-0.3333", "-0.3334", "100", "67", "100", "67"],
+            ["BM_NotBadTimeUnit", "-0.9000", "+0.2000", "0", "0", "0", "1"],
+            ["OVERALL_GEOMEAN", "-0.8344", "-0.8026", "0", "0", "0", "0"],
         ]
         output_lines_with_header = print_difference_report(
-            self.json_diff_report, use_color=False)
+            self.json_diff_report, use_color=False
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_diff_report_output(self):
         expected_output = [
             {
-                'name': 'BM_SameTimes',
-                'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_SameTimes",
+                "measurements": [
+                    {
+                        "time": 0.0000,
+                        "cpu": 0.0000,
+                        "real_time": 10,
+                        "real_time_other": 10,
+                        "cpu_time": 10,
+                        "cpu_time_other": 10,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_2xFaster',
-                'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_2xFaster",
+                "measurements": [
+                    {
+                        "time": -0.5000,
+                        "cpu": -0.5000,
+                        "real_time": 50,
+                        "real_time_other": 25,
+                        "cpu_time": 50,
+                        "cpu_time_other": 25,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_2xSlower',
-                'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_2xSlower",
+                "measurements": [
+                    {
+                        "time": 1.0000,
+                        "cpu": 1.0000,
+                        "real_time": 50,
+                        "real_time_other": 100,
+                        "cpu_time": 50,
+                        "cpu_time_other": 100,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_1PercentFaster',
-                'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_1PercentFaster",
+                "measurements": [
+                    {
+                        "time": -0.0100,
+                        "cpu": -0.0100,
+                        "real_time": 100,
+                        "real_time_other": 98.9999999,
+                        "cpu_time": 100,
+                        "cpu_time_other": 98.9999999,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_1PercentSlower',
-                'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_1PercentSlower",
+                "measurements": [
+                    {
+                        "time": 0.0100,
+                        "cpu": 0.0100,
+                        "real_time": 100,
+                        "real_time_other": 101,
+                        "cpu_time": 100,
+                        "cpu_time_other": 101,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_10PercentFaster',
-                'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_10PercentFaster",
+                "measurements": [
+                    {
+                        "time": -0.1000,
+                        "cpu": -0.1000,
+                        "real_time": 100,
+                        "real_time_other": 90,
+                        "cpu_time": 100,
+                        "cpu_time_other": 90,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_10PercentSlower',
-                'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_10PercentSlower",
+                "measurements": [
+                    {
+                        "time": 0.1000,
+                        "cpu": 0.1000,
+                        "real_time": 100,
+                        "real_time_other": 110,
+                        "cpu_time": 100,
+                        "cpu_time_other": 110,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_100xSlower',
-                'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_100xSlower",
+                "measurements": [
+                    {
+                        "time": 99.0000,
+                        "cpu": 99.0000,
+                        "real_time": 100,
+                        "real_time_other": 10000,
+                        "cpu_time": 100,
+                        "cpu_time_other": 10000,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_100xFaster',
-                'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_100xFaster",
+                "measurements": [
+                    {
+                        "time": -0.9900,
+                        "cpu": -0.9900,
+                        "real_time": 10000,
+                        "real_time_other": 100,
+                        "cpu_time": 10000,
+                        "cpu_time_other": 100,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_10PercentCPUToTime',
-                'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_10PercentCPUToTime",
+                "measurements": [
+                    {
+                        "time": 0.1000,
+                        "cpu": -0.1000,
+                        "real_time": 100,
+                        "real_time_other": 110,
+                        "cpu_time": 100,
+                        "cpu_time_other": 90,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_ThirdFaster',
-                'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "BM_ThirdFaster",
+                "measurements": [
+                    {
+                        "time": -0.3333,
+                        "cpu": -0.3334,
+                        "real_time": 100,
+                        "real_time_other": 67,
+                        "cpu_time": 100,
+                        "cpu_time_other": 67,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'BM_NotBadTimeUnit',
-                'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
-                'time_unit': 's',
-                'utest': {}
+                "name": "BM_NotBadTimeUnit",
+                "measurements": [
+                    {
+                        "time": -0.9000,
+                        "cpu": 0.2000,
+                        "real_time": 0.4,
+                        "real_time_other": 0.04,
+                        "cpu_time": 0.5,
+                        "cpu_time_other": 0.6,
+                    }
+                ],
+                "time_unit": "s",
+                "utest": {},
             },
             {
-                'name': 'OVERALL_GEOMEAN',
-                'measurements': [{'real_time': 1.193776641714438e-06, 'cpu_time': 1.2144445585302297e-06,
-                                  'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07,
-                                  'time': -0.834399601997324, 'cpu': -0.8025889499549471}],
-                'time_unit': 's',
-                'run_type': 'aggregate',
-                'aggregate_name': 'geomean', 'utest': {}
+                "name": "OVERALL_GEOMEAN",
+                "measurements": [
+                    {
+                        "real_time": 1.193776641714438e-06,
+                        "cpu_time": 1.2144445585302297e-06,
+                        "real_time_other": 1.9768988699420897e-07,
+                        "cpu_time_other": 2.397447755209533e-07,
+                        "time": -0.834399601997324,
+                        "cpu": -0.8025889499549471,
+                    }
+                ],
+                "time_unit": "s",
+                "run_type": "aggregate",
+                "aggregate_name": "geomean",
+                "utest": {},
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(
-                self.json_diff_report, expected_output):
-            self.assertEqual(out['name'], expected['name'])
-            self.assertEqual(out['time_unit'], expected['time_unit'])
+        for out, expected in zip(self.json_diff_report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
             assert_measurements(self, out, expected)
 
@@ -561,12 +721,12 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
     def setUpClass(cls):
         def load_result():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput = os.path.join(testInputs, 'test2_run.json')
-            with open(testOutput, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput = os.path.join(testInputs, "test2_run.json")
+            with open(testOutput, "r") as f:
                 json = json.load(f)
             return json
 
@@ -577,65 +737,108 @@ def load_result():
 
     def test_json_
diff _report_pretty_printing(self):
         expect_lines = [
-            ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
-            ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
-            ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
-            ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
-            ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0']
+            [".", "-0.5000", "-0.5000", "10", "5", "10", "5"],
+            ["./4", "-0.5000", "-0.5000", "40", "20", "40", "20"],
+            ["Prefix/.", "-0.5000", "-0.5000", "20", "10", "20", "10"],
+            ["Prefix/./3", "-0.5000", "-0.5000", "30", "15", "30", "15"],
+            ["OVERALL_GEOMEAN", "-0.5000", "-0.5000", "0", "0", "0", "0"],
         ]
         output_lines_with_header = print_
diff erence_report(
-            self.json_
diff _report, use_color=False)
+            self.json_
diff _report, use_color=False
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_
diff _report(self):
         expected_output = [
             {
-                'name': u'.',
-                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": ".",
+                "measurements": [
+                    {
+                        "time": -0.5,
+                        "cpu": -0.5,
+                        "real_time": 10,
+                        "real_time_other": 5,
+                        "cpu_time": 10,
+                        "cpu_time_other": 5,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': u'./4',
-                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}],
-                'time_unit': 'ns',
-                'utest': {},
+                "name": "./4",
+                "measurements": [
+                    {
+                        "time": -0.5,
+                        "cpu": -0.5,
+                        "real_time": 40,
+                        "real_time_other": 20,
+                        "cpu_time": 40,
+                        "cpu_time_other": 20,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': u'Prefix/.',
-                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "Prefix/.",
+                "measurements": [
+                    {
+                        "time": -0.5,
+                        "cpu": -0.5,
+                        "real_time": 20,
+                        "real_time_other": 10,
+                        "cpu_time": 20,
+                        "cpu_time_other": 10,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': u'Prefix/./3',
-                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
-                'time_unit': 'ns',
-                'utest': {}
+                "name": "Prefix/./3",
+                "measurements": [
+                    {
+                        "time": -0.5,
+                        "cpu": -0.5,
+                        "real_time": 30,
+                        "real_time_other": 15,
+                        "cpu_time": 30,
+                        "cpu_time_other": 15,
+                    }
+                ],
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'OVERALL_GEOMEAN',
-                'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08,
-                                  'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08,
-                                  'time': -0.5000000000000009, 'cpu': -0.5000000000000009}],
-                'time_unit': 's',
-                'run_type': 'aggregate',
-                'aggregate_name': 'geomean',
-                'utest': {}
-            }
+                "name": "OVERALL_GEOMEAN",
+                "measurements": [
+                    {
+                        "real_time": 2.213363839400641e-08,
+                        "cpu_time": 2.213363839400641e-08,
+                        "real_time_other": 1.1066819197003185e-08,
+                        "cpu_time_other": 1.1066819197003185e-08,
+                        "time": -0.5000000000000009,
+                        "cpu": -0.5000000000000009,
+                    }
+                ],
+                "time_unit": "s",
+                "run_type": "aggregate",
+                "aggregate_name": "geomean",
+                "utest": {},
+            },
         ]
         self.assertEqual(len(self.json_
diff _report), len(expected_output))
-        for out, expected in zip(
-                self.json_
diff _report, expected_output):
-            self.assertEqual(out['name'], expected['name'])
-            self.assertEqual(out['time_unit'], expected['time_unit'])
+        for out, expected in zip(self.json_
diff _report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
             assert_measurements(self, out, expected)
 
@@ -645,424 +848,487 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
     def setUpClass(cls):
         def load_results():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput1 = os.path.join(testInputs, 'test3_run0.json')
-            testOutput2 = os.path.join(testInputs, 'test3_run1.json')
-            with open(testOutput1, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput1 = os.path.join(testInputs, "test3_run0.json")
+            testOutput2 = os.path.join(testInputs, "test3_run1.json")
+            with open(testOutput1, "r") as f:
                 json1 = json.load(f)
-            with open(testOutput2, 'r') as f:
+            with open(testOutput2, "r") as f:
                 json2 = json.load(f)
             return json1, json2
 
         json1, json2 = load_results()
-        cls.json_
diff _report = get_
diff erence_report(
-            json1, json2, utest=True)
+        cls.json_
diff _report = get_
diff erence_report(json1, json2, utest=True)
 
     def test_json_
diff _report_pretty_printing(self):
         expect_lines = [
-            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
-            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
-            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
-            ['BM_Two_pvalue',
-             '1.0000',
-             '0.6667',
-             'U',
-             'Test,',
-             'Repetitions:',
-             '2',
-             'vs',
-             '2.',
-             'WARNING:',
-             'Results',
-             'unreliable!',
-             '9+',
-             'repetitions',
-             'recommended.'],
-            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
-            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
-            ['short_pvalue',
-             '0.7671',
-             '0.2000',
-             'U',
-             'Test,',
-             'Repetitions:',
-             '2',
-             'vs',
-             '3.',
-             'WARNING:',
-             'Results',
-             'unreliable!',
-             '9+',
-             'repetitions',
-             'recommended.'],
-            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
-            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
+            ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"],
+            ["BM_Two", "+0.1111", "-0.0111", "9", "10", "90", "89"],
+            ["BM_Two", "-0.1250", "-0.1628", "8", "7", "86", "72"],
+            [
+                "BM_Two_pvalue",
+                "1.0000",
+                "0.6667",
+                "U",
+                "Test,",
+                "Repetitions:",
+                "2",
+                "vs",
+                "2.",
+                "WARNING:",
+                "Results",
+                "unreliable!",
+                "9+",
+                "repetitions",
+                "recommended.",
+            ],
+            ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"],
+            ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"],
+            [
+                "short_pvalue",
+                "0.7671",
+                "0.2000",
+                "U",
+                "Test,",
+                "Repetitions:",
+                "2",
+                "vs",
+                "3.",
+                "WARNING:",
+                "Results",
+                "unreliable!",
+                "9+",
+                "repetitions",
+                "recommended.",
+            ],
+            ["medium", "-0.3750", "-0.3375", "8", "5", "80", "53"],
+            ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"],
         ]
         output_lines_with_header = print_
diff erence_report(
-            self.json_
diff _report, utest=True, utest_alpha=0.05, use_color=False)
+            self.json_
diff _report, utest=True, utest_alpha=0.05, use_color=False
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_
diff _report_pretty_printing_aggregates_only(self):
         expect_lines = [
-            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
-            ['BM_Two_pvalue',
-             '1.0000',
-             '0.6667',
-             'U',
-             'Test,',
-             'Repetitions:',
-             '2',
-             'vs',
-             '2.',
-             'WARNING:',
-             'Results',
-             'unreliable!',
-             '9+',
-             'repetitions',
-             'recommended.'],
-            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
-            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
-            ['short_pvalue',
-             '0.7671',
-             '0.2000',
-             'U',
-             'Test,',
-             'Repetitions:',
-             '2',
-             'vs',
-             '3.',
-             'WARNING:',
-             'Results',
-             'unreliable!',
-             '9+',
-             'repetitions',
-             'recommended.'],
-            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
+            ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"],
+            [
+                "BM_Two_pvalue",
+                "1.0000",
+                "0.6667",
+                "U",
+                "Test,",
+                "Repetitions:",
+                "2",
+                "vs",
+                "2.",
+                "WARNING:",
+                "Results",
+                "unreliable!",
+                "9+",
+                "repetitions",
+                "recommended.",
+            ],
+            ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"],
+            ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"],
+            [
+                "short_pvalue",
+                "0.7671",
+                "0.2000",
+                "U",
+                "Test,",
+                "Repetitions:",
+                "2",
+                "vs",
+                "3.",
+                "WARNING:",
+                "Results",
+                "unreliable!",
+                "9+",
+                "repetitions",
+                "recommended.",
+            ],
+            ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"],
         ]
         output_lines_with_header = print_
diff erence_report(
-            self.json_
diff _report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False)
+            self.json_
diff _report,
+            include_aggregates_only=True,
+            utest=True,
+            utest_alpha=0.05,
+            use_color=False,
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_
diff _report(self):
         expected_output = [
             {
-                'name': u'BM_One',
-                'measurements': [
-                    {'time': -0.1,
-                     'cpu': 0.1,
-                     'real_time': 10,
-                     'real_time_other': 9,
-                     'cpu_time': 100,
-                     'cpu_time_other': 110}
+                "name": "BM_One",
+                "measurements": [
+                    {
+                        "time": -0.1,
+                        "cpu": 0.1,
+                        "real_time": 10,
+                        "real_time_other": 9,
+                        "cpu_time": 100,
+                        "cpu_time_other": 110,
+                    }
                 ],
-                'time_unit': 'ns',
-                'utest': {}
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': u'BM_Two',
-                'measurements': [
-                    {'time': 0.1111111111111111,
-                     'cpu': -0.011111111111111112,
-                     'real_time': 9,
-                     'real_time_other': 10,
-                     'cpu_time': 90,
-                     'cpu_time_other': 89},
-                    {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
-                        'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
+                "name": "BM_Two",
+                "measurements": [
+                    {
+                        "time": 0.1111111111111111,
+                        "cpu": -0.011111111111111112,
+                        "real_time": 9,
+                        "real_time_other": 10,
+                        "cpu_time": 90,
+                        "cpu_time_other": 89,
+                    },
+                    {
+                        "time": -0.125,
+                        "cpu": -0.16279069767441862,
+                        "real_time": 8,
+                        "real_time_other": 7,
+                        "cpu_time": 86,
+                        "cpu_time_other": 72,
+                    },
                 ],
-                'time_unit': 'ns',
-                'utest': {
-                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
-                }
+                "time_unit": "ns",
+                "utest": {
+                    "have_optimal_repetitions": False,
+                    "cpu_pvalue": 0.6666666666666666,
+                    "time_pvalue": 1.0,
+                },
             },
             {
-                'name': u'short',
-                'measurements': [
-                    {'time': -0.125,
-                     'cpu': -0.0625,
-                     'real_time': 8,
-                     'real_time_other': 7,
-                     'cpu_time': 80,
-                     'cpu_time_other': 75},
-                    {'time': -0.4325,
-                     'cpu': -0.13506493506493514,
-                     'real_time': 8,
-                     'real_time_other': 4.54,
-                     'cpu_time': 77,
-                     'cpu_time_other': 66.6}
+                "name": "short",
+                "measurements": [
+                    {
+                        "time": -0.125,
+                        "cpu": -0.0625,
+                        "real_time": 8,
+                        "real_time_other": 7,
+                        "cpu_time": 80,
+                        "cpu_time_other": 75,
+                    },
+                    {
+                        "time": -0.4325,
+                        "cpu": -0.13506493506493514,
+                        "real_time": 8,
+                        "real_time_other": 4.54,
+                        "cpu_time": 77,
+                        "cpu_time_other": 66.6,
+                    },
                 ],
-                'time_unit': 'ns',
-                'utest': {
-                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
-                }
+                "time_unit": "ns",
+                "utest": {
+                    "have_optimal_repetitions": False,
+                    "cpu_pvalue": 0.2,
+                    "time_pvalue": 0.7670968684102772,
+                },
             },
             {
-                'name': u'medium',
-                'measurements': [
-                    {'time': -0.375,
-                     'cpu': -0.3375,
-                     'real_time': 8,
-                     'real_time_other': 5,
-                     'cpu_time': 80,
-                     'cpu_time_other': 53}
+                "name": "medium",
+                "measurements": [
+                    {
+                        "time": -0.375,
+                        "cpu": -0.3375,
+                        "real_time": 8,
+                        "real_time_other": 5,
+                        "cpu_time": 80,
+                        "cpu_time_other": 53,
+                    }
                 ],
-                'time_unit': 'ns',
-                'utest': {}
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': 'OVERALL_GEOMEAN',
-                'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
-                                  'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
-                                  'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
-                'time_unit': 's',
-                'run_type': 'aggregate',
-                'aggregate_name': 'geomean',
-                'utest': {}
-            }
+                "name": "OVERALL_GEOMEAN",
+                "measurements": [
+                    {
+                        "real_time": 8.48528137423858e-09,
+                        "cpu_time": 8.441336246629233e-08,
+                        "real_time_other": 2.2405267593145244e-08,
+                        "cpu_time_other": 2.5453661413660466e-08,
+                        "time": 1.6404861082353634,
+                        "cpu": -0.6984640740519662,
+                    }
+                ],
+                "time_unit": "s",
+                "run_type": "aggregate",
+                "aggregate_name": "geomean",
+                "utest": {},
+            },
         ]
         self.assertEqual(len(self.json_
diff _report), len(expected_output))
-        for out, expected in zip(
-                self.json_
diff _report, expected_output):
-            self.assertEqual(out['name'], expected['name'])
-            self.assertEqual(out['time_unit'], expected['time_unit'])
+        for out, expected in zip(self.json_
diff _report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
             assert_measurements(self, out, expected)
 
 
-class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
-        unittest.TestCase):
+class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         def load_results():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput1 = os.path.join(testInputs, 'test3_run0.json')
-            testOutput2 = os.path.join(testInputs, 'test3_run1.json')
-            with open(testOutput1, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput1 = os.path.join(testInputs, "test3_run0.json")
+            testOutput2 = os.path.join(testInputs, "test3_run1.json")
+            with open(testOutput1, "r") as f:
                 json1 = json.load(f)
-            with open(testOutput2, 'r') as f:
+            with open(testOutput2, "r") as f:
                 json2 = json.load(f)
             return json1, json2
 
         json1, json2 = load_results()
-        cls.json_
diff _report = get_
diff erence_report(
-            json1, json2, utest=True)
+        cls.json_
diff _report = get_
diff erence_report(json1, json2, utest=True)
 
     def test_json_
diff _report_pretty_printing(self):
         expect_lines = [
-            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
-            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
-            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
-            ['BM_Two_pvalue',
-             '1.0000',
-             '0.6667',
-             'U',
-             'Test,',
-             'Repetitions:',
-             '2',
-             'vs',
-             '2.',
-             'WARNING:',
-             'Results',
-             'unreliable!',
-             '9+',
-             'repetitions',
-             'recommended.'],
-            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
-            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
-            ['short_pvalue',
-             '0.7671',
-             '0.2000',
-             'U',
-             'Test,',
-             'Repetitions:',
-             '2',
-             'vs',
-             '3.',
-             'WARNING:',
-             'Results',
-             'unreliable!',
-             '9+',
-             'repetitions',
-             'recommended.'],
-            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
-            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
+            ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"],
+            ["BM_Two", "+0.1111", "-0.0111", "9", "10", "90", "89"],
+            ["BM_Two", "-0.1250", "-0.1628", "8", "7", "86", "72"],
+            [
+                "BM_Two_pvalue",
+                "1.0000",
+                "0.6667",
+                "U",
+                "Test,",
+                "Repetitions:",
+                "2",
+                "vs",
+                "2.",
+                "WARNING:",
+                "Results",
+                "unreliable!",
+                "9+",
+                "repetitions",
+                "recommended.",
+            ],
+            ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"],
+            ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"],
+            [
+                "short_pvalue",
+                "0.7671",
+                "0.2000",
+                "U",
+                "Test,",
+                "Repetitions:",
+                "2",
+                "vs",
+                "3.",
+                "WARNING:",
+                "Results",
+                "unreliable!",
+                "9+",
+                "repetitions",
+                "recommended.",
+            ],
+            ["medium", "-0.3750", "-0.3375", "8", "5", "80", "53"],
+            ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"],
         ]
         output_lines_with_header = print_
diff erence_report(
-            self.json_
diff _report,
-            utest=True, utest_alpha=0.05, use_color=False)
+            self.json_
diff _report, utest=True, utest_alpha=0.05, use_color=False
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_
diff _report(self):
         expected_output = [
             {
-                'name': u'BM_One',
-                'measurements': [
-                    {'time': -0.1,
-                     'cpu': 0.1,
-                     'real_time': 10,
-                     'real_time_other': 9,
-                     'cpu_time': 100,
-                     'cpu_time_other': 110}
+                "name": "BM_One",
+                "measurements": [
+                    {
+                        "time": -0.1,
+                        "cpu": 0.1,
+                        "real_time": 10,
+                        "real_time_other": 9,
+                        "cpu_time": 100,
+                        "cpu_time_other": 110,
+                    }
                 ],
-                'time_unit': 'ns',
-                'utest': {}
+                "time_unit": "ns",
+                "utest": {},
             },
             {
-                'name': u'BM_Two',
-                'measurements': [
-                    {'time': 0.1111111111111111,
-                     'cpu': -0.011111111111111112,
-                     'real_time': 9,
-                     'real_time_other': 10,
-                     'cpu_time': 90,
-                     'cpu_time_other': 89},
-                    {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
-                        'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
+                "name": "BM_Two",
+                "measurements": [
+                    {
+                        "time": 0.1111111111111111,
+                        "cpu": -0.011111111111111112,
+                        "real_time": 9,
+                        "real_time_other": 10,
+                        "cpu_time": 90,
+                        "cpu_time_other": 89,
+                    },
+                    {
+                        "time": -0.125,
+                        "cpu": -0.16279069767441862,
+                        "real_time": 8,
+                        "real_time_other": 7,
+                        "cpu_time": 86,
+                        "cpu_time_other": 72,
+                    },
                 ],
-                'time_unit': 'ns',
-                'utest': {
-                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
-                }
+                "time_unit": "ns",
+                "utest": {
+                    "have_optimal_repetitions": False,
+                    "cpu_pvalue": 0.6666666666666666,
+                    "time_pvalue": 1.0,
+                },
             },
             {
-                'name': u'short',
-                'measurements': [
-                    {'time': -0.125,
-                     'cpu': -0.0625,
-                     'real_time': 8,
-                     'real_time_other': 7,
-                     'cpu_time': 80,
-                     'cpu_time_other': 75},
-                    {'time': -0.4325,
-                     'cpu': -0.13506493506493514,
-                     'real_time': 8,
-                     'real_time_other': 4.54,
-                     'cpu_time': 77,
-                     'cpu_time_other': 66.6}
+                "name": "short",
+                "measurements": [
+                    {
+                        "time": -0.125,
+                        "cpu": -0.0625,
+                        "real_time": 8,
+                        "real_time_other": 7,
+                        "cpu_time": 80,
+                        "cpu_time_other": 75,
+                    },
+                    {
+                        "time": -0.4325,
+                        "cpu": -0.13506493506493514,
+                        "real_time": 8,
+                        "real_time_other": 4.54,
+                        "cpu_time": 77,
+                        "cpu_time_other": 66.6,
+                    },
                 ],
-                'time_unit': 'ns',
-                'utest': {
-                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
-                }
+                "time_unit": "ns",
+                "utest": {
+                    "have_optimal_repetitions": False,
+                    "cpu_pvalue": 0.2,
+                    "time_pvalue": 0.7670968684102772,
+                },
             },
             {
-                'name': u'medium',
-                'measurements': [
-                    {'real_time_other': 5,
-                     'cpu_time': 80,
-                     'time': -0.375,
-                     'real_time': 8,
-                     'cpu_time_other': 53,
-                     'cpu': -0.3375
-                     }
+                "name": "medium",
+                "measurements": [
+                    {
+                        "real_time_other": 5,
+                        "cpu_time": 80,
+                        "time": -0.375,
+                        "real_time": 8,
+                        "cpu_time_other": 53,
+                        "cpu": -0.3375,
+                    }
                 ],
-                'utest': {},
-                'time_unit': u'ns',
-                'aggregate_name': ''
+                "utest": {},
+                "time_unit": "ns",
+                "aggregate_name": "",
             },
             {
-                'name': 'OVERALL_GEOMEAN',
-                'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
-                                  'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
-                                  'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
-                'time_unit': 's',
-                'run_type': 'aggregate',
-                'aggregate_name': 'geomean',
-                'utest': {}
-            }
+                "name": "OVERALL_GEOMEAN",
+                "measurements": [
+                    {
+                        "real_time": 8.48528137423858e-09,
+                        "cpu_time": 8.441336246629233e-08,
+                        "real_time_other": 2.2405267593145244e-08,
+                        "cpu_time_other": 2.5453661413660466e-08,
+                        "time": 1.6404861082353634,
+                        "cpu": -0.6984640740519662,
+                    }
+                ],
+                "time_unit": "s",
+                "run_type": "aggregate",
+                "aggregate_name": "geomean",
+                "utest": {},
+            },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(
-                self.json_diff_report, expected_output):
-            self.assertEqual(out['name'], expected['name'])
-            self.assertEqual(out['time_unit'], expected['time_unit'])
+        for out, expected in zip(self.json_diff_report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
             assert_measurements(self, out, expected)
 
 
-class TestReportDifferenceForPercentageAggregates(
-        unittest.TestCase):
+class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         def load_results():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput1 = os.path.join(testInputs, 'test4_run0.json')
-            testOutput2 = os.path.join(testInputs, 'test4_run1.json')
-            with open(testOutput1, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput1 = os.path.join(testInputs, "test4_run0.json")
+            testOutput2 = os.path.join(testInputs, "test4_run1.json")
+            with open(testOutput1, "r") as f:
                 json1 = json.load(f)
-            with open(testOutput2, 'r') as f:
+            with open(testOutput2, "r") as f:
                 json2 = json.load(f)
             return json1, json2
 
         json1, json2 = load_results()
-        cls.json_diff_report = get_difference_report(
-            json1, json2, utest=True)
+        cls.json_diff_report = get_difference_report(json1, json2, utest=True)
 
     def test_json_diff_report_pretty_printing(self):
-        expect_lines = [
-            ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0']
-        ]
+        expect_lines = [["whocares", "-0.5000", "+0.5000", "0", "0", "0", "0"]]
         output_lines_with_header = print_difference_report(
-            self.json_diff_report,
-            utest=True, utest_alpha=0.05, use_color=False)
+            self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_diff_report(self):
         expected_output = [
             {
-                'name': u'whocares',
-                'measurements': [
-                    {'time': -0.5,
-                     'cpu': 0.5,
-                     'real_time': 0.01,
-                     'real_time_other': 0.005,
-                     'cpu_time': 0.10,
-                     'cpu_time_other': 0.15}
+                "name": "whocares",
+                "measurements": [
+                    {
+                        "time": -0.5,
+                        "cpu": 0.5,
+                        "real_time": 0.01,
+                        "real_time_other": 0.005,
+                        "cpu_time": 0.10,
+                        "cpu_time_other": 0.15,
+                    }
                 ],
-                'time_unit': 'ns',
-                'utest': {}
+                "time_unit": "ns",
+                "utest": {},
             }
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(
-                self.json_diff_report, expected_output):
-            self.assertEqual(out['name'], expected['name'])
-            self.assertEqual(out['time_unit'], expected['time_unit'])
+        for out, expected in zip(self.json_diff_report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
             assert_measurements(self, out, expected)
 
@@ -1072,12 +1338,12 @@ class TestReportSorting(unittest.TestCase):
     def setUpClass(cls):
         def load_result():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput = os.path.join(testInputs, 'test4_run.json')
-            with open(testOutput, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput = os.path.join(testInputs, "test4_run.json")
+            with open(testOutput, "r") as f:
                 json = json.load(f)
             return json
 
@@ -1098,45 +1364,45 @@ def test_json_diff_report_pretty_printing(self):
             "91 family 1 instance 0 aggregate",
             "90 family 1 instance 1 repetition 0",
             "89 family 1 instance 1 repetition 1",
-            "88 family 1 instance 1 aggregate"
+            "88 family 1 instance 1 aggregate",
         ]
 
-        for n in range(len(self.json['benchmarks']) ** 2):
-            random.shuffle(self.json['benchmarks'])
-            sorted_benchmarks = util.sort_benchmark_results(self.json)[
-                'benchmarks']
+        for n in range(len(self.json["benchmarks"]) ** 2):
+            random.shuffle(self.json["benchmarks"])
+            sorted_benchmarks = util.sort_benchmark_results(self.json)["benchmarks"]
             self.assertEqual(len(expected_names), len(sorted_benchmarks))
             for out, expected in zip(sorted_benchmarks, expected_names):
-                self.assertEqual(out['name'], expected)
+                self.assertEqual(out["name"], expected)
 
 
 def assert_utest(unittest_instance, lhs, rhs):
-    if lhs['utest']:
+    if lhs["utest"]:
         unittest_instance.assertAlmostEqual(
-            lhs['utest']['cpu_pvalue'],
-            rhs['utest']['cpu_pvalue'])
+            lhs["utest"]["cpu_pvalue"], rhs["utest"]["cpu_pvalue"]
+        )
         unittest_instance.assertAlmostEqual(
-            lhs['utest']['time_pvalue'],
-            rhs['utest']['time_pvalue'])
+            lhs["utest"]["time_pvalue"], rhs["utest"]["time_pvalue"]
+        )
         unittest_instance.assertEqual(
-            lhs['utest']['have_optimal_repetitions'],
-            rhs['utest']['have_optimal_repetitions'])
+            lhs["utest"]["have_optimal_repetitions"],
+            rhs["utest"]["have_optimal_repetitions"],
+        )
     else:
         # lhs is empty. assert if rhs is not.
-        unittest_instance.assertEqual(lhs['utest'], rhs['utest'])
+        unittest_instance.assertEqual(lhs["utest"], rhs["utest"])
 
 
 def assert_measurements(unittest_instance, lhs, rhs):
-    for m1, m2 in zip(lhs['measurements'], rhs['measurements']):
-        unittest_instance.assertEqual(m1['real_time'], m2['real_time'])
-        unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time'])
+    for m1, m2 in zip(lhs["measurements"], rhs["measurements"]):
+        unittest_instance.assertEqual(m1["real_time"], m2["real_time"])
+        unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"])
         # m1['time'] and m1['cpu'] hold values which are being calculated,
         # and therefore we must use almost-equal pattern.
-        unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
-        unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
+        unittest_instance.assertAlmostEqual(m1["time"], m2["time"], places=4)
+        unittest_instance.assertAlmostEqual(m1["cpu"], m2["cpu"], places=4)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 
 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

diff --git a/third-party/benchmark/tools/gbench/util.py b/third-party/benchmark/tools/gbench/util.py
index 5d0012c0cb1c3..a46903a6d248d 100644
--- a/third-party/benchmark/tools/gbench/util.py
+++ b/third-party/benchmark/tools/gbench/util.py
@@ -12,7 +12,7 @@
 IT_JSON = 1
 IT_Executable = 2
 
-_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+_num_magic_bytes = 2 if sys.platform.startswith("win") else 4
 
 
 def is_executable_file(filename):
@@ -23,21 +23,21 @@ def is_executable_file(filename):
     """
     if not os.path.isfile(filename):
         return False
-    with open(filename, mode='rb') as f:
+    with open(filename, mode="rb") as f:
         magic_bytes = f.read(_num_magic_bytes)
-    if sys.platform == 'darwin':
+    if sys.platform == "darwin":
         return magic_bytes in [
-            b'\xfe\xed\xfa\xce',  # MH_MAGIC
-            b'\xce\xfa\xed\xfe',  # MH_CIGAM
-            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
-            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
-            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
-            b'\xbe\xba\xfe\xca'   # FAT_CIGAM
+            b"\xfe\xed\xfa\xce",  # MH_MAGIC
+            b"\xce\xfa\xed\xfe",  # MH_CIGAM
+            b"\xfe\xed\xfa\xcf",  # MH_MAGIC_64
+            b"\xcf\xfa\xed\xfe",  # MH_CIGAM_64
+            b"\xca\xfe\xba\xbe",  # FAT_MAGIC
+            b"\xbe\xba\xfe\xca",  # FAT_CIGAM
         ]
-    elif sys.platform.startswith('win'):
-        return magic_bytes == b'MZ'
+    elif sys.platform.startswith("win"):
+        return magic_bytes == b"MZ"
     else:
-        return magic_bytes == b'\x7FELF'
+        return magic_bytes == b"\x7FELF"
 
 
 def is_json_file(filename):
@@ -46,7 +46,7 @@ def is_json_file(filename):
     'False' otherwise.
     """
     try:
-        with open(filename, 'r') as f:
+        with open(filename, "r") as f:
             json.load(f)
         return True
     except BaseException:
@@ -71,7 +71,9 @@ def classify_input_file(filename):
     elif is_json_file(filename):
         ftype = IT_JSON
     else:
-        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
+        err_msg = (
+            "'%s' does not name a valid benchmark executable or JSON file" % filename
+        )
     return ftype, err_msg
 
 
@@ -94,11 +96,11 @@ def find_benchmark_flag(prefix, benchmark_flags):
     if it is found return the arg it specifies. If specified more than once the
     last value is returned. If the flag is not found None is returned.
     """
-    assert prefix.startswith('--') and prefix.endswith('=')
+    assert prefix.startswith("--") and prefix.endswith("=")
     result = None
     for f in benchmark_flags:
         if f.startswith(prefix):
-            result = f[len(prefix):]
+            result = f[len(prefix) :]
     return result
 
 
@@ -107,7 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     Return a new list containing the specified benchmark_flags except those
     with the specified prefix.
     """
-    assert prefix.startswith('--') and prefix.endswith('=')
+    assert prefix.startswith("--") and prefix.endswith("=")
     return [f for f in benchmark_flags if not f.startswith(prefix)]
 
 
@@ -116,24 +118,40 @@ def load_benchmark_results(fname):
     Read benchmark output from a file and return the JSON object.
     REQUIRES: 'fname' names a file containing JSON benchmark output.
     """
-    with open(fname, 'r') as f:
+    with open(fname, "r") as f:
         return json.load(f)
 
 
 def sort_benchmark_results(result):
-    benchmarks = result['benchmarks']
+    benchmarks = result["benchmarks"]
 
     # From inner key to the outer key!
     benchmarks = sorted(
-        benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1)
+        benchmarks,
+        key=lambda benchmark: benchmark["repetition_index"]
+        if "repetition_index" in benchmark
+        else -1,
+    )
     benchmarks = sorted(
-        benchmarks, key=lambda benchmark: 1 if 'run_type' in benchmark and benchmark['run_type'] == "aggregate" else 0)
+        benchmarks,
+        key=lambda benchmark: 1
+        if "run_type" in benchmark and benchmark["run_type"] == "aggregate"
+        else 0,
+    )
     benchmarks = sorted(
-        benchmarks, key=lambda benchmark: benchmark['per_family_instance_index'] if 'per_family_instance_index' in benchmark else -1)
+        benchmarks,
+        key=lambda benchmark: benchmark["per_family_instance_index"]
+        if "per_family_instance_index" in benchmark
+        else -1,
+    )
     benchmarks = sorted(
-        benchmarks, key=lambda benchmark: benchmark['family_index'] if 'family_index' in benchmark else -1)
+        benchmarks,
+        key=lambda benchmark: benchmark["family_index"]
+        if "family_index" in benchmark
+        else -1,
+    )
 
-    result['benchmarks'] = benchmarks
+    result["benchmarks"] = benchmarks
     return result
 
 
@@ -144,21 +162,19 @@ def run_benchmark(exe_name, benchmark_flags):
     real time console output.
     RETURNS: A JSON object representing the benchmark output
     """
-    output_name = find_benchmark_flag('--benchmark_out=',
-                                      benchmark_flags)
+    output_name = find_benchmark_flag("--benchmark_out=", benchmark_flags)
     is_temp_output = False
     if output_name is None:
         is_temp_output = True
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
-        benchmark_flags = list(benchmark_flags) + \
-            ['--benchmark_out=%s' % output_name]
+        benchmark_flags = list(benchmark_flags) + ["--benchmark_out=%s" % output_name]
 
     cmd = [exe_name] + benchmark_flags
-    print("RUNNING: %s" % ' '.join(cmd))
+    print("RUNNING: %s" % " ".join(cmd))
     exitCode = subprocess.call(cmd)
     if exitCode != 0:
-        print('TEST FAILED...')
+        print("TEST FAILED...")
         sys.exit(exitCode)
     json_res = load_benchmark_results(output_name)
     if is_temp_output:
@@ -178,4 +194,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
         return load_benchmark_results(filename)
     if ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
-    raise ValueError('Unknown file type %s' % ftype)
+    raise ValueError("Unknown file type %s" % ftype)

diff --git a/third-party/benchmark/tools/strip_asm.py b/third-party/benchmark/tools/strip_asm.py
index 9030550b43bec..086255dc65778 100755
--- a/third-party/benchmark/tools/strip_asm.py
+++ b/third-party/benchmark/tools/strip_asm.py
@@ -9,13 +9,14 @@
 import os
 import re
 
+
 def find_used_labels(asm):
     found = set()
     label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
     for l in asm.splitlines():
         m = label_re.match(l)
         if m:
-            found.add('.L%s' % m.group(1))
+            found.add(".L%s" % m.group(1))
     return found
 
 
@@ -28,24 +29,24 @@ def normalize_labels(asm):
             decls.add(m.group(0))
     if len(decls) == 0:
         return asm
-    needs_dot = next(iter(decls))[0] != '.'
+    needs_dot = next(iter(decls))[0] != "."
     if not needs_dot:
         return asm
     for ld in decls:
-        asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
+        asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", "\\1." + ld, asm)
     return asm
 
 
 def transform_labels(asm):
     asm = normalize_labels(asm)
     used_decls = find_used_labels(asm)
-    new_asm = ''
+    new_asm = ""
     label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
     for l in asm.splitlines():
         m = label_decl.match(l)
         if not m or m.group(0) in used_decls:
             new_asm += l
-            new_asm += '\n'
+            new_asm += "\n"
     return new_asm
 
 
@@ -53,14 +54,15 @@ def is_identifier(tk):
     if len(tk) == 0:
         return False
     first = tk[0]
-    if not first.isalpha() and first != '_':
+    if not first.isalpha() and first != "_":
         return False
     for i in range(1, len(tk)):
         c = tk[i]
-        if not c.isalnum() and c != '_':
+        if not c.isalnum() and c != "_":
             return False
     return True
 
+
 def process_identifiers(l):
     """
     process_identifiers - process all identifiers and modify them to have
@@ -68,14 +70,15 @@ def process_identifiers(l):
     For example, MachO inserts an additional understore at the beginning of
     names. This function removes that.
     """
-    parts = re.split(r'([a-zA-Z0-9_]+)', l)
-    new_line = ''
+    parts = re.split(r"([a-zA-Z0-9_]+)", l)
+    new_line = ""
     for tk in parts:
         if is_identifier(tk):
-            if tk.startswith('__Z'):
+            if tk.startswith("__Z"):
                 tk = tk[1:]
-            elif tk.startswith('_') and len(tk) > 1 and \
-                    tk[1].isalpha() and tk[1] != 'Z':
+            elif (
+                tk.startswith("_") and len(tk) > 1 and tk[1].isalpha() and tk[1] != "Z"
+            ):
                 tk = tk[1:]
         new_line += tk
     return new_line
@@ -85,24 +88,24 @@ def process_asm(asm):
     """
     Strip the ASM of unwanted directives and lines
     """
-    new_contents = ''
+    new_contents = ""
     asm = transform_labels(asm)
 
     # TODO: Add more things we want to remove
     discard_regexes = [
-        re.compile("\s+\..*$"), # directive
-        re.compile("\s*#(NO_APP|APP)$"), #inline ASM
-        re.compile("\s*#.*$"), # comment line
-        re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive
-        re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
-    ]
-    keep_regexes = [
-
+        re.compile("\s+\..*$"),  # directive
+        re.compile("\s*#(NO_APP|APP)$"),  # inline ASM
+        re.compile("\s*#.*$"),  # comment line
+        re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
+        re.compile(
+            "\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"
+        ),
     ]
+    keep_regexes = []
     fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
     for l in asm.splitlines():
         # Remove Mach-O attribute
-        l = l.replace('@GOTPCREL', '')
+        l = l.replace("@GOTPCREL", "")
         add_line = True
         for reg in discard_regexes:
             if reg.match(l) is not None:
@@ -114,21 +117,21 @@ def process_asm(asm):
                 break
         if add_line:
             if fn_label_def.match(l) and len(new_contents) != 0:
-                new_contents += '\n'
+                new_contents += "\n"
             l = process_identifiers(l)
             new_contents += l
-            new_contents += '\n'
+            new_contents += "\n"
     return new_contents
 
+
 def main():
-    parser = ArgumentParser(
-        description='generate a stripped assembly file')
+    parser = ArgumentParser(description="generate a stripped assembly file")
     parser.add_argument(
-        'input', metavar='input', type=str, nargs=1,
-        help='An input assembly file')
+        "input", metavar="input", type=str, nargs=1, help="An input assembly file"
+    )
     parser.add_argument(
-        'out', metavar='output', type=str, nargs=1,
-        help='The output file')
+        "out", metavar="output", type=str, nargs=1, help="The output file"
+    )
     args, unknown_args = parser.parse_known_args()
     input = args.input[0]
     output = args.out[0]
@@ -136,14 +139,14 @@ def main():
         print(("ERROR: input file '%s' does not exist") % input)
         sys.exit(1)
     contents = None
-    with open(input, 'r') as f:
+    with open(input, "r") as f:
         contents = f.read()
     new_contents = process_asm(contents)
-    with open(output, 'w') as f:
+    with open(output, "w") as f:
         f.write(new_contents)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

diff --git a/utils/bazel/overlay_directories.py b/utils/bazel/overlay_directories.py
index 6cf1a078df6c0..526a78e978e5d 100755
--- a/utils/bazel/overlay_directories.py
+++ b/utils/bazel/overlay_directories.py
@@ -18,75 +18,82 @@
 
 
 def _check_python_version():
-  if sys.version_info[0] < 3:
-    raise RuntimeError(
-        "Must be invoked with a python 3 interpreter but was %s" %
-        sys.executable)
+    if sys.version_info[0] < 3:
+        raise RuntimeError(
+            "Must be invoked with a python 3 interpreter but was %s" % sys.executable
+        )
 
 
 def _check_dir_exists(path):
-  if not os.path.isdir(path):
-    raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
+    if not os.path.isdir(path):
+        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
 
 
 def parse_arguments():
-  parser = argparse.ArgumentParser(description="""
+    parser = argparse.ArgumentParser(
+        description="""
     Overlays two directories into a target directory using symlinks.
 
     Tries to minimize the number of symlinks created (that is, does not symlink
     every single file). Symlinks every file in the overlay directory. Only
     symlinks individual files in the source directory if their parent directory
     is also contained in the overlay directory tree.
-    """)
-  parser.add_argument(
-      "--src",
-      required=True,
-      help="Directory that contains most of the content to symlink.")
-  parser.add_argument(
-      "--overlay",
-      required=True,
-      help="Directory to overlay on top of the source directory.")
-  parser.add_argument(
-      "--target",
-      required=True,
-      help="Directory in which to place the fused symlink directories.")
-
-  args = parser.parse_args()
-
-  _check_dir_exists(args.target)
-  _check_dir_exists(args.overlay)
-  _check_dir_exists(args.src)
-
-  return args
+    """
+    )
+    parser.add_argument(
+        "--src",
+        required=True,
+        help="Directory that contains most of the content to symlink.",
+    )
+    parser.add_argument(
+        "--overlay",
+        required=True,
+        help="Directory to overlay on top of the source directory.",
+    )
+    parser.add_argument(
+        "--target",
+        required=True,
+        help="Directory in which to place the fused symlink directories.",
+    )
+
+    args = parser.parse_args()
+
+    _check_dir_exists(args.target)
+    _check_dir_exists(args.overlay)
+    _check_dir_exists(args.src)
+
+    return args
 
 
 def _symlink_abs(from_path, to_path):
-  os.symlink(os.path.abspath(from_path), os.path.abspath(to_path))
+    os.symlink(os.path.abspath(from_path), os.path.abspath(to_path))
 
 
 def main(args):
-  for root, dirs, files in os.walk(args.overlay):
-    # We could do something more intelligent here and only symlink individual
-    # files if the directory is present in both overlay and src. This could also
-    # be generalized to an arbitrary number of directories without any
-    # "src/overlay" distinction. In the current use case we only have two and
-    # the overlay directory is always small, so putting that off for now.
-    rel_root = os.path.relpath(root, start=args.overlay)
-    if rel_root != ".":
-      os.mkdir(os.path.join(args.target, rel_root))
-
-    for file in files:
-      relpath = os.path.join(rel_root, file)
-      _symlink_abs(os.path.join(args.overlay, relpath),
-                   os.path.join(args.target, relpath))
-
-    for src_entry in os.listdir(os.path.join(args.src, rel_root)):
-      if src_entry not in dirs:
-        relpath = os.path.join(rel_root, src_entry)
-        _symlink_abs(os.path.join(args.src, relpath),
-                     os.path.join(args.target, relpath))
+    for root, dirs, files in os.walk(args.overlay):
+        # We could do something more intelligent here and only symlink individual
+        # files if the directory is present in both overlay and src. This could also
+        # be generalized to an arbitrary number of directories without any
+        # "src/overlay" distinction. In the current use case we only have two and
+        # the overlay directory is always small, so putting that off for now.
+        rel_root = os.path.relpath(root, start=args.overlay)
+        if rel_root != ".":
+            os.mkdir(os.path.join(args.target, rel_root))
+
+        for file in files:
+            relpath = os.path.join(rel_root, file)
+            _symlink_abs(
+                os.path.join(args.overlay, relpath), os.path.join(args.target, relpath)
+            )
+
+        for src_entry in os.listdir(os.path.join(args.src, rel_root)):
+            if src_entry not in dirs:
+                relpath = os.path.join(rel_root, src_entry)
+                _symlink_abs(
+                    os.path.join(args.src, relpath), os.path.join(args.target, relpath)
+                )
 
 
 if __name__ == "__main__":
-  _check_python_version()
-  main(parse_arguments())
+    _check_python_version()
+    main(parse_arguments())


        


More information about the flang-commits mailing list