[llvm] [CodeGen] Port AtomicExpand to new Pass Manager (PR #71220)

Rishabh Bali via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 13 11:56:20 PST 2024


https://github.com/Ris-Bali updated https://github.com/llvm/llvm-project/pull/71220

From afd42a1cf75df0015840f115441ae3a6670ad2dc Mon Sep 17 00:00:00 2001
From: Ris-Bali <rishabhsbali at gmail.com>
Date: Sat, 4 Nov 2023 03:12:05 +0530
Subject: [PATCH 1/4] Port atomicexpandpass to new PM

---
 llvm/include/llvm/CodeGen/AtomicExpand.h      |  35 +
 llvm/include/llvm/CodeGen/Passes.h            | 898 +++++++++---------
 llvm/include/llvm/InitializePasses.h          |   2 +-
 llvm/lib/CodeGen/AtomicExpandPass.cpp         | 143 +--
 llvm/lib/CodeGen/CodeGen.cpp                  |   2 +-
 llvm/lib/Passes/PassBuilder.cpp               |   3 +-
 llvm/lib/Passes/PassRegistry.def              |   1 +
 .../Target/AArch64/AArch64TargetMachine.cpp   |   2 +-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   2 +-
 llvm/lib/Target/ARM/ARMTargetMachine.cpp      |   2 +-
 llvm/lib/Target/CSKY/CSKYTargetMachine.cpp    |   2 +-
 .../Target/Hexagon/HexagonTargetMachine.cpp   |   2 +-
 .../LoongArch/LoongArchTargetMachine.cpp      |   2 +-
 llvm/lib/Target/M68k/M68kTargetMachine.cpp    |   2 +-
 llvm/lib/Target/Mips/MipsTargetMachine.cpp    |   2 +-
 llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp  |   2 +-
 .../PowerPC/PPCExpandAtomicPseudoInsts.cpp    |   2 +-
 llvm/lib/Target/PowerPC/PPCTargetMachine.cpp  |   2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   2 +-
 llvm/lib/Target/Sparc/SparcTargetMachine.cpp  |   2 +-
 .../Target/SystemZ/SystemZTargetMachine.cpp   |   2 +-
 llvm/lib/Target/VE/VETargetMachine.cpp        |   2 +-
 .../WebAssembly/WebAssemblyTargetMachine.cpp  |   2 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp      |   2 +-
 llvm/lib/Target/XCore/XCoreTargetMachine.cpp  |   2 +-
 .../test/CodeGen/AMDGPU/idemponent-atomics.ll |   2 +-
 .../CodeGen/AMDGPU/private-memory-atomics.ll  |   2 +-
 .../AtomicExpand/AArch64/atomicrmw-fp.ll      |   2 +-
 .../AArch64/expand-atomicrmw-xchg-fp.ll       |   4 +-
 .../AtomicExpand/AArch64/pcsections.ll        |   2 +-
 .../AMDGPU/expand-atomic-i16-system.ll        |   2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i16.ll  |   4 +-
 .../AMDGPU/expand-atomic-i8-system.ll         |   2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i8.ll   |   4 +-
 ...and-atomic-rmw-fadd-flat-specialization.ll |   8 +-
 .../AMDGPU/expand-atomic-rmw-fadd.ll          |  12 +-
 .../AMDGPU/expand-atomic-rmw-fmax.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-fmin.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-fsub.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-nand.ll          |   4 +-
 .../expand-atomic-simplify-cfg-CAS-block.ll   |   2 +-
 .../AtomicExpand/AMDGPU/unaligned-atomic.ll   |   2 +-
 .../AtomicExpand/ARM/atomic-expansion-v7.ll   |   2 +-
 .../AtomicExpand/ARM/atomic-expansion-v8.ll   |   2 +-
 .../AtomicExpand/ARM/atomicrmw-fp.ll          |   2 +-
 .../AtomicExpand/ARM/cmpxchg-weak.ll          |   2 +-
 .../AtomicExpand/Hexagon/atomicrmw-fp.ll      |   2 +-
 .../AtomicExpand/LoongArch/atomicrmw-fp.ll    |   2 +-
 .../LoongArch/load-store-atomic.ll            |   4 +-
 .../AtomicExpand/Mips/atomicrmw-fp.ll         |   2 +-
 .../AtomicExpand/PowerPC/atomicrmw-fp.ll      |   2 +-
 .../AtomicExpand/PowerPC/cfence-double.ll     |   4 +-
 .../AtomicExpand/PowerPC/cfence-float.ll      |   4 +-
 .../AtomicExpand/PowerPC/cmpxchg.ll           |   4 +-
 .../AtomicExpand/PowerPC/issue55983.ll        |   4 +-
 .../AtomicExpand/RISCV/atomicrmw-fp.ll        |   2 +-
 .../Transforms/AtomicExpand/SPARC/libcalls.ll |   2 +-
 .../Transforms/AtomicExpand/SPARC/partword.ll |   2 +-
 .../AtomicExpand/X86/expand-atomic-libcall.ll |   2 +-
 .../X86/expand-atomic-non-integer.ll          |   2 +-
 .../AtomicExpand/X86/expand-atomic-rmw-fp.ll  |   2 +-
 .../X86/expand-atomic-rmw-initial-load.ll     |   2 +-
 .../AtomicExpand/X86/expand-atomic-xchg-fp.ll |   2 +-
 llvm/tools/opt/opt.cpp                        |   2 -
 64 files changed, 654 insertions(+), 584 deletions(-)
 create mode 100644 llvm/include/llvm/CodeGen/AtomicExpand.h

diff --git a/llvm/include/llvm/CodeGen/AtomicExpand.h b/llvm/include/llvm/CodeGen/AtomicExpand.h
new file mode 100644
index 00000000000000..2430ac5afa9844
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/AtomicExpand.h
@@ -0,0 +1,35 @@
+//===- AtomicExpand.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the AtomicExpandPass interface for the new pass
+/// manager. At IR level the pass replaces atomic instructions with
+/// __atomic_* library calls, or target-specific instructions which
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ATOMICEXPAND_H
+#define LLVM_CODEGEN_ATOMICEXPAND_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+class TargetMachine;
+
+class AtomicExpandPass : public PassInfoMixin<AtomicExpandPass> {
+private:
+  const TargetMachine *TM;
+
+public:
+  AtomicExpandPass(const TargetMachine *TM) : TM(TM) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ATOMICEXPAND_H
\ No newline at end of file
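
For context, the interface above is all a new-PM client needs: construct the pass with a TargetMachine and add it to a FunctionPassManager. The following is a minimal sketch, not part of this patch; `expandAtomics` is a placeholder name, and `TM` is assumed to be a TargetMachine the caller created elsewhere (e.g. via the TargetRegistry).

  #include "llvm/CodeGen/AtomicExpand.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"

  using namespace llvm;

  // Run only the atomic-expand transform over F under the new pass manager.
  static void expandAtomics(Function &F, const TargetMachine *TM) {
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;

    PassBuilder PB;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    FunctionPassManager FPM;
    FPM.addPass(AtomicExpandPass(TM)); // TM-parameterized ctor from the header above
    FPM.run(F, FAM);
  }
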
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 712048017bca1a..e70cd1462b338d 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -44,566 +44,566 @@ namespace llvm {
   /// AtomicExpandPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createAtomicExpandPass();
-
-  /// createUnreachableBlockEliminationPass - The LLVM code generator does not
-  /// work well with unreachable basic blocks (what live ranges make sense for a
-  /// block that cannot be reached?).  As such, a code generator should either
-  /// not instruction select unreachable blocks, or run this pass as its
-  /// last LLVM modifying pass to clean up blocks that are not reachable from
-  /// the entry block.
-  FunctionPass *createUnreachableBlockEliminationPass();
+FunctionPass *createAtomicExpandLegacyPass();
+
+/// createUnreachableBlockEliminationPass - The LLVM code generator does not
+/// work well with unreachable basic blocks (what live ranges make sense for a
+/// block that cannot be reached?).  As such, a code generator should either
+/// not instruction select unreachable blocks, or run this pass as its
+/// last LLVM modifying pass to clean up blocks that are not reachable from
+/// the entry block.
+FunctionPass *createUnreachableBlockEliminationPass();
 
-  /// createGCEmptyBasicblocksPass - Empty basic blocks (basic blocks without
-  /// real code) appear as the result of optimization passes removing
-  /// instructions. These blocks confuscate profile analysis (e.g., basic block
-  /// sections) since they will share the address of their fallthrough blocks.
-  /// This pass garbage-collects such basic blocks.
-  MachineFunctionPass *createGCEmptyBasicBlocksPass();
-
-  /// createBasicBlockSections Pass - This pass assigns sections to machine
-  /// basic blocks and is enabled with -fbasic-block-sections.
-  MachineFunctionPass *createBasicBlockSectionsPass();
-
-  MachineFunctionPass *createBasicBlockPathCloningPass();
+/// createGCEmptyBasicblocksPass - Empty basic blocks (basic blocks without
+/// real code) appear as the result of optimization passes removing
+/// instructions. These blocks confuscate profile analysis (e.g., basic block
+/// sections) since they will share the address of their fallthrough blocks.
+/// This pass garbage-collects such basic blocks.
+MachineFunctionPass *createGCEmptyBasicBlocksPass();
+
+/// createBasicBlockSections Pass - This pass assigns sections to machine
+/// basic blocks and is enabled with -fbasic-block-sections.
+MachineFunctionPass *createBasicBlockSectionsPass();
+
+MachineFunctionPass *createBasicBlockPathCloningPass();
 
-  /// createMachineFunctionSplitterPass - This pass splits machine functions
-  /// using profile information.
-  MachineFunctionPass *createMachineFunctionSplitterPass();
+/// createMachineFunctionSplitterPass - This pass splits machine functions
+/// using profile information.
+MachineFunctionPass *createMachineFunctionSplitterPass();
 
-  /// MachineFunctionPrinter pass - This pass prints out the machine function to
-  /// the given stream as a debugging tool.
-  MachineFunctionPass *
-  createMachineFunctionPrinterPass(raw_ostream &OS,
-                                   const std::string &Banner ="");
+/// MachineFunctionPrinter pass - This pass prints out the machine function to
+/// the given stream as a debugging tool.
+MachineFunctionPass *
+createMachineFunctionPrinterPass(raw_ostream &OS,
+                                 const std::string &Banner = "");
 
-  /// StackFramePrinter pass - This pass prints out the machine function's
-  /// stack frame to the given stream as a debugging tool.
-  MachineFunctionPass *createStackFrameLayoutAnalysisPass();
+/// StackFramePrinter pass - This pass prints out the machine function's
+/// stack frame to the given stream as a debugging tool.
+MachineFunctionPass *createStackFrameLayoutAnalysisPass();
 
-  /// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
-  /// using the MIR serialization format.
-  MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
+/// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
+/// using the MIR serialization format.
+MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
 
-  /// This pass resets a MachineFunction when it has the FailedISel property
-  /// as if it was just created.
-  /// If EmitFallbackDiag is true, the pass will emit a
-  /// DiagnosticInfoISelFallback for every MachineFunction it resets.
-  /// If AbortOnFailedISel is true, abort compilation instead of resetting.
-  MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
-                                                      bool AbortOnFailedISel);
+/// This pass resets a MachineFunction when it has the FailedISel property
+/// as if it was just created.
+/// If EmitFallbackDiag is true, the pass will emit a
+/// DiagnosticInfoISelFallback for every MachineFunction it resets.
+/// If AbortOnFailedISel is true, abort compilation instead of resetting.
+MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
+                                                    bool AbortOnFailedISel);
 
-  /// createCodeGenPreparePass - Transform the code to expose more pattern
-  /// matching during instruction selection.
-  FunctionPass *createCodeGenPreparePass();
+/// createCodeGenPreparePass - Transform the code to expose more pattern
+/// matching during instruction selection.
+FunctionPass *createCodeGenPreparePass();
 
-  /// This pass implements generation of target-specific intrinsics to support
-  /// handling of complex number arithmetic
-  FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
+/// This pass implements generation of target-specific intrinsics to support
+/// handling of complex number arithmetic
+FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
 
-  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
-  /// load-linked/store-conditional loops.
-  extern char &AtomicExpandID;
+/// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+/// load-linked/store-conditional loops.
+extern char &AtomicExpandID;
 
-  /// MachineLoopInfo - This pass is a loop analysis pass.
-  extern char &MachineLoopInfoID;
-
-  /// MachineDominators - This pass is a machine dominators analysis pass.
-  extern char &MachineDominatorsID;
+/// MachineLoopInfo - This pass is a loop analysis pass.
+extern char &MachineLoopInfoID;
+
+/// MachineDominators - This pass is a machine dominators analysis pass.
+extern char &MachineDominatorsID;
 
-  /// MachineDominanaceFrontier - This pass is a machine dominators analysis.
-  extern char &MachineDominanceFrontierID;
-
-  /// MachineRegionInfo - This pass computes SESE regions for machine functions.
-  extern char &MachineRegionInfoPassID;
-
-  /// EdgeBundles analysis - Bundle machine CFG edges.
-  extern char &EdgeBundlesID;
-
-  /// LiveVariables pass - This pass computes the set of blocks in which each
-  /// variable is life and sets machine operand kill flags.
-  extern char &LiveVariablesID;
-
-  /// PHIElimination - This pass eliminates machine instruction PHI nodes
-  /// by inserting copy instructions.  This destroys SSA information, but is the
-  /// desired input for some register allocators.  This pass is "required" by
-  /// these register allocator like this: AU.addRequiredID(PHIEliminationID);
-  extern char &PHIEliminationID;
-
-  /// LiveIntervals - This analysis keeps track of the live ranges of virtual
-  /// and physical registers.
-  extern char &LiveIntervalsID;
-
-  /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
-  extern char &LiveStacksID;
-
-  /// TwoAddressInstruction - This pass reduces two-address instructions to
-  /// use two operands. This destroys SSA information but it is desired by
-  /// register allocators.
-  extern char &TwoAddressInstructionPassID;
-
-  /// ProcessImpicitDefs pass - This pass removes IMPLICIT_DEFs.
-  extern char &ProcessImplicitDefsID;
-
-  /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
-  extern char &RegisterCoalescerID;
-
-  /// MachineScheduler - This pass schedules machine instructions.
-  extern char &MachineSchedulerID;
-
-  /// PostMachineScheduler - This pass schedules machine instructions postRA.
-  extern char &PostMachineSchedulerID;
+/// MachineDominanaceFrontier - This pass is a machine dominators analysis.
+extern char &MachineDominanceFrontierID;
+
+/// MachineRegionInfo - This pass computes SESE regions for machine functions.
+extern char &MachineRegionInfoPassID;
+
+/// EdgeBundles analysis - Bundle machine CFG edges.
+extern char &EdgeBundlesID;
+
+/// LiveVariables pass - This pass computes the set of blocks in which each
+/// variable is life and sets machine operand kill flags.
+extern char &LiveVariablesID;
+
+/// PHIElimination - This pass eliminates machine instruction PHI nodes
+/// by inserting copy instructions.  This destroys SSA information, but is the
+/// desired input for some register allocators.  This pass is "required" by
+/// these register allocator like this: AU.addRequiredID(PHIEliminationID);
+extern char &PHIEliminationID;
+
+/// LiveIntervals - This analysis keeps track of the live ranges of virtual
+/// and physical registers.
+extern char &LiveIntervalsID;
+
+/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
+extern char &LiveStacksID;
+
+/// TwoAddressInstruction - This pass reduces two-address instructions to
+/// use two operands. This destroys SSA information but it is desired by
+/// register allocators.
+extern char &TwoAddressInstructionPassID;
+
+/// ProcessImpicitDefs pass - This pass removes IMPLICIT_DEFs.
+extern char &ProcessImplicitDefsID;
+
+/// RegisterCoalescer - This pass merges live ranges to eliminate copies.
+extern char &RegisterCoalescerID;
+
+/// MachineScheduler - This pass schedules machine instructions.
+extern char &MachineSchedulerID;
+
+/// PostMachineScheduler - This pass schedules machine instructions postRA.
+extern char &PostMachineSchedulerID;
 
-  /// SpillPlacement analysis. Suggest optimal placement of spill code between
-  /// basic blocks.
-  extern char &SpillPlacementID;
+/// SpillPlacement analysis. Suggest optimal placement of spill code between
+/// basic blocks.
+extern char &SpillPlacementID;
 
-  /// ShrinkWrap pass. Look for the best place to insert save and restore
-  // instruction and update the MachineFunctionInfo with that information.
-  extern char &ShrinkWrapID;
+/// ShrinkWrap pass. Look for the best place to insert save and restore
+// instruction and update the MachineFunctionInfo with that information.
+extern char &ShrinkWrapID;
 
-  /// LiveRangeShrink pass. Move instruction close to its definition to shrink
-  /// the definition's live range.
-  extern char &LiveRangeShrinkID;
+/// LiveRangeShrink pass. Move instruction close to its definition to shrink
+/// the definition's live range.
+extern char &LiveRangeShrinkID;
 
-  /// Greedy register allocator.
-  extern char &RAGreedyID;
-
-  /// Basic register allocator.
-  extern char &RABasicID;
-
-  /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
-  /// assigned in VirtRegMap.
-  extern char &VirtRegRewriterID;
-  FunctionPass *createVirtRegRewriter(bool ClearVirtRegs = true);
+/// Greedy register allocator.
+extern char &RAGreedyID;
+
+/// Basic register allocator.
+extern char &RABasicID;
+
+/// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
+/// assigned in VirtRegMap.
+extern char &VirtRegRewriterID;
+FunctionPass *createVirtRegRewriter(bool ClearVirtRegs = true);
 
-  /// UnreachableMachineBlockElimination - This pass removes unreachable
-  /// machine basic blocks.
-  extern char &UnreachableMachineBlockElimID;
+/// UnreachableMachineBlockElimination - This pass removes unreachable
+/// machine basic blocks.
+extern char &UnreachableMachineBlockElimID;
 
-  /// DeadMachineInstructionElim - This pass removes dead machine instructions.
-  extern char &DeadMachineInstructionElimID;
+/// DeadMachineInstructionElim - This pass removes dead machine instructions.
+extern char &DeadMachineInstructionElimID;
 
-  /// This pass adds dead/undef flags after analyzing subregister lanes.
-  extern char &DetectDeadLanesID;
+/// This pass adds dead/undef flags after analyzing subregister lanes.
+extern char &DetectDeadLanesID;
 
-  /// This pass perform post-ra machine sink for COPY instructions.
-  extern char &PostRAMachineSinkingID;
+/// This pass perform post-ra machine sink for COPY instructions.
+extern char &PostRAMachineSinkingID;
 
-  /// This pass adds flow sensitive discriminators.
-  extern char &MIRAddFSDiscriminatorsID;
+/// This pass adds flow sensitive discriminators.
+extern char &MIRAddFSDiscriminatorsID;
 
-  /// This pass reads flow sensitive profile.
-  extern char &MIRProfileLoaderPassID;
+/// This pass reads flow sensitive profile.
+extern char &MIRProfileLoaderPassID;
 
-  /// FastRegisterAllocation Pass - This pass register allocates as fast as
-  /// possible. It is best suited for debug code where live ranges are short.
-  ///
-  FunctionPass *createFastRegisterAllocator();
-  FunctionPass *createFastRegisterAllocator(RegClassFilterFunc F,
-                                            bool ClearVirtRegs);
+/// FastRegisterAllocation Pass - This pass register allocates as fast as
+/// possible. It is best suited for debug code where live ranges are short.
+///
+FunctionPass *createFastRegisterAllocator();
+FunctionPass *createFastRegisterAllocator(RegClassFilterFunc F,
+                                          bool ClearVirtRegs);
 
-  /// BasicRegisterAllocation Pass - This pass implements a degenerate global
-  /// register allocator using the basic regalloc framework.
-  ///
-  FunctionPass *createBasicRegisterAllocator();
-  FunctionPass *createBasicRegisterAllocator(RegClassFilterFunc F);
+/// BasicRegisterAllocation Pass - This pass implements a degenerate global
+/// register allocator using the basic regalloc framework.
+///
+FunctionPass *createBasicRegisterAllocator();
+FunctionPass *createBasicRegisterAllocator(RegClassFilterFunc F);
 
-  /// Greedy register allocation pass - This pass implements a global register
-  /// allocator for optimized builds.
-  ///
-  FunctionPass *createGreedyRegisterAllocator();
-  FunctionPass *createGreedyRegisterAllocator(RegClassFilterFunc F);
+/// Greedy register allocation pass - This pass implements a global register
+/// allocator for optimized builds.
+///
+FunctionPass *createGreedyRegisterAllocator();
+FunctionPass *createGreedyRegisterAllocator(RegClassFilterFunc F);
 
-  /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
-  /// Quadratic Prograaming (PBQP) based register allocator.
-  ///
-  FunctionPass *createDefaultPBQPRegisterAllocator();
+/// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
+/// Quadratic Prograaming (PBQP) based register allocator.
+///
+FunctionPass *createDefaultPBQPRegisterAllocator();
 
-  /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
-  /// and eliminates abstract frame references.
-  extern char &PrologEpilogCodeInserterID;
-  MachineFunctionPass *createPrologEpilogInserterPass();
-
-  /// ExpandPostRAPseudos - This pass expands pseudo instructions after
-  /// register allocation.
-  extern char &ExpandPostRAPseudosID;
+/// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
+/// and eliminates abstract frame references.
+extern char &PrologEpilogCodeInserterID;
+MachineFunctionPass *createPrologEpilogInserterPass();
+
+/// ExpandPostRAPseudos - This pass expands pseudo instructions after
+/// register allocation.
+extern char &ExpandPostRAPseudosID;
 
-  /// PostRAHazardRecognizer - This pass runs the post-ra hazard
-  /// recognizer.
-  extern char &PostRAHazardRecognizerID;
+/// PostRAHazardRecognizer - This pass runs the post-ra hazard
+/// recognizer.
+extern char &PostRAHazardRecognizerID;
 
-  /// PostRAScheduler - This pass performs post register allocation
-  /// scheduling.
-  extern char &PostRASchedulerID;
+/// PostRAScheduler - This pass performs post register allocation
+/// scheduling.
+extern char &PostRASchedulerID;
 
-  /// BranchFolding - This pass performs machine code CFG based
-  /// optimizations to delete branches to branches, eliminate branches to
-  /// successor blocks (creating fall throughs), and eliminating branches over
-  /// branches.
-  extern char &BranchFolderPassID;
+/// BranchFolding - This pass performs machine code CFG based
+/// optimizations to delete branches to branches, eliminate branches to
+/// successor blocks (creating fall throughs), and eliminating branches over
+/// branches.
+extern char &BranchFolderPassID;
 
-  /// BranchRelaxation - This pass replaces branches that need to jump further
-  /// than is supported by a branch instruction.
-  extern char &BranchRelaxationPassID;
+/// BranchRelaxation - This pass replaces branches that need to jump further
+/// than is supported by a branch instruction.
+extern char &BranchRelaxationPassID;
 
-  /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
-  extern char &MachineFunctionPrinterPassID;
+/// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
+extern char &MachineFunctionPrinterPassID;
 
-  /// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
-  /// serialization format.
-  extern char &MIRPrintingPassID;
+/// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
+/// serialization format.
+extern char &MIRPrintingPassID;
 
-  /// TailDuplicate - Duplicate blocks with unconditional branches
-  /// into tails of their predecessors.
-  extern char &TailDuplicateID;
+/// TailDuplicate - Duplicate blocks with unconditional branches
+/// into tails of their predecessors.
+extern char &TailDuplicateID;
 
-  /// Duplicate blocks with unconditional branches into tails of their
-  /// predecessors. Variant that works before register allocation.
-  extern char &EarlyTailDuplicateID;
+/// Duplicate blocks with unconditional branches into tails of their
+/// predecessors. Variant that works before register allocation.
+extern char &EarlyTailDuplicateID;
 
-  /// MachineTraceMetrics - This pass computes critical path and CPU resource
-  /// usage in an ensemble of traces.
-  extern char &MachineTraceMetricsID;
+/// MachineTraceMetrics - This pass computes critical path and CPU resource
+/// usage in an ensemble of traces.
+extern char &MachineTraceMetricsID;
 
-  /// EarlyIfConverter - This pass performs if-conversion on SSA form by
-  /// inserting cmov instructions.
-  extern char &EarlyIfConverterID;
-
-  /// EarlyIfPredicator - This pass performs if-conversion on SSA form by
-  /// predicating if/else block and insert select at the join point.
-  extern char &EarlyIfPredicatorID;
+/// EarlyIfConverter - This pass performs if-conversion on SSA form by
+/// inserting cmov instructions.
+extern char &EarlyIfConverterID;
+
+/// EarlyIfPredicator - This pass performs if-conversion on SSA form by
+/// predicating if/else block and insert select at the join point.
+extern char &EarlyIfPredicatorID;
 
-  /// This pass performs instruction combining using trace metrics to estimate
-  /// critical-path and resource depth.
-  extern char &MachineCombinerID;
+/// This pass performs instruction combining using trace metrics to estimate
+/// critical-path and resource depth.
+extern char &MachineCombinerID;
 
-  /// StackSlotColoring - This pass performs stack coloring and merging.
-  /// It merges disjoint allocas to reduce the stack size.
-  extern char &StackColoringID;
+/// StackSlotColoring - This pass performs stack coloring and merging.
+/// It merges disjoint allocas to reduce the stack size.
+extern char &StackColoringID;
 
-  /// StackFramePrinter - This pass prints the stack frame layout and variable
-  /// mappings.
-  extern char &StackFrameLayoutAnalysisPassID;
+/// StackFramePrinter - This pass prints the stack frame layout and variable
+/// mappings.
+extern char &StackFrameLayoutAnalysisPassID;
 
-  /// IfConverter - This pass performs machine code if conversion.
-  extern char &IfConverterID;
+/// IfConverter - This pass performs machine code if conversion.
+extern char &IfConverterID;
 
-  FunctionPass *createIfConverter(
-      std::function<bool(const MachineFunction &)> Ftor);
-
-  /// MachineBlockPlacement - This pass places basic blocks based on branch
-  /// probabilities.
-  extern char &MachineBlockPlacementID;
-
-  /// MachineBlockPlacementStats - This pass collects statistics about the
-  /// basic block placement using branch probabilities and block frequency
-  /// information.
-  extern char &MachineBlockPlacementStatsID;
+FunctionPass *
+createIfConverter(std::function<bool(const MachineFunction &)> Ftor);
+
+/// MachineBlockPlacement - This pass places basic blocks based on branch
+/// probabilities.
+extern char &MachineBlockPlacementID;
+
+/// MachineBlockPlacementStats - This pass collects statistics about the
+/// basic block placement using branch probabilities and block frequency
+/// information.
+extern char &MachineBlockPlacementStatsID;
 
-  /// GCLowering Pass - Used by gc.root to perform its default lowering
-  /// operations.
-  FunctionPass *createGCLoweringPass();
-
-  /// GCLowering Pass - Used by gc.root to perform its default lowering
-  /// operations.
-  extern char &GCLoweringID;
+/// GCLowering Pass - Used by gc.root to perform its default lowering
+/// operations.
+FunctionPass *createGCLoweringPass();
+
+/// GCLowering Pass - Used by gc.root to perform its default lowering
+/// operations.
+extern char &GCLoweringID;
 
-  /// ShadowStackGCLowering - Implements the custom lowering mechanism
-  /// used by the shadow stack GC.  Only runs on functions which opt in to
-  /// the shadow stack collector.
-  FunctionPass *createShadowStackGCLoweringPass();
+/// ShadowStackGCLowering - Implements the custom lowering mechanism
+/// used by the shadow stack GC.  Only runs on functions which opt in to
+/// the shadow stack collector.
+FunctionPass *createShadowStackGCLoweringPass();
 
-  /// ShadowStackGCLowering - Implements the custom lowering mechanism
-  /// used by the shadow stack GC.
-  extern char &ShadowStackGCLoweringID;
+/// ShadowStackGCLowering - Implements the custom lowering mechanism
+/// used by the shadow stack GC.
+extern char &ShadowStackGCLoweringID;
 
-  /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
-  /// in machine code. Must be added very late during code generation, just
-  /// prior to output, and importantly after all CFG transformations (such as
-  /// branch folding).
-  extern char &GCMachineCodeAnalysisID;
+/// GCMachineCodeAnalysis - Target-independent pass to mark safe points
+/// in machine code. Must be added very late during code generation, just
+/// prior to output, and importantly after all CFG transformations (such as
+/// branch folding).
+extern char &GCMachineCodeAnalysisID;
 
-  /// Creates a pass to print GC metadata.
-  ///
-  FunctionPass *createGCInfoPrinter(raw_ostream &OS);
+/// Creates a pass to print GC metadata.
+///
+FunctionPass *createGCInfoPrinter(raw_ostream &OS);
 
-  /// MachineCSE - This pass performs global CSE on machine instructions.
-  extern char &MachineCSEID;
+/// MachineCSE - This pass performs global CSE on machine instructions.
+extern char &MachineCSEID;
 
-  /// MIRCanonicalizer - This pass canonicalizes MIR by renaming vregs
-  /// according to the semantics of the instruction as well as hoists
-  /// code.
-  extern char &MIRCanonicalizerID;
+/// MIRCanonicalizer - This pass canonicalizes MIR by renaming vregs
+/// according to the semantics of the instruction as well as hoists
+/// code.
+extern char &MIRCanonicalizerID;
 
-  /// ImplicitNullChecks - This pass folds null pointer checks into nearby
-  /// memory operations.
-  extern char &ImplicitNullChecksID;
+/// ImplicitNullChecks - This pass folds null pointer checks into nearby
+/// memory operations.
+extern char &ImplicitNullChecksID;
 
-  /// This pass performs loop invariant code motion on machine instructions.
-  extern char &MachineLICMID;
+/// This pass performs loop invariant code motion on machine instructions.
+extern char &MachineLICMID;
 
-  /// This pass performs loop invariant code motion on machine instructions.
-  /// This variant works before register allocation. \see MachineLICMID.
-  extern char &EarlyMachineLICMID;
+/// This pass performs loop invariant code motion on machine instructions.
+/// This variant works before register allocation. \see MachineLICMID.
+extern char &EarlyMachineLICMID;
 
-  /// MachineSinking - This pass performs sinking on machine instructions.
-  extern char &MachineSinkingID;
+/// MachineSinking - This pass performs sinking on machine instructions.
+extern char &MachineSinkingID;
 
-  /// MachineCopyPropagation - This pass performs copy propagation on
-  /// machine instructions.
-  extern char &MachineCopyPropagationID;
+/// MachineCopyPropagation - This pass performs copy propagation on
+/// machine instructions.
+extern char &MachineCopyPropagationID;
 
-  MachineFunctionPass *createMachineCopyPropagationPass(bool UseCopyInstr);
+MachineFunctionPass *createMachineCopyPropagationPass(bool UseCopyInstr);
 
-  /// MachineLateInstrsCleanup - This pass removes redundant identical
-  /// instructions after register allocation and rematerialization.
-  extern char &MachineLateInstrsCleanupID;
+/// MachineLateInstrsCleanup - This pass removes redundant identical
+/// instructions after register allocation and rematerialization.
+extern char &MachineLateInstrsCleanupID;
 
-  /// PeepholeOptimizer - This pass performs peephole optimizations -
-  /// like extension and comparison eliminations.
-  extern char &PeepholeOptimizerID;
+/// PeepholeOptimizer - This pass performs peephole optimizations -
+/// like extension and comparison eliminations.
+extern char &PeepholeOptimizerID;
 
-  /// OptimizePHIs - This pass optimizes machine instruction PHIs
-  /// to take advantage of opportunities created during DAG legalization.
-  extern char &OptimizePHIsID;
+/// OptimizePHIs - This pass optimizes machine instruction PHIs
+/// to take advantage of opportunities created during DAG legalization.
+extern char &OptimizePHIsID;
 
-  /// StackSlotColoring - This pass performs stack slot coloring.
-  extern char &StackSlotColoringID;
+/// StackSlotColoring - This pass performs stack slot coloring.
+extern char &StackSlotColoringID;
 
-  /// This pass lays out funclets contiguously.
-  extern char &FuncletLayoutID;
+/// This pass lays out funclets contiguously.
+extern char &FuncletLayoutID;
 
-  /// This pass inserts the XRay instrumentation sleds if they are supported by
-  /// the target platform.
-  extern char &XRayInstrumentationID;
+/// This pass inserts the XRay instrumentation sleds if they are supported by
+/// the target platform.
+extern char &XRayInstrumentationID;
 
-  /// This pass inserts FEntry calls
-  extern char &FEntryInserterID;
+/// This pass inserts FEntry calls
+extern char &FEntryInserterID;
 
-  /// This pass implements the "patchable-function" attribute.
-  extern char &PatchableFunctionID;
-
-  /// createStackProtectorPass - This pass adds stack protectors to functions.
-  ///
-  FunctionPass *createStackProtectorPass();
-
-  /// createMachineVerifierPass - This pass verifies cenerated machine code
-  /// instructions for correctness.
-  ///
-  FunctionPass *createMachineVerifierPass(const std::string& Banner);
-
-  /// createDwarfEHPass - This pass mulches exception handling code into a form
-  /// adapted to code generation.  Required if using dwarf exception handling.
-  FunctionPass *createDwarfEHPass(CodeGenOptLevel OptLevel);
-
-  /// createWinEHPass - Prepares personality functions used by MSVC on Windows,
-  /// in addition to the Itanium LSDA based personalities.
-  FunctionPass *createWinEHPass(bool DemoteCatchSwitchPHIOnly = false);
+/// This pass implements the "patchable-function" attribute.
+extern char &PatchableFunctionID;
+
+/// createStackProtectorPass - This pass adds stack protectors to functions.
+///
+FunctionPass *createStackProtectorPass();
+
+/// createMachineVerifierPass - This pass verifies cenerated machine code
+/// instructions for correctness.
+///
+FunctionPass *createMachineVerifierPass(const std::string &Banner);
+
+/// createDwarfEHPass - This pass mulches exception handling code into a form
+/// adapted to code generation.  Required if using dwarf exception handling.
+FunctionPass *createDwarfEHPass(CodeGenOptLevel OptLevel);
+
+/// createWinEHPass - Prepares personality functions used by MSVC on Windows,
+/// in addition to the Itanium LSDA based personalities.
+FunctionPass *createWinEHPass(bool DemoteCatchSwitchPHIOnly = false);
 
-  /// createSjLjEHPreparePass - This pass adapts exception handling code to use
-  /// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
-  ///
-  FunctionPass *createSjLjEHPreparePass(const TargetMachine *TM);
-
-  /// createWasmEHPass - This pass adapts exception handling code to use
-  /// WebAssembly's exception handling scheme.
-  FunctionPass *createWasmEHPass();
-
-  /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
-  /// slots relative to one another and allocates base registers to access them
-  /// when it is estimated by the target to be out of range of normal frame
-  /// pointer or stack pointer index addressing.
-  extern char &LocalStackSlotAllocationID;
-
-  /// This pass expands pseudo-instructions, reserves registers and adjusts
-  /// machine frame information.
-  extern char &FinalizeISelID;
+/// createSjLjEHPreparePass - This pass adapts exception handling code to use
+/// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
+///
+FunctionPass *createSjLjEHPreparePass(const TargetMachine *TM);
+
+/// createWasmEHPass - This pass adapts exception handling code to use
+/// WebAssembly's exception handling scheme.
+FunctionPass *createWasmEHPass();
+
+/// LocalStackSlotAllocation - This pass assigns local frame indices to stack
+/// slots relative to one another and allocates base registers to access them
+/// when it is estimated by the target to be out of range of normal frame
+/// pointer or stack pointer index addressing.
+extern char &LocalStackSlotAllocationID;
+
+/// This pass expands pseudo-instructions, reserves registers and adjusts
+/// machine frame information.
+extern char &FinalizeISelID;
 
-  /// UnpackMachineBundles - This pass unpack machine instruction bundles.
-  extern char &UnpackMachineBundlesID;
+/// UnpackMachineBundles - This pass unpack machine instruction bundles.
+extern char &UnpackMachineBundlesID;
 
-  FunctionPass *
-  createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
+FunctionPass *
+createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
 
-  /// FinalizeMachineBundles - This pass finalize machine instruction
-  /// bundles (created earlier, e.g. during pre-RA scheduling).
-  extern char &FinalizeMachineBundlesID;
+/// FinalizeMachineBundles - This pass finalize machine instruction
+/// bundles (created earlier, e.g. during pre-RA scheduling).
+extern char &FinalizeMachineBundlesID;
 
-  /// StackMapLiveness - This pass analyses the register live-out set of
-  /// stackmap/patchpoint intrinsics and attaches the calculated information to
-  /// the intrinsic for later emission to the StackMap.
-  extern char &StackMapLivenessID;
+/// StackMapLiveness - This pass analyses the register live-out set of
+/// stackmap/patchpoint intrinsics and attaches the calculated information to
+/// the intrinsic for later emission to the StackMap.
+extern char &StackMapLivenessID;
 
-  // MachineSanitizerBinaryMetadata - appends/finalizes sanitizer binary
-  // metadata after llvm SanitizerBinaryMetadata pass.
-  extern char &MachineSanitizerBinaryMetadataID;
+// MachineSanitizerBinaryMetadata - appends/finalizes sanitizer binary
+// metadata after llvm SanitizerBinaryMetadata pass.
+extern char &MachineSanitizerBinaryMetadataID;
 
-  /// RemoveRedundantDebugValues pass.
-  extern char &RemoveRedundantDebugValuesID;
+/// RemoveRedundantDebugValues pass.
+extern char &RemoveRedundantDebugValuesID;
 
-  /// MachineCFGPrinter pass.
-  extern char &MachineCFGPrinterID;
+/// MachineCFGPrinter pass.
+extern char &MachineCFGPrinterID;
 
-  /// LiveDebugValues pass
-  extern char &LiveDebugValuesID;
+/// LiveDebugValues pass
+extern char &LiveDebugValuesID;
 
-  /// InterleavedAccess Pass - This pass identifies and matches interleaved
-  /// memory accesses to target specific intrinsics.
-  ///
-  FunctionPass *createInterleavedAccessPass();
+/// InterleavedAccess Pass - This pass identifies and matches interleaved
+/// memory accesses to target specific intrinsics.
+///
+FunctionPass *createInterleavedAccessPass();
 
-  /// InterleavedLoadCombines Pass - This pass identifies interleaved loads and
-  /// combines them into wide loads detectable by InterleavedAccessPass
-  ///
-  FunctionPass *createInterleavedLoadCombinePass();
+/// InterleavedLoadCombines Pass - This pass identifies interleaved loads and
+/// combines them into wide loads detectable by InterleavedAccessPass
+///
+FunctionPass *createInterleavedLoadCombinePass();
 
-  /// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
-  /// TLS variables for the emulated TLS model.
-  ///
-  ModulePass *createLowerEmuTLSPass();
+/// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
+/// TLS variables for the emulated TLS model.
+///
+ModulePass *createLowerEmuTLSPass();
 
-  /// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
-  /// instructions.  This is unsafe to do earlier because a pass may combine the
-  /// constant initializer into the load, which may result in an overflowing
-  /// evaluation.
-  ModulePass *createPreISelIntrinsicLoweringPass();
+/// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
+/// instructions.  This is unsafe to do earlier because a pass may combine the
+/// constant initializer into the load, which may result in an overflowing
+/// evaluation.
+ModulePass *createPreISelIntrinsicLoweringPass();
 
-  /// GlobalMerge - This pass merges internal (by default) globals into structs
-  /// to enable reuse of a base pointer by indexed addressing modes.
-  /// It can also be configured to focus on size optimizations only.
-  ///
-  Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
-                              bool OnlyOptimizeForSize = false,
-                              bool MergeExternalByDefault = false);
+/// GlobalMerge - This pass merges internal (by default) globals into structs
+/// to enable reuse of a base pointer by indexed addressing modes.
+/// It can also be configured to focus on size optimizations only.
+///
+Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
+                            bool OnlyOptimizeForSize = false,
+                            bool MergeExternalByDefault = false);
 
-  /// This pass splits the stack into a safe stack and an unsafe stack to
-  /// protect against stack-based overflow vulnerabilities.
-  FunctionPass *createSafeStackPass();
+/// This pass splits the stack into a safe stack and an unsafe stack to
+/// protect against stack-based overflow vulnerabilities.
+FunctionPass *createSafeStackPass();
 
-  /// This pass detects subregister lanes in a virtual register that are used
-  /// independently of other lanes and splits them into separate virtual
-  /// registers.
-  extern char &RenameIndependentSubregsID;
+/// This pass detects subregister lanes in a virtual register that are used
+/// independently of other lanes and splits them into separate virtual
+/// registers.
+extern char &RenameIndependentSubregsID;
 
-  /// This pass is executed POST-RA to collect which physical registers are
-  /// preserved by given machine function.
-  FunctionPass *createRegUsageInfoCollector();
+/// This pass is executed POST-RA to collect which physical registers are
+/// preserved by given machine function.
+FunctionPass *createRegUsageInfoCollector();
 
-  /// Return a MachineFunction pass that identifies call sites
-  /// and propagates register usage information of callee to caller
-  /// if available with PysicalRegisterUsageInfo pass.
-  FunctionPass *createRegUsageInfoPropPass();
+/// Return a MachineFunction pass that identifies call sites
+/// and propagates register usage information of callee to caller
+/// if available with PysicalRegisterUsageInfo pass.
+FunctionPass *createRegUsageInfoPropPass();
 
-  /// This pass performs software pipelining on machine instructions.
-  extern char &MachinePipelinerID;
+/// This pass performs software pipelining on machine instructions.
+extern char &MachinePipelinerID;
 
-  /// This pass frees the memory occupied by the MachineFunction.
-  FunctionPass *createFreeMachineFunctionPass();
+/// This pass frees the memory occupied by the MachineFunction.
+FunctionPass *createFreeMachineFunctionPass();
 
-  /// This pass performs outlining on machine instructions directly before
-  /// printing assembly.
-  ModulePass *createMachineOutlinerPass(bool RunOnAllFunctions = true);
+/// This pass performs outlining on machine instructions directly before
+/// printing assembly.
+ModulePass *createMachineOutlinerPass(bool RunOnAllFunctions = true);
 
-  /// This pass expands the reduction intrinsics into sequences of shuffles.
-  FunctionPass *createExpandReductionsPass();
+/// This pass expands the reduction intrinsics into sequences of shuffles.
+FunctionPass *createExpandReductionsPass();
 
-  // This pass replaces intrinsics operating on vector operands with calls to
-  // the corresponding function in a vector library (e.g., SVML, libmvec).
-  FunctionPass *createReplaceWithVeclibLegacyPass();
+// This pass replaces intrinsics operating on vector operands with calls to
+// the corresponding function in a vector library (e.g., SVML, libmvec).
+FunctionPass *createReplaceWithVeclibLegacyPass();
 
-  /// This pass expands the vector predication intrinsics into unpredicated
-  /// instructions with selects or just the explicit vector length into the
-  /// predicate mask.
-  FunctionPass *createExpandVectorPredicationPass();
+/// This pass expands the vector predication intrinsics into unpredicated
+/// instructions with selects or just the explicit vector length into the
+/// predicate mask.
+FunctionPass *createExpandVectorPredicationPass();
 
-  // Expands large div/rem instructions.
-  FunctionPass *createExpandLargeDivRemPass();
+// Expands large div/rem instructions.
+FunctionPass *createExpandLargeDivRemPass();
 
-  // Expands large div/rem instructions.
-  FunctionPass *createExpandLargeFpConvertPass();
+// Expands large div/rem instructions.
+FunctionPass *createExpandLargeFpConvertPass();
 
-  // This pass expands memcmp() to load/stores.
-  FunctionPass *createExpandMemCmpPass();
+// This pass expands memcmp() to load/stores.
+FunctionPass *createExpandMemCmpPass();
 
-  /// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
-  FunctionPass *createBreakFalseDeps();
+/// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
+FunctionPass *createBreakFalseDeps();
 
-  // This pass expands indirectbr instructions.
-  FunctionPass *createIndirectBrExpandPass();
+// This pass expands indirectbr instructions.
+FunctionPass *createIndirectBrExpandPass();
 
-  /// Creates CFI Fixup pass. \see CFIFixup.cpp
-  FunctionPass *createCFIFixup();
+/// Creates CFI Fixup pass. \see CFIFixup.cpp
+FunctionPass *createCFIFixup();
 
-  /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp
-  FunctionPass *createCFIInstrInserter();
+/// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp
+FunctionPass *createCFIInstrInserter();
 
-  /// Creates CFGuard longjmp target identification pass.
-  /// \see CFGuardLongjmp.cpp
-  FunctionPass *createCFGuardLongjmpPass();
+/// Creates CFGuard longjmp target identification pass.
+/// \see CFGuardLongjmp.cpp
+FunctionPass *createCFGuardLongjmpPass();
 
-  /// Creates EHContGuard catchret target identification pass.
-  /// \see EHContGuardCatchret.cpp
-  FunctionPass *createEHContGuardCatchretPass();
+/// Creates EHContGuard catchret target identification pass.
+/// \see EHContGuardCatchret.cpp
+FunctionPass *createEHContGuardCatchretPass();
 
-  /// Create Hardware Loop pass. \see HardwareLoops.cpp
-  FunctionPass *createHardwareLoopsLegacyPass();
+/// Create Hardware Loop pass. \see HardwareLoops.cpp
+FunctionPass *createHardwareLoopsLegacyPass();
 
-  /// This pass inserts pseudo probe annotation for callsite profiling.
-  FunctionPass *createPseudoProbeInserter();
+/// This pass inserts pseudo probe annotation for callsite profiling.
+FunctionPass *createPseudoProbeInserter();
 
-  /// Create IR Type Promotion pass. \see TypePromotion.cpp
-  FunctionPass *createTypePromotionLegacyPass();
+/// Create IR Type Promotion pass. \see TypePromotion.cpp
+FunctionPass *createTypePromotionLegacyPass();
 
-  /// Add Flow Sensitive Discriminators. PassNum specifies the
-  /// sequence number of this pass (starting from 1).
-  FunctionPass *
-  createMIRAddFSDiscriminatorsPass(sampleprof::FSDiscriminatorPass P);
+/// Add Flow Sensitive Discriminators. PassNum specifies the
+/// sequence number of this pass (starting from 1).
+FunctionPass *
+createMIRAddFSDiscriminatorsPass(sampleprof::FSDiscriminatorPass P);
 
-  /// Read Flow Sensitive Profile.
-  FunctionPass *
-  createMIRProfileLoaderPass(std::string File, std::string RemappingFile,
-                             sampleprof::FSDiscriminatorPass P,
-                             IntrusiveRefCntPtr<vfs::FileSystem> FS);
+/// Read Flow Sensitive Profile.
+FunctionPass *
+createMIRProfileLoaderPass(std::string File, std::string RemappingFile,
+                           sampleprof::FSDiscriminatorPass P,
+                           IntrusiveRefCntPtr<vfs::FileSystem> FS);
 
-  /// Creates MIR Debugify pass. \see MachineDebugify.cpp
-  ModulePass *createDebugifyMachineModulePass();
+/// Creates MIR Debugify pass. \see MachineDebugify.cpp
+ModulePass *createDebugifyMachineModulePass();
 
-  /// Creates MIR Strip Debug pass. \see MachineStripDebug.cpp
-  /// If OnlyDebugified is true then it will only strip debug info if it was
-  /// added by a Debugify pass. The module will be left unchanged if the debug
-  /// info was generated by another source such as clang.
-  ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified);
+/// Creates MIR Strip Debug pass. \see MachineStripDebug.cpp
+/// If OnlyDebugified is true then it will only strip debug info if it was
+/// added by a Debugify pass. The module will be left unchanged if the debug
+/// info was generated by another source such as clang.
+ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified);
 
-  /// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
-  ModulePass *createCheckDebugMachineModulePass();
+/// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
+ModulePass *createCheckDebugMachineModulePass();
 
-  /// The pass fixups statepoint machine instruction to replace usage of
-  /// caller saved registers with stack slots.
-  extern char &FixupStatepointCallerSavedID;
+/// The pass fixups statepoint machine instruction to replace usage of
+/// caller saved registers with stack slots.
+extern char &FixupStatepointCallerSavedID;
 
-  /// The pass transforms load/store <256 x i32> to AMX load/store intrinsics
-  /// or split the data to two <128 x i32>.
-  FunctionPass *createX86LowerAMXTypePass();
+/// The pass transforms load/store <256 x i32> to AMX load/store intrinsics
+/// or split the data to two <128 x i32>.
+FunctionPass *createX86LowerAMXTypePass();
 
-  /// The pass transforms amx intrinsics to scalar operation if the function has
-  /// optnone attribute or it is O0.
-  FunctionPass *createX86LowerAMXIntrinsicsPass();
+/// The pass transforms amx intrinsics to scalar operation if the function has
+/// optnone attribute or it is O0.
+FunctionPass *createX86LowerAMXIntrinsicsPass();
 
-  /// When learning an eviction policy, extract score(reward) information,
-  /// otherwise this does nothing
-  FunctionPass *createRegAllocScoringPass();
+/// When learning an eviction policy, extract score(reward) information,
+/// otherwise this does nothing
+FunctionPass *createRegAllocScoringPass();
 
-  /// JMC instrument pass.
-  ModulePass *createJMCInstrumenterPass();
+/// JMC instrument pass.
+ModulePass *createJMCInstrumenterPass();
 
-  /// This pass converts conditional moves to conditional jumps when profitable.
-  FunctionPass *createSelectOptimizePass();
+/// This pass converts conditional moves to conditional jumps when profitable.
+FunctionPass *createSelectOptimizePass();
 
-  FunctionPass *createCallBrPass();
+FunctionPass *createCallBrPass();
 
-  /// Lowers KCFI operand bundles for indirect calls.
-  FunctionPass *createKCFIPass();
+/// Lowers KCFI operand bundles for indirect calls.
+FunctionPass *createKCFIPass();
 } // End llvm namespace
 
 #endif
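
On the legacy-PM side, the only functional change in this header is the rename to createAtomicExpandLegacyPass(), which is why every TargetMachine.cpp in the diffstat changes by exactly one line. A sketch of what such a call site looks like after the rename; `MyTargetPassConfig` is a hypothetical stand-in for the per-target pass configs (AArch64, X86, ...), not code from this patch.

  #include "llvm/CodeGen/Passes.h"
  #include "llvm/CodeGen/TargetPassConfig.h"

  using namespace llvm;

  namespace {
  // Hypothetical target pass config; real targets make the equivalent
  // one-line change in their existing TargetPassConfig subclass.
  class MyTargetPassConfig : public TargetPassConfig {
  public:
    MyTargetPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
        : TargetPassConfig(TM, PM) {}

    void addIRPasses() override {
      // Renamed factory: returns the legacy FunctionPass wrapper that now
      // delegates to the shared AtomicExpandImpl.
      addPass(createAtomicExpandLegacyPass());
      TargetPassConfig::addIRPasses();
    }
  };
  } // end anonymous namespace
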
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index fafae8b5ecd7a7..a2bd5bdf4c5341 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -54,7 +54,7 @@ void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
 void initializeAssignmentTrackingAnalysisPass(PassRegistry &);
 void initializeAssumeBuilderPassLegacyPassPass(PassRegistry &);
 void initializeAssumptionCacheTrackerPass(PassRegistry&);
-void initializeAtomicExpandPass(PassRegistry&);
+void initializeAtomicExpandLegacyPass(PassRegistry &);
 void initializeBasicBlockPathCloningPass(PassRegistry &);
 void initializeBasicBlockSectionsProfileReaderPass(PassRegistry &);
 void initializeBasicBlockSectionsPass(PassRegistry &);
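
Correspondingly, any tool that registers the legacy pass by initializer now calls the renamed function. A minimal sketch, assuming the usual global PassRegistry; the wrapper function name is illustrative only.

  #include "llvm/InitializePasses.h"
  #include "llvm/PassRegistry.h"

  using namespace llvm;

  static void initAtomicExpandLegacy() {
    // Renamed from initializeAtomicExpandPass(); registers the legacy
    // AtomicExpandLegacy wrapper with the global pass registry.
    PassRegistry &Registry = *PassRegistry::getPassRegistry();
    initializeAtomicExpandLegacyPass(Registry);
  }
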
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ccf3e9ec649210..d756b972596c51 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -19,6 +19,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/InstSimplifyFolder.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/AtomicExpand.h"
 #include "llvm/CodeGen/AtomicExpandUtils.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/TargetLowering.h"
@@ -59,19 +60,10 @@ using namespace llvm;
 
 namespace {
 
-class AtomicExpand : public FunctionPass {
+class AtomicExpandImpl {
   const TargetLowering *TLI = nullptr;
   const DataLayout *DL = nullptr;
 
-public:
-  static char ID; // Pass identification, replacement for typeid
-
-  AtomicExpand() : FunctionPass(ID) {
-    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnFunction(Function &F) override;
-
 private:
   bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
@@ -124,6 +116,20 @@ class AtomicExpand : public FunctionPass {
   friend bool
   llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                  CreateCmpXchgInstFun CreateCmpXchg);
+
+public:
+  bool run(Function &F, const TargetMachine *TM);
+};
+
+class AtomicExpandLegacy : public FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  AtomicExpandLegacy() : FunctionPass(ID) {
+    initializeAtomicExpandLegacyPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
 };
 
 // IRBuilder to be used for replacement atomic instructions.
@@ -138,14 +144,15 @@ struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
 
 } // end anonymous namespace
 
-char AtomicExpand::ID = 0;
+char AtomicExpandLegacy::ID = 0;
 
-char &llvm::AtomicExpandID = AtomicExpand::ID;
+char &llvm::AtomicExpandID = AtomicExpandLegacy::ID;
 
-INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
-                false)
-
-FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
+INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
+                      "Expand Atomic instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
+                    "Expand Atomic instructions", false, false)
 
 // Helper functions to retrieve the size of atomic instructions.
 static unsigned getAtomicOpSize(LoadInst *LI) {
@@ -179,13 +186,8 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
          Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
 }
 
-bool AtomicExpand::runOnFunction(Function &F) {
-  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
-  if (!TPC)
-    return false;
-
-  auto &TM = TPC->getTM<TargetMachine>();
-  const auto *Subtarget = TM.getSubtargetImpl(F);
+bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
+  const auto *Subtarget = TM->getSubtargetImpl(F);
   if (!Subtarget->enableAtomicExpand())
     return false;
   TLI = Subtarget->getTargetLowering();
@@ -340,7 +342,39 @@ bool AtomicExpand::runOnFunction(Function &F) {
   return MadeChange;
 }
 
-bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
+bool AtomicExpandLegacy::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
+  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+  if (!TPC)
+    return false;
+
+  auto *TM = &TPC->getTM<TargetMachine>();
+
+  AtomicExpandImpl AE;
+  return AE.run(F, TM);
+}
+
+FunctionPass *llvm::createAtomicExpandLegacyPass() {
+  return new AtomicExpandLegacy();
+}
+
+PreservedAnalyses AtomicExpandPass::run(Function &F,
+                                        FunctionAnalysisManager &AM) {
+  AtomicExpandImpl AE;
+
+  bool Changed = AE.run(F, TM);
+  if (!Changed)
+    return PreservedAnalyses::all();
+
+  PreservedAnalyses PA;
+  PA.preserveSet<CFGAnalyses>();
+  return PA;
+}
+
+bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
+                                             AtomicOrdering Order) {
   ReplacementIRBuilder Builder(I, *DL);
 
   auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
@@ -355,8 +389,8 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
 }
 
 /// Get the iX type with the same bitwidth as T.
-IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
-                                                       const DataLayout &DL) {
+IntegerType *
+AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
   EVT VT = TLI->getMemValueType(DL, T);
   unsigned BitWidth = VT.getStoreSizeInBits();
   assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
@@ -366,7 +400,7 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
 /// Convert an atomic load of a non-integral type to an integer load of the
 /// equivalent bitwidth.  See the function comment on
 /// convertAtomicStoreToIntegerType for background.
-LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
+LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
   auto *M = LI->getModule();
   Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());
 
@@ -387,7 +421,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
 }
 
 AtomicRMWInst *
-AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
+AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   auto *M = RMWI->getModule();
   Type *NewTy =
       getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());
@@ -414,7 +448,7 @@ AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   return NewRMWI;
 }
 
-bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
+bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
   switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -436,7 +470,7 @@ bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
   }
 }
 
-bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
+bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
   switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -451,7 +485,7 @@ bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
   }
 }
 
-bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
+bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -467,7 +501,7 @@ bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
   return true;
 }
 
-bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
+bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
   AtomicOrdering Order = LI->getOrdering();
   if (Order == AtomicOrdering::Unordered)
@@ -496,7 +530,7 @@ bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
 /// instruction select from the original atomic store, but as a migration
 /// mechanism, we convert back to the old format which the backends understand.
 /// Each backend will need individual work to recognize the new format.
-StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
+StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
   ReplacementIRBuilder Builder(SI, *DL);
   auto *M = SI->getModule();
   Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
@@ -514,7 +548,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
   return NewSI;
 }
 
-void AtomicExpand::expandAtomicStore(StoreInst *SI) {
+void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
   // This function is only called on atomic stores that are too large to be
   // atomic if implemented as a native store. So we replace them by an
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
@@ -561,7 +595,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
     NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
 }
 
-bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   LLVMContext &Ctx = AI->getModule()->getContext();
   TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
   switch (Kind) {
@@ -843,7 +877,7 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
 /// way as a typical atomicrmw expansion. The only difference here is
 /// that the operation inside of the loop may operate upon only a
 /// part of the value.
-void AtomicExpand::expandPartwordAtomicRMW(
+void AtomicExpandImpl::expandPartwordAtomicRMW(
     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
   AtomicOrdering MemOpOrder = AI->getOrdering();
   SyncScope::ID SSID = AI->getSyncScopeID();
@@ -887,7 +921,7 @@ void AtomicExpand::expandPartwordAtomicRMW(
 }
 
 // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
-AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
+AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
   AtomicRMWInst::BinOp Op = AI->getOperation();
 
@@ -922,7 +956,7 @@ AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   return NewAI;
 }
 
-bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   // The basic idea here is that we're expanding a cmpxchg of a
   // smaller memory size up to a word-sized cmpxchg. To do this, we
   // need to add a retry-loop for strong cmpxchg, so that
@@ -1047,7 +1081,7 @@ bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-void AtomicExpand::expandAtomicOpToLLSC(
+void AtomicExpandImpl::expandAtomicOpToLLSC(
     Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1059,7 +1093,7 @@ void AtomicExpand::expandAtomicOpToLLSC(
   I->eraseFromParent();
 }
 
-void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
+void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
 
   PartwordMaskValues PMV =
@@ -1085,7 +1119,8 @@ void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   AI->eraseFromParent();
 }
 
-void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
+void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
+    AtomicCmpXchgInst *CI) {
   ReplacementIRBuilder Builder(CI, *DL);
 
   PartwordMaskValues PMV = createMaskInstrs(
@@ -1112,7 +1147,7 @@ void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
   CI->eraseFromParent();
 }
 
-Value *AtomicExpand::insertRMWLLSCLoop(
+Value *AtomicExpandImpl::insertRMWLLSCLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1168,7 +1203,7 @@ Value *AtomicExpand::insertRMWLLSCLoop(
 /// way to represent a pointer cmpxchg so that we can update backends one by
 /// one.
 AtomicCmpXchgInst *
-AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
+AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   auto *M = CI->getModule();
   Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                             M->getDataLayout());
@@ -1201,7 +1236,7 @@ AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   return NewCI;
 }
 
-bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -1447,7 +1482,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
+bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
   auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
   if (!C)
     return false;
@@ -1467,7 +1502,7 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
   }
 }
 
-bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
+bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     tryExpandAtomicLoad(ResultingLoad);
     return true;
@@ -1475,7 +1510,7 @@ bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   return false;
 }
 
-Value *AtomicExpand::insertRMWCmpXchgLoop(
+Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder, SyncScope::ID SSID,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
@@ -1536,7 +1571,7 @@ Value *AtomicExpand::insertRMWCmpXchgLoop(
   return NewLoaded;
 }
 
-bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
   unsigned ValueSize = getAtomicOpSize(CI);
 
@@ -1567,7 +1602,7 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
 
   // FIXME: If FP exceptions are observable, we should force them off for the
   // loop for the FP atomics.
-  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
+  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
       Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
       AI->getOrdering(), AI->getSyncScopeID(),
       [&](IRBuilderBase &Builder, Value *Loaded) {
@@ -1601,7 +1636,7 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
          Size <= LargestSize;
 }
 
-void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
+void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
       RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
@@ -1614,7 +1649,7 @@ void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
 }
 
-void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
+void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
       RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
@@ -1627,7 +1662,7 @@ void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
 }
 
-void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
+void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
       RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
@@ -1705,7 +1740,7 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
   llvm_unreachable("Unexpected AtomicRMW operation.");
 }
 
-void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
+void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
   ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
 
   unsigned Size = getAtomicOpSize(I);
@@ -1744,7 +1779,7 @@ void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
 // ATOMIC libcalls to be emitted. All of the other arguments besides
 // 'I' are extracted from the Instruction subclass by the
 // caller. Depending on the particular call, some will be null.
-bool AtomicExpand::expandAtomicOpToLibcall(
+bool AtomicExpandImpl::expandAtomicOpToLibcall(
     Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
     Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
     AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index 79a95ee0d747a1..ac4747d6d572da 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -19,7 +19,7 @@ using namespace llvm;
 /// initializeCodeGen - Initialize all passes linked into the CodeGen library.
 void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeAssignmentTrackingAnalysisPass(Registry);
-  initializeAtomicExpandPass(Registry);
+  initializeAtomicExpandLegacyPass(Registry);
   initializeBasicBlockPathCloningPass(Registry);
   initializeBasicBlockSectionsPass(Registry);
   initializeBranchFolderPassPass(Registry);
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index fde759026e5d78..2ccc218f3d8f38 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -72,6 +72,7 @@
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
 #include "llvm/Analysis/UniformityAnalysis.h"
+#include "llvm/CodeGen/AtomicExpand.h"
 #include "llvm/CodeGen/HardwareLoops.h"
 #include "llvm/CodeGen/TypePromotion.h"
 #include "llvm/IR/DebugInfo.h"
@@ -234,8 +235,8 @@
 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
 #include "llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h"
 #include "llvm/Transforms/Utils/CountVisits.h"
-#include "llvm/Transforms/Utils/Debugify.h"
 #include "llvm/Transforms/Utils/DXILUpgrade.h"
+#include "llvm/Transforms/Utils/Debugify.h"
 #include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
 #include "llvm/Transforms/Utils/FixIrreducible.h"
 #include "llvm/Transforms/Utils/HelloWorld.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 91782d661ddd7b..1318221c24e65a 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -429,6 +429,7 @@ FUNCTION_PASS("strip-gc-relocates", StripGCRelocates())
 FUNCTION_PASS("structurizecfg", StructurizeCFGPass())
 FUNCTION_PASS("tailcallelim", TailCallElimPass())
 FUNCTION_PASS("typepromotion", TypePromotionPass(TM))
+FUNCTION_PASS("atomicexpand", AtomicExpandPass(TM))
 FUNCTION_PASS("unify-loop-exits", UnifyLoopExitsPass())
 FUNCTION_PASS("vector-combine", VectorCombinePass())
 FUNCTION_PASS("verify", VerifierPass())
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 3d818c76bd4b7d..43ede3c1ceecc0 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -551,7 +551,7 @@ std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
 void AArch64PassConfig::addIRPasses() {
   // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
   // ourselves.
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   // Expand any SVE vector library calls that we can't code generate directly.
   if (EnableSVEIntrinsicOpts &&
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 375df27206f7b4..8e3c8f94634ce9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1033,7 +1033,7 @@ void AMDGPUPassConfig::addIRPasses() {
     addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   if (TM.getOptLevel() > CodeGenOptLevel::None) {
     addPass(createAMDGPUPromoteAlloca());
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index a80d485e750beb..5a5142f7df0d88 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -417,7 +417,7 @@ void ARMPassConfig::addIRPasses() {
   if (TM->Options.ThreadModel == ThreadModel::Single)
     addPass(createLowerAtomicPass());
   else
-    addPass(createAtomicExpandPass());
+    addPass(createAtomicExpandLegacyPass());
 
   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in
diff --git a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
index 8c268dc3161413..0bbfabe93147c5 100644
--- a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
+++ b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
@@ -118,7 +118,7 @@ TargetPassConfig *CSKYTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void CSKYPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 590e464e1653a1..dc2ab4837f4e83 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -338,7 +338,7 @@ void HexagonPassConfig::addIRPasses() {
     addPass(createDeadCodeEliminationPass());
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   if (!NoOpt) {
     if (EnableInitialCFGCleanup)
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index a5a4d78aceeef0..85fd3d291e2da2 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -161,7 +161,7 @@ void LoongArchPassConfig::addIRPasses() {
   // pointer values N iterations ahead.
   if (TM->getOptLevel() != CodeGenOptLevel::None && EnableLoopDataPrefetch)
     addPass(createLoopDataPrefetchPass());
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index af8cb9a83a050e..bbbcb1556ed557 100644
--- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -171,7 +171,7 @@ TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void M68kPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 07422283692929..4c4bf70e22c6c1 100644
--- a/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -263,7 +263,7 @@ std::unique_ptr<CSEConfigBase> MipsPassConfig::getCSEConfig() const {
 
 void MipsPassConfig::addIRPasses() {
   TargetPassConfig::addIRPasses();
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
   if (getMipsSubtarget().os16())
     addPass(createMipsOs16Pass());
   if (getMipsSubtarget().inMips16HardFloat())
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 8d895762fbe1d9..4265ac85c4c3c1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -379,7 +379,7 @@ void NVPTXPassConfig::addIRPasses() {
     addStraightLineScalarOptimizationPasses();
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
   addPass(createNVPTXCtorDtorLoweringLegacyPass());
 
   // === LSR and other generic IR passes ===
diff --git a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
index aee57a5075ff71..904d9b7d9f1fbf 100644
--- a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
@@ -23,7 +23,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "ppc-atomic-expand"
+#define DEBUG_TYPE "ppc-atomic-expand"
 
 namespace {
 
diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index b09975172bf5ec..d178b5f02ae717 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -457,7 +457,7 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
 void PPCPassConfig::addIRPasses() {
   if (TM->getOptLevel() != CodeGenOptLevel::None)
     addPass(createPPCBoolRetToIntPass());
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   // Lower generic MASSV routines to PowerPC subtarget-specific entries.
   addPass(createPPCLowerMASSVEntriesPass());
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 85683a3adc968d..21b3863dcae79c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -306,7 +306,7 @@ TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void RISCVPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   if (getOptLevel() != CodeGenOptLevel::None) {
     addPass(createRISCVGatherScatterLoweringPass());
diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index dbc26636e39f1f..23fdf6e47ee6a0 100644
--- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -172,7 +172,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void SparcPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index 186494ad2ac614..2a11921b2e7d78 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -226,7 +226,7 @@ void SystemZPassConfig::addIRPasses() {
     addPass(createLoopDataPrefetchPass());
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/VE/VETargetMachine.cpp b/llvm/lib/Target/VE/VETargetMachine.cpp
index 6d102bfd3926af..6f4e137e4d2f18 100644
--- a/llvm/lib/Target/VE/VETargetMachine.cpp
+++ b/llvm/lib/Target/VE/VETargetMachine.cpp
@@ -134,7 +134,7 @@ TargetPassConfig *VETargetMachine::createPassConfig(PassManagerBase &PM) {
 
 void VEPassConfig::addIRPasses() {
   // VE requires atomic expand pass.
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 2db1b6493cc476..9f24452b21fb20 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -478,7 +478,7 @@ void WebAssemblyPassConfig::addISelPrepare() {
   addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine()));
 
   // This is a no-op if atomics are not used in the module
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addISelPrepare();
 }
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 5668b514d6dec0..660b43a08ed3ce 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -435,7 +435,7 @@ MachineFunctionInfo *X86TargetMachine::createMachineFunctionInfo(
 }
 
 void X86PassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   // We add both pass anyway and when these two passes run, we skip the pass
   // based on the option level and option attribute.
diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index 345a8365ed49b3..374e91d01bdace 100644
--- a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -84,7 +84,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void XCorePassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
index fbb36f1d7ec8f7..bbd161d7dad6ce 100644
--- a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand < %s | FileCheck --check-prefix=OPT %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand < %s | FileCheck --check-prefix=OPT %s
 
 define i32 @global_agent_monotonic_idempotent_or(ptr addrspace(1) %in) {
 ; GFX940-LABEL: global_agent_monotonic_idempotent_or:
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index 73457654307019..28393b84d7341f 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -atomic-expand < %s | FileCheck -check-prefix=IR %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=atomicexpand < %s | FileCheck -check-prefix=IR %s
 ; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s
 
 define i32 @load_atomic_private_seq_cst_i32(ptr addrspace(5) %ptr) {
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
index 2fc848a3a810b8..77ac7ee9bda425 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
index 47d626261bfc43..6ef22406174217 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -atomic-expand %s | FileCheck %s
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -atomic-expand %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=atomicexpand %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
 
 define void @atomic_swap_f16(ptr %ptr, half %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f16(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
index 2e9efe911e6d6c..a896627a0560fa 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=atomicexpand %s | FileCheck %s
 
 define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_unordered(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
index b846c1f77538e6..a2d242ab0fc5bb 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index 7f5d6e7cb76f82..177ad2a464210a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomicexpand %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
index f796d3cca3036f..5eedc341eb850e 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
 
 define i8 @test_atomicrmw_xchg_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index 6a6e416bdbc89d..a5b4bb88fdf079 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s --check-prefixes=CHECK,GCN
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s --check-prefixes=CHECK,R600
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s --check-prefixes=CHECK,GCN
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomicexpand %s | FileCheck %s --check-prefixes=CHECK,R600
 
 define i8 @test_atomicrmw_xchg_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
 ; GCN-LABEL: @test_atomicrmw_xchg_i8_global_agent(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
index 5d7825bb378876..92e4dccb231326 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -atomic-expand %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -atomic-expand %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -atomic-expand %s | FileCheck -check-prefix=GFX1100 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=atomicexpand %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomicexpand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=atomicexpand %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=atomicexpand %s | FileCheck -check-prefix=GFX1100 %s
 
 define float @syncscope_system(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: @syncscope_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index 97c041168d147b..18173bd284a7f6 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=CI %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GFX9 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -atomic-expand %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -atomic-expand %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -atomic-expand %s | FileCheck -check-prefix=GFX11 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=CI %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GFX9 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=atomicexpand %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomicexpand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=atomicexpand %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=atomicexpand %s | FileCheck -check-prefix=GFX11 %s
 
 define void @test_atomicrmw_fadd_f32_global_no_use_unsafe(ptr addrspace(1) %ptr, float %value) #0 {
 ; CI-LABEL: @test_atomicrmw_fadd_f32_global_no_use_unsafe(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
index 9dfbe9b4eb7413..102cf44d9dea96 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmax_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmax_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
index 5a732653b48b14..06b04d009940c5 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmin_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmin_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
index 9805c317b9215e..cdb6c32391e7a0 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fsub_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fsub_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
index 5fa9dcc4ad9bf0..69626a14ae52ed 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomicexpand %s | FileCheck %s
 
 define i32 @test_atomicrmw_nand_i32_flat(ptr %ptr, i32 %value) {
 ; CHECK-LABEL: @test_atomicrmw_nand_i32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
index aceb897a7d487d..c1a93763429bd9 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomicexpand %s | FileCheck -check-prefix=GFX90A %s
 
 declare i32 @llvm.amdgcn.workitem.id.x()
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
index bdfd90dc11dca5..87c40c0337c169 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck %s
+; RUN: not --crash opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s 2>&1 | FileCheck %s
 ; The AtomicExpand pass cannot handle missing libcalls (yet) so reports a fatal error.
 ; CHECK: LLVM ERROR: expandAtomicOpToLibcall shouldn't fail for Load
 
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
index 353aafb9727a5b..3410921d7edf55 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand -codegen-opt-level=1 %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=atomicexpand -codegen-opt-level=1 %s | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
index bad28b2b6824e5..39729ec8cb270d 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-expand %s -codegen-opt-level=1 | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=atomicexpand %s -codegen-opt-level=1 | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
index d0268bf3e00796..3cbd71bed10167 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=armv7-apple-ios7.0 -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
index f7a210d631bf95..3ac21e1b0ed772 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
@@ -1,4 +1,4 @@
-; RUN: opt -atomic-expand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -passes=atomicexpand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
 
 define i32 @test_cmpxchg_seq_cst(ptr %addr, i32 %desired, i32 %new) {
 ; CHECK-LABEL: @test_cmpxchg_seq_cst
diff --git a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
index 8827eb5d8e1088..d3772badcd4350 100644
--- a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=hexagon-- -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=hexagon-- -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
index 43fdd25e257b82..393a822c7d529b 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch64 --atomic-expand --mattr=+d %s | FileCheck %s
+; RUN: opt -S --mtriple=loongarch64 --passes=atomicexpand --mattr=+d %s | FileCheck %s
 
 define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
 ; CHECK-LABEL: @atomicrmw_fadd_float(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
index b0875669bc3a21..8946f4f26c0902 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch32 --atomic-expand %s | FileCheck %s --check-prefix=LA32
-; RUN: opt -S --mtriple=loongarch64 --atomic-expand %s | FileCheck %s --check-prefix=LA64
+; RUN: opt -S --mtriple=loongarch32 --passes=atomicexpand %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 --passes=atomicexpand %s | FileCheck %s --check-prefix=LA64
 
 define i8 @load_acquire_i8(ptr %ptr) {
 ; LA32-LABEL: @load_acquire_i8(
diff --git a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
index 2c90a70bd0ad05..7299f1f71f90bb 100644
--- a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=mips64-mti-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
index 7e42735feabfff..aa1d80ce225ce9 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=powerpc64-unknown-unknown -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
index 19e5f56821d746..caed477b6cc217 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -atomic-expand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -atomic-expand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define double @foo(ptr %dp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
index 62f0db00df800b..ef1f64ed45e711 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -atomic-expand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -atomic-expand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define float @bar(ptr %fp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index 169d73cc0308d3..6e3f965950330f 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr8 %s | FileCheck %s
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr7 %s | FileCheck --check-prefix=PWR7 %s
 
 define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
index 342506301d0046..d06d3380fdb8f9 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -atomic-expand -S -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   %s | FileCheck %s
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   %s | FileCheck %s
 
 define ptr @foo(ptr %p) {
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index ceaafd89990b05..a92a9027c54261 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=riscv32-- -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=riscv32-- -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 4427c5e7ed23dc..4a678711de1cf9 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand | FileCheck %s
+; RUN: opt -S %s -passes=atomicexpand | FileCheck %s
 
 ;;; NOTE: this test is actually target-independent -- any target which
 ;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
index 5bcb21105df8bb..6d9e32ebe75903 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S %s -atomic-expand | FileCheck %s
+; RUN: opt -S %s -passes=atomicexpand | FileCheck %s
 
 ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
 ;; instructions are not available.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 8d71966c04d039..9589009852b17e 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomicexpand %s | FileCheck %s
 
 
 define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index dab7677086e91c..21117d6e013383 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=atomicexpand -mtriple=x86_64-linux-gnu | FileCheck %s
 
 ; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
 ; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this 
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
index 69837b96a90d00..fc2df10077d070 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomicexpand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
index fba1512368ea27..50fd402cb38d05 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand -mtriple=i686-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=atomicexpand -mtriple=i686-linux-gnu | FileCheck %s
 
 ; This file tests the function `llvm::expandAtomicRMWToCmpXchg`.
 ; It isn't technically target specific, but is exposed through a pass that is.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
index 2464af3336ef3f..5ae8ceca4d539e 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomicexpand %s | FileCheck %s
 
 define double @atomic_xchg_f64(ptr %ptr) nounwind {
 ; CHECK-LABEL: @atomic_xchg_f64(
diff --git a/llvm/tools/opt/opt.cpp b/llvm/tools/opt/opt.cpp
index bb6627364442ef..1f9814efe7fabe 100644
--- a/llvm/tools/opt/opt.cpp
+++ b/llvm/tools/opt/opt.cpp
@@ -340,7 +340,6 @@ static bool shouldPinPassToLegacyPM(StringRef Pass) {
       "interleaved-load-combine",
       "unreachableblockelim",
       "verify-safepoint-ir",
-      "atomic-expand",
       "expandvp",
       "mve-tail-predication",
       "interleaved-access",
@@ -420,7 +419,6 @@ int main(int argc, char **argv) {
   initializeSelectOptimizePass(Registry);
   initializeCallBrPreparePass(Registry);
   initializeCodeGenPreparePass(Registry);
-  initializeAtomicExpandPass(Registry);
   initializeWinEHPreparePass(Registry);
   initializeDwarfEHPrepareLegacyPassPass(Registry);
   initializeSafeStackLegacyPassPass(Registry);
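
With the legacy pass now just a thin wrapper, the ported pass can also be
scheduled directly in a new-PM function pipeline. A minimal sketch, not part
of the patch, assuming only the constructor shape implied by the
FUNCTION_PASS("atomicexpand", AtomicExpandPass(TM)) registration above
(scheduleAtomicExpand is a hypothetical helper, not an LLVM API):

  #include "llvm/CodeGen/AtomicExpand.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Target/TargetMachine.h"

  // Add the ported pass to a new-PM function pipeline. The pass carries the
  // TargetMachine itself, instead of pulling it out of TargetPassConfig the
  // way the legacy wrapper still does.
  static void scheduleAtomicExpand(llvm::FunctionPassManager &FPM,
                                   llvm::TargetMachine *TM) {
    FPM.addPass(llvm::AtomicExpandPass(TM));
  }

The string-based equivalent, as exercised by the updated RUN lines above, is
opt -S -passes=atomicexpand on the input IR file.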

>From 35a15d9435ea23b098824178d4f4d24dde1b6f69 Mon Sep 17 00:00:00 2001
From: Ris-Bali <rishabhsbali at gmail.com>
Date: Sat, 4 Nov 2023 18:31:39 +0530
Subject: [PATCH 2/4] atomicexpand port

---
 llvm/include/llvm/CodeGen/Passes.h | 898 ++++++++++++++---------------
 llvm/lib/Passes/PassBuilder.cpp    |   2 +-
 2 files changed, 450 insertions(+), 450 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index e70cd1462b338d..8444bf18147581 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -44,566 +44,566 @@ namespace llvm {
   /// AtomicExpandPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-FunctionPass *createAtomicExpandLegacyPass();
-
-/// createUnreachableBlockEliminationPass - The LLVM code generator does not
-/// work well with unreachable basic blocks (what live ranges make sense for a
-/// block that cannot be reached?).  As such, a code generator should either
-/// not instruction select unreachable blocks, or run this pass as its
-/// last LLVM modifying pass to clean up blocks that are not reachable from
-/// the entry block.
-FunctionPass *createUnreachableBlockEliminationPass();
+  FunctionPass *createAtomicExpandLegacyPass();
+
+  /// createUnreachableBlockEliminationPass - The LLVM code generator does not
+  /// work well with unreachable basic blocks (what live ranges make sense for a
+  /// block that cannot be reached?).  As such, a code generator should either
+  /// not instruction select unreachable blocks, or run this pass as its
+  /// last LLVM modifying pass to clean up blocks that are not reachable from
+  /// the entry block.
+  FunctionPass *createUnreachableBlockEliminationPass();
 
-/// createGCEmptyBasicblocksPass - Empty basic blocks (basic blocks without
-/// real code) appear as the result of optimization passes removing
-/// instructions. These blocks confuscate profile analysis (e.g., basic block
-/// sections) since they will share the address of their fallthrough blocks.
-/// This pass garbage-collects such basic blocks.
-MachineFunctionPass *createGCEmptyBasicBlocksPass();
-
-/// createBasicBlockSections Pass - This pass assigns sections to machine
-/// basic blocks and is enabled with -fbasic-block-sections.
-MachineFunctionPass *createBasicBlockSectionsPass();
-
-MachineFunctionPass *createBasicBlockPathCloningPass();
+  /// createGCEmptyBasicblocksPass - Empty basic blocks (basic blocks without
+  /// real code) appear as the result of optimization passes removing
+  /// instructions. These blocks confuscate profile analysis (e.g., basic block
+  /// sections) since they will share the address of their fallthrough blocks.
+  /// This pass garbage-collects such basic blocks.
+  MachineFunctionPass *createGCEmptyBasicBlocksPass();
+
+  /// createBasicBlockSections Pass - This pass assigns sections to machine
+  /// basic blocks and is enabled with -fbasic-block-sections.
+  MachineFunctionPass *createBasicBlockSectionsPass();
+
+  MachineFunctionPass *createBasicBlockPathCloningPass();
 
-/// createMachineFunctionSplitterPass - This pass splits machine functions
-/// using profile information.
-MachineFunctionPass *createMachineFunctionSplitterPass();
+  /// createMachineFunctionSplitterPass - This pass splits machine functions
+  /// using profile information.
+  MachineFunctionPass *createMachineFunctionSplitterPass();
 
-/// MachineFunctionPrinter pass - This pass prints out the machine function to
-/// the given stream as a debugging tool.
-MachineFunctionPass *
-createMachineFunctionPrinterPass(raw_ostream &OS,
-                                 const std::string &Banner = "");
+  /// MachineFunctionPrinter pass - This pass prints out the machine function to
+  /// the given stream as a debugging tool.
+  MachineFunctionPass *
+  createMachineFunctionPrinterPass(raw_ostream &OS,
+                                   const std::string &Banner = "");
 
-/// StackFramePrinter pass - This pass prints out the machine function's
-/// stack frame to the given stream as a debugging tool.
-MachineFunctionPass *createStackFrameLayoutAnalysisPass();
+  /// StackFramePrinter pass - This pass prints out the machine function's
+  /// stack frame to the given stream as a debugging tool.
+  MachineFunctionPass *createStackFrameLayoutAnalysisPass();
 
-/// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
-/// using the MIR serialization format.
-MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
+  /// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
+  /// using the MIR serialization format.
+  MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
 
-/// This pass resets a MachineFunction when it has the FailedISel property
-/// as if it was just created.
-/// If EmitFallbackDiag is true, the pass will emit a
-/// DiagnosticInfoISelFallback for every MachineFunction it resets.
-/// If AbortOnFailedISel is true, abort compilation instead of resetting.
-MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
-                                                    bool AbortOnFailedISel);
+  /// This pass resets a MachineFunction when it has the FailedISel property
+  /// as if it was just created.
+  /// If EmitFallbackDiag is true, the pass will emit a
+  /// DiagnosticInfoISelFallback for every MachineFunction it resets.
+  /// If AbortOnFailedISel is true, abort compilation instead of resetting.
+  MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
+                                                      bool AbortOnFailedISel);
 
-/// createCodeGenPreparePass - Transform the code to expose more pattern
-/// matching during instruction selection.
-FunctionPass *createCodeGenPreparePass();
+  /// createCodeGenPreparePass - Transform the code to expose more pattern
+  /// matching during instruction selection.
+  FunctionPass *createCodeGenPreparePass();
 
-/// This pass implements generation of target-specific intrinsics to support
-/// handling of complex number arithmetic
-FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
+  /// This pass implements generation of target-specific intrinsics to support
+  /// handling of complex number arithmetic
+  FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
 
-/// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
-/// load-linked/store-conditional loops.
-extern char &AtomicExpandID;
+  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+  /// load-linked/store-conditional loops.
+  extern char &AtomicExpandID;
 
-/// MachineLoopInfo - This pass is a loop analysis pass.
-extern char &MachineLoopInfoID;
-
-/// MachineDominators - This pass is a machine dominators analysis pass.
-extern char &MachineDominatorsID;
+  /// MachineLoopInfo - This pass is a loop analysis pass.
+  extern char &MachineLoopInfoID;
+
+  /// MachineDominators - This pass is a machine dominators analysis pass.
+  extern char &MachineDominatorsID;
 
-/// MachineDominanaceFrontier - This pass is a machine dominators analysis.
-extern char &MachineDominanceFrontierID;
-
-/// MachineRegionInfo - This pass computes SESE regions for machine functions.
-extern char &MachineRegionInfoPassID;
-
-/// EdgeBundles analysis - Bundle machine CFG edges.
-extern char &EdgeBundlesID;
-
-/// LiveVariables pass - This pass computes the set of blocks in which each
-/// variable is life and sets machine operand kill flags.
-extern char &LiveVariablesID;
-
-/// PHIElimination - This pass eliminates machine instruction PHI nodes
-/// by inserting copy instructions.  This destroys SSA information, but is the
-/// desired input for some register allocators.  This pass is "required" by
-/// these register allocator like this: AU.addRequiredID(PHIEliminationID);
-extern char &PHIEliminationID;
-
-/// LiveIntervals - This analysis keeps track of the live ranges of virtual
-/// and physical registers.
-extern char &LiveIntervalsID;
-
-/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
-extern char &LiveStacksID;
-
-/// TwoAddressInstruction - This pass reduces two-address instructions to
-/// use two operands. This destroys SSA information but it is desired by
-/// register allocators.
-extern char &TwoAddressInstructionPassID;
-
-/// ProcessImpicitDefs pass - This pass removes IMPLICIT_DEFs.
-extern char &ProcessImplicitDefsID;
-
-/// RegisterCoalescer - This pass merges live ranges to eliminate copies.
-extern char &RegisterCoalescerID;
-
-/// MachineScheduler - This pass schedules machine instructions.
-extern char &MachineSchedulerID;
-
-/// PostMachineScheduler - This pass schedules machine instructions postRA.
-extern char &PostMachineSchedulerID;
+  /// MachineDominanaceFrontier - This pass is a machine dominators analysis.
+  extern char &MachineDominanceFrontierID;
+
+  /// MachineRegionInfo - This pass computes SESE regions for machine functions.
+  extern char &MachineRegionInfoPassID;
+
+  /// EdgeBundles analysis - Bundle machine CFG edges.
+  extern char &EdgeBundlesID;
+
+  /// LiveVariables pass - This pass computes the set of blocks in which each
+  /// variable is life and sets machine operand kill flags.
+  extern char &LiveVariablesID;
+
+  /// PHIElimination - This pass eliminates machine instruction PHI nodes
+  /// by inserting copy instructions.  This destroys SSA information, but is the
+  /// desired input for some register allocators.  This pass is "required" by
+  /// these register allocator like this: AU.addRequiredID(PHIEliminationID);
+  extern char &PHIEliminationID;
+
+  /// LiveIntervals - This analysis keeps track of the live ranges of virtual
+  /// and physical registers.
+  extern char &LiveIntervalsID;
+
+  /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
+  extern char &LiveStacksID;
+
+  /// TwoAddressInstruction - This pass reduces two-address instructions to
+  /// use two operands. This destroys SSA information but it is desired by
+  /// register allocators.
+  extern char &TwoAddressInstructionPassID;
+
+  /// ProcessImpicitDefs pass - This pass removes IMPLICIT_DEFs.
+  extern char &ProcessImplicitDefsID;
+
+  /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
+  extern char &RegisterCoalescerID;
+
+  /// MachineScheduler - This pass schedules machine instructions.
+  extern char &MachineSchedulerID;
+
+  /// PostMachineScheduler - This pass schedules machine instructions postRA.
+  extern char &PostMachineSchedulerID;
 
-/// SpillPlacement analysis. Suggest optimal placement of spill code between
-/// basic blocks.
-extern char &SpillPlacementID;
+  /// SpillPlacement analysis. Suggest optimal placement of spill code between
+  /// basic blocks.
+  extern char &SpillPlacementID;
 
-/// ShrinkWrap pass. Look for the best place to insert save and restore
-// instruction and update the MachineFunctionInfo with that information.
-extern char &ShrinkWrapID;
+  /// ShrinkWrap pass. Look for the best place to insert save and restore
+  // instruction and update the MachineFunctionInfo with that information.
+  extern char &ShrinkWrapID;
 
-/// LiveRangeShrink pass. Move instruction close to its definition to shrink
-/// the definition's live range.
-extern char &LiveRangeShrinkID;
+  /// LiveRangeShrink pass. Move instruction close to its definition to shrink
+  /// the definition's live range.
+  extern char &LiveRangeShrinkID;
 
-/// Greedy register allocator.
-extern char &RAGreedyID;
-
-/// Basic register allocator.
-extern char &RABasicID;
-
-/// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
-/// assigned in VirtRegMap.
-extern char &VirtRegRewriterID;
-FunctionPass *createVirtRegRewriter(bool ClearVirtRegs = true);
+  /// Greedy register allocator.
+  extern char &RAGreedyID;
+
+  /// Basic register allocator.
+  extern char &RABasicID;
+
+  /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
+  /// assigned in VirtRegMap.
+  extern char &VirtRegRewriterID;
+  FunctionPass *createVirtRegRewriter(bool ClearVirtRegs = true);
 
-/// UnreachableMachineBlockElimination - This pass removes unreachable
-/// machine basic blocks.
-extern char &UnreachableMachineBlockElimID;
+  /// UnreachableMachineBlockElimination - This pass removes unreachable
+  /// machine basic blocks.
+  extern char &UnreachableMachineBlockElimID;
 
-/// DeadMachineInstructionElim - This pass removes dead machine instructions.
-extern char &DeadMachineInstructionElimID;
+  /// DeadMachineInstructionElim - This pass removes dead machine instructions.
+  extern char &DeadMachineInstructionElimID;
 
-/// This pass adds dead/undef flags after analyzing subregister lanes.
-extern char &DetectDeadLanesID;
+  /// This pass adds dead/undef flags after analyzing subregister lanes.
+  extern char &DetectDeadLanesID;
 
-/// This pass perform post-ra machine sink for COPY instructions.
-extern char &PostRAMachineSinkingID;
+  /// This pass perform post-ra machine sink for COPY instructions.
+  extern char &PostRAMachineSinkingID;
 
-/// This pass adds flow sensitive discriminators.
-extern char &MIRAddFSDiscriminatorsID;
+  /// This pass adds flow sensitive discriminators.
+  extern char &MIRAddFSDiscriminatorsID;
 
-/// This pass reads flow sensitive profile.
-extern char &MIRProfileLoaderPassID;
+  /// This pass reads flow sensitive profile.
+  extern char &MIRProfileLoaderPassID;
 
-/// FastRegisterAllocation Pass - This pass register allocates as fast as
-/// possible. It is best suited for debug code where live ranges are short.
-///
-FunctionPass *createFastRegisterAllocator();
-FunctionPass *createFastRegisterAllocator(RegClassFilterFunc F,
-                                          bool ClearVirtRegs);
+  /// FastRegisterAllocation Pass - This pass register allocates as fast as
+  /// possible. It is best suited for debug code where live ranges are short.
+  ///
+  FunctionPass *createFastRegisterAllocator();
+  FunctionPass *createFastRegisterAllocator(RegClassFilterFunc F,
+                                            bool ClearVirtRegs);
 
-/// BasicRegisterAllocation Pass - This pass implements a degenerate global
-/// register allocator using the basic regalloc framework.
-///
-FunctionPass *createBasicRegisterAllocator();
-FunctionPass *createBasicRegisterAllocator(RegClassFilterFunc F);
+  /// BasicRegisterAllocation Pass - This pass implements a degenerate global
+  /// register allocator using the basic regalloc framework.
+  ///
+  FunctionPass *createBasicRegisterAllocator();
+  FunctionPass *createBasicRegisterAllocator(RegClassFilterFunc F);
 
-/// Greedy register allocation pass - This pass implements a global register
-/// allocator for optimized builds.
-///
-FunctionPass *createGreedyRegisterAllocator();
-FunctionPass *createGreedyRegisterAllocator(RegClassFilterFunc F);
+  /// Greedy register allocation pass - This pass implements a global register
+  /// allocator for optimized builds.
+  ///
+  FunctionPass *createGreedyRegisterAllocator();
+  FunctionPass *createGreedyRegisterAllocator(RegClassFilterFunc F);
 
-/// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
-/// Quadratic Prograaming (PBQP) based register allocator.
-///
-FunctionPass *createDefaultPBQPRegisterAllocator();
+  /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
+  /// Quadratic Prograaming (PBQP) based register allocator.
+  ///
+  FunctionPass *createDefaultPBQPRegisterAllocator();
 
-/// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
-/// and eliminates abstract frame references.
-extern char &PrologEpilogCodeInserterID;
-MachineFunctionPass *createPrologEpilogInserterPass();
-
-/// ExpandPostRAPseudos - This pass expands pseudo instructions after
-/// register allocation.
-extern char &ExpandPostRAPseudosID;
+  /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
+  /// and eliminates abstract frame references.
+  extern char &PrologEpilogCodeInserterID;
+  MachineFunctionPass *createPrologEpilogInserterPass();
+
+  /// ExpandPostRAPseudos - This pass expands pseudo instructions after
+  /// register allocation.
+  extern char &ExpandPostRAPseudosID;
 
-/// PostRAHazardRecognizer - This pass runs the post-ra hazard
-/// recognizer.
-extern char &PostRAHazardRecognizerID;
+  /// PostRAHazardRecognizer - This pass runs the post-ra hazard
+  /// recognizer.
+  extern char &PostRAHazardRecognizerID;
 
-/// PostRAScheduler - This pass performs post register allocation
-/// scheduling.
-extern char &PostRASchedulerID;
+  /// PostRAScheduler - This pass performs post register allocation
+  /// scheduling.
+  extern char &PostRASchedulerID;
 
-/// BranchFolding - This pass performs machine code CFG based
-/// optimizations to delete branches to branches, eliminate branches to
-/// successor blocks (creating fall throughs), and eliminating branches over
-/// branches.
-extern char &BranchFolderPassID;
+  /// BranchFolding - This pass performs machine code CFG based
+  /// optimizations to delete branches to branches, eliminate branches to
+  /// successor blocks (creating fall throughs), and eliminating branches over
+  /// branches.
+  extern char &BranchFolderPassID;
 
-/// BranchRelaxation - This pass replaces branches that need to jump further
-/// than is supported by a branch instruction.
-extern char &BranchRelaxationPassID;
+  /// BranchRelaxation - This pass replaces branches that need to jump further
+  /// than is supported by a branch instruction.
+  extern char &BranchRelaxationPassID;
 
-/// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
-extern char &MachineFunctionPrinterPassID;
+  /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
+  extern char &MachineFunctionPrinterPassID;
 
-/// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
-/// serialization format.
-extern char &MIRPrintingPassID;
+  /// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
+  /// serialization format.
+  extern char &MIRPrintingPassID;
 
-/// TailDuplicate - Duplicate blocks with unconditional branches
-/// into tails of their predecessors.
-extern char &TailDuplicateID;
+  /// TailDuplicate - Duplicate blocks with unconditional branches
+  /// into tails of their predecessors.
+  extern char &TailDuplicateID;
 
-/// Duplicate blocks with unconditional branches into tails of their
-/// predecessors. Variant that works before register allocation.
-extern char &EarlyTailDuplicateID;
+  /// Duplicate blocks with unconditional branches into tails of their
+  /// predecessors. Variant that works before register allocation.
+  extern char &EarlyTailDuplicateID;
 
-/// MachineTraceMetrics - This pass computes critical path and CPU resource
-/// usage in an ensemble of traces.
-extern char &MachineTraceMetricsID;
+  /// MachineTraceMetrics - This pass computes critical path and CPU resource
+  /// usage in an ensemble of traces.
+  extern char &MachineTraceMetricsID;
 
-/// EarlyIfConverter - This pass performs if-conversion on SSA form by
-/// inserting cmov instructions.
-extern char &EarlyIfConverterID;
-
-/// EarlyIfPredicator - This pass performs if-conversion on SSA form by
-/// predicating if/else block and insert select at the join point.
-extern char &EarlyIfPredicatorID;
+  /// EarlyIfConverter - This pass performs if-conversion on SSA form by
+  /// inserting cmov instructions.
+  extern char &EarlyIfConverterID;
+
+  /// EarlyIfPredicator - This pass performs if-conversion on SSA form by
+  /// predicating if/else block and insert select at the join point.
+  extern char &EarlyIfPredicatorID;
 
-/// This pass performs instruction combining using trace metrics to estimate
-/// critical-path and resource depth.
-extern char &MachineCombinerID;
+  /// This pass performs instruction combining using trace metrics to estimate
+  /// critical-path and resource depth.
+  extern char &MachineCombinerID;
 
-/// StackSlotColoring - This pass performs stack coloring and merging.
-/// It merges disjoint allocas to reduce the stack size.
-extern char &StackColoringID;
+  /// StackSlotColoring - This pass performs stack coloring and merging.
+  /// It merges disjoint allocas to reduce the stack size.
+  extern char &StackColoringID;
 
-/// StackFramePrinter - This pass prints the stack frame layout and variable
-/// mappings.
-extern char &StackFrameLayoutAnalysisPassID;
+  /// StackFramePrinter - This pass prints the stack frame layout and variable
+  /// mappings.
+  extern char &StackFrameLayoutAnalysisPassID;
 
-/// IfConverter - This pass performs machine code if conversion.
-extern char &IfConverterID;
+  /// IfConverter - This pass performs machine code if conversion.
+  extern char &IfConverterID;
 
-FunctionPass *
-createIfConverter(std::function<bool(const MachineFunction &)> Ftor);
-
-/// MachineBlockPlacement - This pass places basic blocks based on branch
-/// probabilities.
-extern char &MachineBlockPlacementID;
-
-/// MachineBlockPlacementStats - This pass collects statistics about the
-/// basic block placement using branch probabilities and block frequency
-/// information.
-extern char &MachineBlockPlacementStatsID;
+  FunctionPass *createIfConverter(
+      std::function<bool(const MachineFunction &)> Ftor);
+
+  /// MachineBlockPlacement - This pass places basic blocks based on branch
+  /// probabilities.
+  extern char &MachineBlockPlacementID;
+
+  /// MachineBlockPlacementStats - This pass collects statistics about the
+  /// basic block placement using branch probabilities and block frequency
+  /// information.
+  extern char &MachineBlockPlacementStatsID;
 
-/// GCLowering Pass - Used by gc.root to perform its default lowering
-/// operations.
-FunctionPass *createGCLoweringPass();
-
-/// GCLowering Pass - Used by gc.root to perform its default lowering
-/// operations.
-extern char &GCLoweringID;
+  /// GCLowering Pass - Used by gc.root to perform its default lowering
+  /// operations.
+  FunctionPass *createGCLoweringPass();
+
+  /// GCLowering Pass - Used by gc.root to perform its default lowering
+  /// operations.
+  extern char &GCLoweringID;
 
-/// ShadowStackGCLowering - Implements the custom lowering mechanism
-/// used by the shadow stack GC.  Only runs on functions which opt in to
-/// the shadow stack collector.
-FunctionPass *createShadowStackGCLoweringPass();
+  /// ShadowStackGCLowering - Implements the custom lowering mechanism
+  /// used by the shadow stack GC.  Only runs on functions which opt in to
+  /// the shadow stack collector.
+  FunctionPass *createShadowStackGCLoweringPass();
 
-/// ShadowStackGCLowering - Implements the custom lowering mechanism
-/// used by the shadow stack GC.
-extern char &ShadowStackGCLoweringID;
+  /// ShadowStackGCLowering - Implements the custom lowering mechanism
+  /// used by the shadow stack GC.
+  extern char &ShadowStackGCLoweringID;
 
-/// GCMachineCodeAnalysis - Target-independent pass to mark safe points
-/// in machine code. Must be added very late during code generation, just
-/// prior to output, and importantly after all CFG transformations (such as
-/// branch folding).
-extern char &GCMachineCodeAnalysisID;
+  /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
+  /// in machine code. Must be added very late during code generation, just
+  /// prior to output, and importantly after all CFG transformations (such as
+  /// branch folding).
+  extern char &GCMachineCodeAnalysisID;
 
-/// Creates a pass to print GC metadata.
-///
-FunctionPass *createGCInfoPrinter(raw_ostream &OS);
+  /// Creates a pass to print GC metadata.
+  ///
+  FunctionPass *createGCInfoPrinter(raw_ostream &OS);
 
-/// MachineCSE - This pass performs global CSE on machine instructions.
-extern char &MachineCSEID;
+  /// MachineCSE - This pass performs global CSE on machine instructions.
+  extern char &MachineCSEID;
 
-/// MIRCanonicalizer - This pass canonicalizes MIR by renaming vregs
-/// according to the semantics of the instruction as well as hoists
-/// code.
-extern char &MIRCanonicalizerID;
+  /// MIRCanonicalizer - This pass canonicalizes MIR by renaming vregs
+  /// according to the semantics of the instruction as well as hoists
+  /// code.
+  extern char &MIRCanonicalizerID;
 
-/// ImplicitNullChecks - This pass folds null pointer checks into nearby
-/// memory operations.
-extern char &ImplicitNullChecksID;
+  /// ImplicitNullChecks - This pass folds null pointer checks into nearby
+  /// memory operations.
+  extern char &ImplicitNullChecksID;
 
-/// This pass performs loop invariant code motion on machine instructions.
-extern char &MachineLICMID;
+  /// This pass performs loop invariant code motion on machine instructions.
+  extern char &MachineLICMID;
 
-/// This pass performs loop invariant code motion on machine instructions.
-/// This variant works before register allocation. \see MachineLICMID.
-extern char &EarlyMachineLICMID;
+  /// This pass performs loop invariant code motion on machine instructions.
+  /// This variant works before register allocation. \see MachineLICMID.
+  extern char &EarlyMachineLICMID;
 
-/// MachineSinking - This pass performs sinking on machine instructions.
-extern char &MachineSinkingID;
+  /// MachineSinking - This pass performs sinking on machine instructions.
+  extern char &MachineSinkingID;
 
-/// MachineCopyPropagation - This pass performs copy propagation on
-/// machine instructions.
-extern char &MachineCopyPropagationID;
+  /// MachineCopyPropagation - This pass performs copy propagation on
+  /// machine instructions.
+  extern char &MachineCopyPropagationID;
 
-MachineFunctionPass *createMachineCopyPropagationPass(bool UseCopyInstr);
+  MachineFunctionPass *createMachineCopyPropagationPass(bool UseCopyInstr);
 
-/// MachineLateInstrsCleanup - This pass removes redundant identical
-/// instructions after register allocation and rematerialization.
-extern char &MachineLateInstrsCleanupID;
+  /// MachineLateInstrsCleanup - This pass removes redundant identical
+  /// instructions after register allocation and rematerialization.
+  extern char &MachineLateInstrsCleanupID;
 
-/// PeepholeOptimizer - This pass performs peephole optimizations -
-/// like extension and comparison eliminations.
-extern char &PeepholeOptimizerID;
+  /// PeepholeOptimizer - This pass performs peephole optimizations -
+  /// like extension and comparison eliminations.
+  extern char &PeepholeOptimizerID;
 
-/// OptimizePHIs - This pass optimizes machine instruction PHIs
-/// to take advantage of opportunities created during DAG legalization.
-extern char &OptimizePHIsID;
+  /// OptimizePHIs - This pass optimizes machine instruction PHIs
+  /// to take advantage of opportunities created during DAG legalization.
+  extern char &OptimizePHIsID;
 
-/// StackSlotColoring - This pass performs stack slot coloring.
-extern char &StackSlotColoringID;
+  /// StackSlotColoring - This pass performs stack slot coloring.
+  extern char &StackSlotColoringID;
 
-/// This pass lays out funclets contiguously.
-extern char &FuncletLayoutID;
+  /// This pass lays out funclets contiguously.
+  extern char &FuncletLayoutID;
 
-/// This pass inserts the XRay instrumentation sleds if they are supported by
-/// the target platform.
-extern char &XRayInstrumentationID;
+  /// This pass inserts the XRay instrumentation sleds if they are supported by
+  /// the target platform.
+  extern char &XRayInstrumentationID;
 
-/// This pass inserts FEntry calls
-extern char &FEntryInserterID;
+  /// This pass inserts FEntry calls
+  extern char &FEntryInserterID;
 
-/// This pass implements the "patchable-function" attribute.
-extern char &PatchableFunctionID;
-
-/// createStackProtectorPass - This pass adds stack protectors to functions.
-///
-FunctionPass *createStackProtectorPass();
-
-/// createMachineVerifierPass - This pass verifies cenerated machine code
-/// instructions for correctness.
-///
-FunctionPass *createMachineVerifierPass(const std::string &Banner);
-
-/// createDwarfEHPass - This pass mulches exception handling code into a form
-/// adapted to code generation.  Required if using dwarf exception handling.
-FunctionPass *createDwarfEHPass(CodeGenOptLevel OptLevel);
-
-/// createWinEHPass - Prepares personality functions used by MSVC on Windows,
-/// in addition to the Itanium LSDA based personalities.
-FunctionPass *createWinEHPass(bool DemoteCatchSwitchPHIOnly = false);
+  /// This pass implements the "patchable-function" attribute.
+  extern char &PatchableFunctionID;
+
+  /// createStackProtectorPass - This pass adds stack protectors to functions.
+  ///
+  FunctionPass *createStackProtectorPass();
+
+  /// createMachineVerifierPass - This pass verifies cenerated machine code
+  /// instructions for correctness.
+  ///
+  FunctionPass *createMachineVerifierPass(const std::string &Banner);
+
+  /// createDwarfEHPass - This pass mulches exception handling code into a form
+  /// adapted to code generation.  Required if using dwarf exception handling.
+  FunctionPass *createDwarfEHPass(CodeGenOptLevel OptLevel);
+
+  /// createWinEHPass - Prepares personality functions used by MSVC on Windows,
+  /// in addition to the Itanium LSDA based personalities.
+  FunctionPass *createWinEHPass(bool DemoteCatchSwitchPHIOnly = false);
 
-/// createSjLjEHPreparePass - This pass adapts exception handling code to use
-/// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
-///
-FunctionPass *createSjLjEHPreparePass(const TargetMachine *TM);
-
-/// createWasmEHPass - This pass adapts exception handling code to use
-/// WebAssembly's exception handling scheme.
-FunctionPass *createWasmEHPass();
-
-/// LocalStackSlotAllocation - This pass assigns local frame indices to stack
-/// slots relative to one another and allocates base registers to access them
-/// when it is estimated by the target to be out of range of normal frame
-/// pointer or stack pointer index addressing.
-extern char &LocalStackSlotAllocationID;
-
-/// This pass expands pseudo-instructions, reserves registers and adjusts
-/// machine frame information.
-extern char &FinalizeISelID;
+  /// createSjLjEHPreparePass - This pass adapts exception handling code to use
+  /// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
+  ///
+  FunctionPass *createSjLjEHPreparePass(const TargetMachine *TM);
+
+  /// createWasmEHPass - This pass adapts exception handling code to use
+  /// WebAssembly's exception handling scheme.
+  FunctionPass *createWasmEHPass();
+
+  /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
+  /// slots relative to one another and allocates base registers to access them
+  /// when it is estimated by the target to be out of range of normal frame
+  /// pointer or stack pointer index addressing.
+  extern char &LocalStackSlotAllocationID;
+
+  /// This pass expands pseudo-instructions, reserves registers and adjusts
+  /// machine frame information.
+  extern char &FinalizeISelID;
 
-/// UnpackMachineBundles - This pass unpack machine instruction bundles.
-extern char &UnpackMachineBundlesID;
+  /// UnpackMachineBundles - This pass unpack machine instruction bundles.
+  extern char &UnpackMachineBundlesID;
 
-FunctionPass *
-createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
+  FunctionPass *
+  createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
 
-/// FinalizeMachineBundles - This pass finalize machine instruction
-/// bundles (created earlier, e.g. during pre-RA scheduling).
-extern char &FinalizeMachineBundlesID;
+  /// FinalizeMachineBundles - This pass finalize machine instruction
+  /// bundles (created earlier, e.g. during pre-RA scheduling).
+  extern char &FinalizeMachineBundlesID;
 
-/// StackMapLiveness - This pass analyses the register live-out set of
-/// stackmap/patchpoint intrinsics and attaches the calculated information to
-/// the intrinsic for later emission to the StackMap.
-extern char &StackMapLivenessID;
+  /// StackMapLiveness - This pass analyses the register live-out set of
+  /// stackmap/patchpoint intrinsics and attaches the calculated information to
+  /// the intrinsic for later emission to the StackMap.
+  extern char &StackMapLivenessID;
 
-// MachineSanitizerBinaryMetadata - appends/finalizes sanitizer binary
-// metadata after llvm SanitizerBinaryMetadata pass.
-extern char &MachineSanitizerBinaryMetadataID;
+  // MachineSanitizerBinaryMetadata - appends/finalizes sanitizer binary
+  // metadata after llvm SanitizerBinaryMetadata pass.
+  extern char &MachineSanitizerBinaryMetadataID;
 
-/// RemoveRedundantDebugValues pass.
-extern char &RemoveRedundantDebugValuesID;
+  /// RemoveRedundantDebugValues pass.
+  extern char &RemoveRedundantDebugValuesID;
 
-/// MachineCFGPrinter pass.
-extern char &MachineCFGPrinterID;
+  /// MachineCFGPrinter pass.
+  extern char &MachineCFGPrinterID;
 
-/// LiveDebugValues pass
-extern char &LiveDebugValuesID;
+  /// LiveDebugValues pass
+  extern char &LiveDebugValuesID;
 
-/// InterleavedAccess Pass - This pass identifies and matches interleaved
-/// memory accesses to target specific intrinsics.
-///
-FunctionPass *createInterleavedAccessPass();
+  /// InterleavedAccess Pass - This pass identifies and matches interleaved
+  /// memory accesses to target specific intrinsics.
+  ///
+  FunctionPass *createInterleavedAccessPass();
 
-/// InterleavedLoadCombines Pass - This pass identifies interleaved loads and
-/// combines them into wide loads detectable by InterleavedAccessPass
-///
-FunctionPass *createInterleavedLoadCombinePass();
+  /// InterleavedLoadCombines Pass - This pass identifies interleaved loads and
+  /// combines them into wide loads detectable by InterleavedAccessPass
+  ///
+  FunctionPass *createInterleavedLoadCombinePass();
 
-/// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
-/// TLS variables for the emulated TLS model.
-///
-ModulePass *createLowerEmuTLSPass();
+  /// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
+  /// TLS variables for the emulated TLS model.
+  ///
+  ModulePass *createLowerEmuTLSPass();
 
-/// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
-/// instructions.  This is unsafe to do earlier because a pass may combine the
-/// constant initializer into the load, which may result in an overflowing
-/// evaluation.
-ModulePass *createPreISelIntrinsicLoweringPass();
+  /// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
+  /// instructions.  This is unsafe to do earlier because a pass may combine the
+  /// constant initializer into the load, which may result in an overflowing
+  /// evaluation.
+  ModulePass *createPreISelIntrinsicLoweringPass();
 
-/// GlobalMerge - This pass merges internal (by default) globals into structs
-/// to enable reuse of a base pointer by indexed addressing modes.
-/// It can also be configured to focus on size optimizations only.
-///
-Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
-                            bool OnlyOptimizeForSize = false,
-                            bool MergeExternalByDefault = false);
+  /// GlobalMerge - This pass merges internal (by default) globals into structs
+  /// to enable reuse of a base pointer by indexed addressing modes.
+  /// It can also be configured to focus on size optimizations only.
+  ///
+  Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
+                              bool OnlyOptimizeForSize = false,
+                              bool MergeExternalByDefault = false);
 
-/// This pass splits the stack into a safe stack and an unsafe stack to
-/// protect against stack-based overflow vulnerabilities.
-FunctionPass *createSafeStackPass();
+  /// This pass splits the stack into a safe stack and an unsafe stack to
+  /// protect against stack-based overflow vulnerabilities.
+  FunctionPass *createSafeStackPass();
 
-/// This pass detects subregister lanes in a virtual register that are used
-/// independently of other lanes and splits them into separate virtual
-/// registers.
-extern char &RenameIndependentSubregsID;
+  /// This pass detects subregister lanes in a virtual register that are used
+  /// independently of other lanes and splits them into separate virtual
+  /// registers.
+  extern char &RenameIndependentSubregsID;
 
-/// This pass is executed POST-RA to collect which physical registers are
-/// preserved by given machine function.
-FunctionPass *createRegUsageInfoCollector();
+  /// This pass is executed POST-RA to collect which physical registers are
+  /// preserved by given machine function.
+  FunctionPass *createRegUsageInfoCollector();
 
-/// Return a MachineFunction pass that identifies call sites
-/// and propagates register usage information of callee to caller
-/// if available with PysicalRegisterUsageInfo pass.
-FunctionPass *createRegUsageInfoPropPass();
+  /// Return a MachineFunction pass that identifies call sites
+  /// and propagates register usage information of callee to caller
+  /// if available with PysicalRegisterUsageInfo pass.
+  FunctionPass *createRegUsageInfoPropPass();
 
-/// This pass performs software pipelining on machine instructions.
-extern char &MachinePipelinerID;
+  /// This pass performs software pipelining on machine instructions.
+  extern char &MachinePipelinerID;
 
-/// This pass frees the memory occupied by the MachineFunction.
-FunctionPass *createFreeMachineFunctionPass();
+  /// This pass frees the memory occupied by the MachineFunction.
+  FunctionPass *createFreeMachineFunctionPass();
 
-/// This pass performs outlining on machine instructions directly before
-/// printing assembly.
-ModulePass *createMachineOutlinerPass(bool RunOnAllFunctions = true);
+  /// This pass performs outlining on machine instructions directly before
+  /// printing assembly.
+  ModulePass *createMachineOutlinerPass(bool RunOnAllFunctions = true);
 
-/// This pass expands the reduction intrinsics into sequences of shuffles.
-FunctionPass *createExpandReductionsPass();
+  /// This pass expands the reduction intrinsics into sequences of shuffles.
+  FunctionPass *createExpandReductionsPass();
 
-// This pass replaces intrinsics operating on vector operands with calls to
-// the corresponding function in a vector library (e.g., SVML, libmvec).
-FunctionPass *createReplaceWithVeclibLegacyPass();
+  // This pass replaces intrinsics operating on vector operands with calls to
+  // the corresponding function in a vector library (e.g., SVML, libmvec).
+  FunctionPass *createReplaceWithVeclibLegacyPass();
 
-/// This pass expands the vector predication intrinsics into unpredicated
-/// instructions with selects or just the explicit vector length into the
-/// predicate mask.
-FunctionPass *createExpandVectorPredicationPass();
+  /// This pass expands the vector predication intrinsics into unpredicated
+  /// instructions with selects or just the explicit vector length into the
+  /// predicate mask.
+  FunctionPass *createExpandVectorPredicationPass();
 
-// Expands large div/rem instructions.
-FunctionPass *createExpandLargeDivRemPass();
+  // Expands large div/rem instructions.
+  FunctionPass *createExpandLargeDivRemPass();
 
-// Expands large div/rem instructions.
-FunctionPass *createExpandLargeFpConvertPass();
+  // Expands large div/rem instructions.
+  FunctionPass *createExpandLargeFpConvertPass();
 
-// This pass expands memcmp() to load/stores.
-FunctionPass *createExpandMemCmpPass();
+  // This pass expands memcmp() to load/stores.
+  FunctionPass *createExpandMemCmpPass();
 
-/// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
-FunctionPass *createBreakFalseDeps();
+  /// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
+  FunctionPass *createBreakFalseDeps();
 
-// This pass expands indirectbr instructions.
-FunctionPass *createIndirectBrExpandPass();
+  // This pass expands indirectbr instructions.
+  FunctionPass *createIndirectBrExpandPass();
 
-/// Creates CFI Fixup pass. \see CFIFixup.cpp
-FunctionPass *createCFIFixup();
+  /// Creates CFI Fixup pass. \see CFIFixup.cpp
+  FunctionPass *createCFIFixup();
 
-/// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp
-FunctionPass *createCFIInstrInserter();
+  /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp
+  FunctionPass *createCFIInstrInserter();
 
-/// Creates CFGuard longjmp target identification pass.
-/// \see CFGuardLongjmp.cpp
-FunctionPass *createCFGuardLongjmpPass();
+  /// Creates CFGuard longjmp target identification pass.
+  /// \see CFGuardLongjmp.cpp
+  FunctionPass *createCFGuardLongjmpPass();
 
-/// Creates EHContGuard catchret target identification pass.
-/// \see EHContGuardCatchret.cpp
-FunctionPass *createEHContGuardCatchretPass();
+  /// Creates EHContGuard catchret target identification pass.
+  /// \see EHContGuardCatchret.cpp
+  FunctionPass *createEHContGuardCatchretPass();
 
-/// Create Hardware Loop pass. \see HardwareLoops.cpp
-FunctionPass *createHardwareLoopsLegacyPass();
+  /// Create Hardware Loop pass. \see HardwareLoops.cpp
+  FunctionPass *createHardwareLoopsLegacyPass();
 
-/// This pass inserts pseudo probe annotation for callsite profiling.
-FunctionPass *createPseudoProbeInserter();
+  /// This pass inserts pseudo probe annotation for callsite profiling.
+  FunctionPass *createPseudoProbeInserter();
 
-/// Create IR Type Promotion pass. \see TypePromotion.cpp
-FunctionPass *createTypePromotionLegacyPass();
+  /// Create IR Type Promotion pass. \see TypePromotion.cpp
+  FunctionPass *createTypePromotionLegacyPass();
 
-/// Add Flow Sensitive Discriminators. PassNum specifies the
-/// sequence number of this pass (starting from 1).
-FunctionPass *
-createMIRAddFSDiscriminatorsPass(sampleprof::FSDiscriminatorPass P);
+  /// Add Flow Sensitive Discriminators. PassNum specifies the
+  /// sequence number of this pass (starting from 1).
+  FunctionPass *
+  createMIRAddFSDiscriminatorsPass(sampleprof::FSDiscriminatorPass P);
 
-/// Read Flow Sensitive Profile.
-FunctionPass *
-createMIRProfileLoaderPass(std::string File, std::string RemappingFile,
-                           sampleprof::FSDiscriminatorPass P,
-                           IntrusiveRefCntPtr<vfs::FileSystem> FS);
+  /// Read Flow Sensitive Profile.
+  FunctionPass *
+  createMIRProfileLoaderPass(std::string File, std::string RemappingFile,
+                             sampleprof::FSDiscriminatorPass P,
+                             IntrusiveRefCntPtr<vfs::FileSystem> FS);
 
-/// Creates MIR Debugify pass. \see MachineDebugify.cpp
-ModulePass *createDebugifyMachineModulePass();
+  /// Creates MIR Debugify pass. \see MachineDebugify.cpp
+  ModulePass *createDebugifyMachineModulePass();
 
-/// Creates MIR Strip Debug pass. \see MachineStripDebug.cpp
-/// If OnlyDebugified is true then it will only strip debug info if it was
-/// added by a Debugify pass. The module will be left unchanged if the debug
-/// info was generated by another source such as clang.
-ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified);
+  /// Creates MIR Strip Debug pass. \see MachineStripDebug.cpp
+  /// If OnlyDebugified is true then it will only strip debug info if it was
+  /// added by a Debugify pass. The module will be left unchanged if the debug
+  /// info was generated by another source such as clang.
+  ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified);
 
-/// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
-ModulePass *createCheckDebugMachineModulePass();
+  /// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
+  ModulePass *createCheckDebugMachineModulePass();
 
-/// The pass fixups statepoint machine instruction to replace usage of
-/// caller saved registers with stack slots.
-extern char &FixupStatepointCallerSavedID;
+  /// The pass fixups statepoint machine instruction to replace usage of
+  /// caller saved registers with stack slots.
+  extern char &FixupStatepointCallerSavedID;
 
-/// The pass transforms load/store <256 x i32> to AMX load/store intrinsics
-/// or split the data to two <128 x i32>.
-FunctionPass *createX86LowerAMXTypePass();
+  /// The pass transforms load/store <256 x i32> to AMX load/store intrinsics
+  /// or split the data to two <128 x i32>.
+  FunctionPass *createX86LowerAMXTypePass();
 
-/// The pass transforms amx intrinsics to scalar operation if the function has
-/// optnone attribute or it is O0.
-FunctionPass *createX86LowerAMXIntrinsicsPass();
+  /// The pass transforms amx intrinsics to scalar operation if the function has
+  /// optnone attribute or it is O0.
+  FunctionPass *createX86LowerAMXIntrinsicsPass();
 
-/// When learning an eviction policy, extract score(reward) information,
-/// otherwise this does nothing
-FunctionPass *createRegAllocScoringPass();
+  /// When learning an eviction policy, extract score(reward) information,
+  /// otherwise this does nothing
+  FunctionPass *createRegAllocScoringPass();
 
-/// JMC instrument pass.
-ModulePass *createJMCInstrumenterPass();
+  /// JMC instrument pass.
+  ModulePass *createJMCInstrumenterPass();
 
-/// This pass converts conditional moves to conditional jumps when profitable.
-FunctionPass *createSelectOptimizePass();
+  /// This pass converts conditional moves to conditional jumps when profitable.
+  FunctionPass *createSelectOptimizePass();
 
-FunctionPass *createCallBrPass();
+  FunctionPass *createCallBrPass();
 
-/// Lowers KCFI operand bundles for indirect calls.
-FunctionPass *createKCFIPass();
+  /// Lowers KCFI operand bundles for indirect calls.
+  FunctionPass *createKCFIPass();
 } // End llvm namespace
 
 #endif
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 2ccc218f3d8f38..3929b2b9181de2 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -14,6 +14,7 @@
 ///
 //===----------------------------------------------------------------------===//
 
+#include "llvm/CodeGen/AtomicExpand.h"
 #include "llvm/Passes/PassBuilder.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Analysis/AliasAnalysisEvaluator.h"
@@ -72,7 +73,6 @@
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
 #include "llvm/Analysis/UniformityAnalysis.h"
-#include "llvm/CodeGen/AtomicExpand.h"
 #include "llvm/CodeGen/HardwareLoops.h"
 #include "llvm/CodeGen/TypePromotion.h"
 #include "llvm/IR/DebugInfo.h"

>From ece871c4c83dd5f504b2141342d24870d98e100d Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Sat, 13 Jan 2024 19:23:42 +0530
Subject: [PATCH 3/4] Address initial comments

---
 llvm/include/llvm/CodeGen/AtomicExpand.h | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/AtomicExpand.h b/llvm/include/llvm/CodeGen/AtomicExpand.h
index 2430ac5afa9844..1b8a988ef48664 100644
--- a/llvm/include/llvm/CodeGen/AtomicExpand.h
+++ b/llvm/include/llvm/CodeGen/AtomicExpand.h
@@ -1,15 +1,10 @@
-//===- AtomicExpand.h ------------------------------------------*- C++ -*-===//
+//===-- AtomicExpand.h - Expand Atomic Instructions -------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-/// \file
-///
-///
-///
-//===----------------------------------------------------------------------===//
 
 #ifndef LLVM_CODEGEN_ATOMICEXPAND_H
 #define LLVM_CODEGEN_ATOMICEXPAND_H
@@ -32,4 +27,4 @@ class AtomicExpandPass : public PassInfoMixin<AtomicExpandPass> {
 
 } // end namespace llvm
 
-#endif // LLVM_CODEGEN_ATOMICEXPAND_H
\ No newline at end of file
+#endif // LLVM_CODEGEN_ATOMICEXPAND_H

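With the rename in the patch below, the only change to the sketch earlier in this mail would be the pass name, assuming the new-PM registration follows the "expandatomic" string used in MachinePassRegistry.def:

  // Renamed class from the patch below; same TargetMachine-taking constructor.
  MPM.addPass(createModuleToFunctionPassAdaptor(ExpandAtomicPass(TM)));

and the test RUN lines would then use -passes=expandatomic instead of -passes=atomicexpand.
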
>From 2ba734e919f24218fd06e82e794d5a49b12ff448 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Sun, 14 Jan 2024 01:25:43 +0530
Subject: [PATCH 4/4] Rename Pass

---
 .../{AtomicExpand.h => ExpandAtomic.h}        | 12 +--
 .../llvm/CodeGen/MachinePassRegistry.def      |  2 +-
 llvm/include/llvm/CodeGen/Passes.h            |  6 +-
 .../llvm/CodeGen/TargetSubtargetInfo.h        |  2 +-
 llvm/include/llvm/InitializePasses.h          |  2 +-
 llvm/lib/CodeGen/CMakeLists.txt               |  2 +-
 llvm/lib/CodeGen/CodeGen.cpp                  |  2 +-
 ...micExpandPass.cpp => ExpandAtomicPass.cpp} | 98 +++++++++----------
 llvm/lib/CodeGen/TargetSubtargetInfo.cpp      |  2 +-
 llvm/lib/Passes/PassBuilder.cpp               |  2 +-
 llvm/lib/Passes/PassRegistry.def              |  2 +-
 .../Target/AArch64/AArch64TargetMachine.cpp   |  2 +-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |  2 +-
 llvm/lib/Target/ARM/ARMTargetMachine.cpp      |  2 +-
 llvm/lib/Target/CSKY/CSKYTargetMachine.cpp    |  2 +-
 .../Target/Hexagon/HexagonTargetMachine.cpp   |  2 +-
 .../LoongArch/LoongArchTargetMachine.cpp      |  2 +-
 llvm/lib/Target/M68k/M68kTargetMachine.cpp    |  2 +-
 llvm/lib/Target/Mips/MipsTargetMachine.cpp    |  2 +-
 llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp  |  2 +-
 .../PowerPC/PPCExpandAtomicPseudoInsts.cpp    |  2 +-
 llvm/lib/Target/PowerPC/PPCTargetMachine.cpp  |  2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |  2 +-
 llvm/lib/Target/Sparc/SparcTargetMachine.cpp  |  2 +-
 .../Target/SystemZ/SystemZTargetMachine.cpp   |  2 +-
 llvm/lib/Target/VE/VETargetMachine.cpp        |  2 +-
 .../WebAssembly/WebAssemblySubtarget.cpp      |  2 +-
 .../Target/WebAssembly/WebAssemblySubtarget.h |  2 +-
 .../WebAssembly/WebAssemblyTargetMachine.cpp  |  2 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp      |  2 +-
 llvm/lib/Target/XCore/XCoreTargetMachine.cpp  |  2 +-
 .../test/CodeGen/AMDGPU/idemponent-atomics.ll |  2 +-
 .../CodeGen/AMDGPU/private-memory-atomics.ll  |  2 +-
 .../AtomicExpand/AArch64/atomicrmw-fp.ll      |  2 +-
 .../AArch64/expand-atomicrmw-xchg-fp.ll       |  4 +-
 .../AtomicExpand/AArch64/pcsections.ll        |  2 +-
 .../AMDGPU/expand-atomic-i16-system.ll        |  2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i16.ll  |  4 +-
 .../AMDGPU/expand-atomic-i8-system.ll         |  2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i8.ll   |  4 +-
 ...and-atomic-rmw-fadd-flat-specialization.ll |  8 +-
 .../AMDGPU/expand-atomic-rmw-fadd.ll          | 12 +--
 .../AMDGPU/expand-atomic-rmw-fmax.ll          |  4 +-
 .../AMDGPU/expand-atomic-rmw-fmin.ll          |  4 +-
 .../AMDGPU/expand-atomic-rmw-fsub.ll          |  4 +-
 .../AMDGPU/expand-atomic-rmw-nand.ll          |  4 +-
 .../expand-atomic-simplify-cfg-CAS-block.ll   |  2 +-
 .../AtomicExpand/AMDGPU/unaligned-atomic.ll   |  2 +-
 .../AtomicExpand/ARM/atomic-expansion-v7.ll   |  2 +-
 .../AtomicExpand/ARM/atomic-expansion-v8.ll   |  2 +-
 .../AtomicExpand/ARM/atomicrmw-fp.ll          |  2 +-
 .../AtomicExpand/ARM/cmpxchg-weak.ll          |  2 +-
 .../AtomicExpand/Hexagon/atomicrmw-fp.ll      |  2 +-
 .../AtomicExpand/LoongArch/atomicrmw-fp.ll    |  2 +-
 .../LoongArch/load-store-atomic.ll            |  4 +-
 .../AtomicExpand/Mips/atomicrmw-fp.ll         |  2 +-
 .../AtomicExpand/PowerPC/atomicrmw-fp.ll      |  2 +-
 .../AtomicExpand/PowerPC/cfence-double.ll     |  4 +-
 .../AtomicExpand/PowerPC/cfence-float.ll      |  4 +-
 .../AtomicExpand/PowerPC/cmpxchg.ll           |  4 +-
 .../AtomicExpand/PowerPC/issue55983.ll        |  4 +-
 .../AtomicExpand/RISCV/atomicrmw-fp.ll        |  2 +-
 .../Transforms/AtomicExpand/SPARC/libcalls.ll |  2 +-
 .../Transforms/AtomicExpand/SPARC/partword.ll |  2 +-
 .../AtomicExpand/X86/expand-atomic-libcall.ll |  2 +-
 .../X86/expand-atomic-non-integer.ll          |  2 +-
 .../AtomicExpand/X86/expand-atomic-rmw-fp.ll  |  2 +-
 .../X86/expand-atomic-rmw-initial-load.ll     |  2 +-
 .../AtomicExpand/X86/expand-atomic-xchg-fp.ll |  2 +-
 .../gn/secondary/llvm/lib/CodeGen/BUILD.gn    |  2 +-
 70 files changed, 145 insertions(+), 145 deletions(-)
 rename llvm/include/llvm/CodeGen/{AtomicExpand.h => ExpandAtomic.h} (64%)
 rename llvm/lib/CodeGen/{AtomicExpandPass.cpp => ExpandAtomicPass.cpp} (96%)

diff --git a/llvm/include/llvm/CodeGen/AtomicExpand.h b/llvm/include/llvm/CodeGen/ExpandAtomic.h
similarity index 64%
rename from llvm/include/llvm/CodeGen/AtomicExpand.h
rename to llvm/include/llvm/CodeGen/ExpandAtomic.h
index 1b8a988ef48664..4ba49f8886ca94 100644
--- a/llvm/include/llvm/CodeGen/AtomicExpand.h
+++ b/llvm/include/llvm/CodeGen/ExpandAtomic.h
@@ -1,4 +1,4 @@
-//===-- AtomicExpand.h - Expand Atomic Instructions -------------*- C++ -*-===//
+//===-- ExpandAtomic.h - Expand Atomic Instructions -------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_CODEGEN_ATOMICEXPAND_H
-#define LLVM_CODEGEN_ATOMICEXPAND_H
+#ifndef LLVM_CODEGEN_EXPANDATOMIC_H
+#define LLVM_CODEGEN_EXPANDATOMIC_H
 
 #include "llvm/IR/PassManager.h"
 
@@ -16,15 +16,15 @@ namespace llvm {
 class Function;
 class TargetMachine;
 
-class AtomicExpandPass : public PassInfoMixin<AtomicExpandPass> {
+class ExpandAtomicPass : public PassInfoMixin<ExpandAtomicPass> {
 private:
   const TargetMachine *TM;
 
 public:
-  AtomicExpandPass(const TargetMachine *TM) : TM(TM) {}
+  ExpandAtomicPass(const TargetMachine *TM) : TM(TM) {}
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
 } // end namespace llvm
 
-#endif // LLVM_CODEGEN_ATOMICEXPAND_H
+#endif // LLVM_CODEGEN_EXPANDATOMIC_H
diff --git a/llvm/include/llvm/CodeGen/MachinePassRegistry.def b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
index a29269644ea1dc..680c1b403a85c8 100644
--- a/llvm/include/llvm/CodeGen/MachinePassRegistry.def
+++ b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
@@ -116,7 +116,7 @@ DUMMY_FUNCTION_PASS("wasmehprepare", WasmEHPass, ())
 DUMMY_FUNCTION_PASS("codegenprepare", CodeGenPreparePass, ())
 DUMMY_FUNCTION_PASS("safe-stack", SafeStackPass, ())
 DUMMY_FUNCTION_PASS("stack-protector", StackProtectorPass, ())
-DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
+DUMMY_FUNCTION_PASS("expandatomic", ExpandAtomicPass, ())
 DUMMY_FUNCTION_PASS("interleaved-access", InterleavedAccessPass, ())
 DUMMY_FUNCTION_PASS("indirectbr-expand", IndirectBrExpandPass, ())
 DUMMY_FUNCTION_PASS("cfguard-dispatch", CFGuardDispatchPass, ())
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 8444bf18147581..b9f9ce936dbb7f 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -41,10 +41,10 @@ class FileSystem;
 // List of target independent CodeGen pass IDs.
 namespace llvm {
 
-  /// AtomicExpandPass - At IR level this pass replace atomic instructions with
+  /// ExpandAtomicPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createAtomicExpandLegacyPass();
+  FunctionPass *createExpandAtomicLegacyPass();
 
   /// createUnreachableBlockEliminationPass - The LLVM code generator does not
   /// work well with unreachable basic blocks (what live ranges make sense for a
@@ -103,7 +103,7 @@ namespace llvm {
 
   /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
   /// load-linked/store-conditional loops.
-  extern char &AtomicExpandID;
+  extern char &ExpandAtomicID;
 
   /// MachineLoopInfo - This pass is a loop analysis pass.
   extern char &MachineLoopInfoID;
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index 55ef95c2854319..da1fd6737b796f 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -215,7 +215,7 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
   virtual bool enablePostRAMachineScheduler() const;
 
   /// True if the subtarget should run the atomic expansion pass.
-  virtual bool enableAtomicExpand() const;
+  virtual bool enableExpandAtomic() const;
 
   /// True if the subtarget should run the indirectbr expansion pass.
   virtual bool enableIndirectBrExpand() const;
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index a2bd5bdf4c5341..04365af3d1a74e 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -54,7 +54,7 @@ void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
 void initializeAssignmentTrackingAnalysisPass(PassRegistry &);
 void initializeAssumeBuilderPassLegacyPassPass(PassRegistry &);
 void initializeAssumptionCacheTrackerPass(PassRegistry&);
-void initializeAtomicExpandLegacyPass(PassRegistry &);
+void initializeExpandAtomicLegacyPass(PassRegistry &);
 void initializeBasicBlockPathCloningPass(PassRegistry &);
 void initializeBasicBlockSectionsProfileReaderPass(PassRegistry &);
 void initializeBasicBlockSectionsPass(PassRegistry &);
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index df2d1831ee5fdb..12bd12bc49caeb 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -40,7 +40,7 @@ add_llvm_component_library(LLVMCodeGen
   AllocationOrder.cpp
   Analysis.cpp
   AssignmentTrackingAnalysis.cpp
-  AtomicExpandPass.cpp
+  ExpandAtomicPass.cpp
   BasicTargetTransformInfo.cpp
   BranchFolding.cpp
   BranchRelaxation.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index ac4747d6d572da..eacd8b36b610c8 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -19,7 +19,7 @@ using namespace llvm;
 /// initializeCodeGen - Initialize all passes linked into the CodeGen library.
 void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeAssignmentTrackingAnalysisPass(Registry);
-  initializeAtomicExpandLegacyPass(Registry);
+  initializeExpandAtomicLegacyPass(Registry);
   initializeBasicBlockPathCloningPass(Registry);
   initializeBasicBlockSectionsPass(Registry);
   initializeBranchFolderPassPass(Registry);
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/ExpandAtomicPass.cpp
similarity index 96%
rename from llvm/lib/CodeGen/AtomicExpandPass.cpp
rename to llvm/lib/CodeGen/ExpandAtomicPass.cpp
index d756b972596c51..5f8e069bafc7a6 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/ExpandAtomicPass.cpp
@@ -1,4 +1,4 @@
-//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
+//===- ExpandAtomicPass.cpp - Expand atomic instructions ------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -19,7 +19,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/InstSimplifyFolder.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/CodeGen/AtomicExpand.h"
+#include "llvm/CodeGen/ExpandAtomic.h"
 #include "llvm/CodeGen/AtomicExpandUtils.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/TargetLowering.h"
@@ -56,11 +56,11 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "atomic-expand"
+#define DEBUG_TYPE "expand-atomic"
 
 namespace {
 
-class AtomicExpandImpl {
+class ExpandAtomicImpl {
   const TargetLowering *TLI = nullptr;
   const DataLayout *DL = nullptr;
 
@@ -121,12 +121,12 @@ class AtomicExpandImpl {
   bool run(Function &F, const TargetMachine *TM);
 };
 
-class AtomicExpandLegacy : public FunctionPass {
+class ExpandAtomicLegacy : public FunctionPass {
 public:
   static char ID; // Pass identification, replacement for typeid
 
-  AtomicExpandLegacy() : FunctionPass(ID) {
-    initializeAtomicExpandLegacyPass(*PassRegistry::getPassRegistry());
+  ExpandAtomicLegacy() : FunctionPass(ID) {
+    initializeExpandAtomicLegacyPass(*PassRegistry::getPassRegistry());
   }
 
   bool runOnFunction(Function &F) override;
@@ -144,14 +144,14 @@ struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
 
 } // end anonymous namespace
 
-char AtomicExpandLegacy::ID = 0;
+char ExpandAtomicLegacy::ID = 0;
 
-char &llvm::AtomicExpandID = AtomicExpandLegacy::ID;
+char &llvm::ExpandAtomicID = ExpandAtomicLegacy::ID;
 
-INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
+INITIALIZE_PASS_BEGIN(ExpandAtomicLegacy, DEBUG_TYPE,
                       "Expand Atomic instructions", false, false)
 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
+INITIALIZE_PASS_END(ExpandAtomicLegacy, DEBUG_TYPE,
                     "Expand Atomic instructions", false, false)
 
 // Helper functions to retrieve the size of atomic instructions.
@@ -186,9 +186,9 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
          Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
 }
 
-bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
+bool ExpandAtomicImpl::run(Function &F, const TargetMachine *TM) {
   const auto *Subtarget = TM->getSubtargetImpl(F);
-  if (!Subtarget->enableAtomicExpand())
+  if (!Subtarget->enableExpandAtomic())
     return false;
   TLI = Subtarget->getTargetLowering();
   DL = &F.getParent()->getDataLayout();
@@ -342,7 +342,7 @@ bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
   return MadeChange;
 }
 
-bool AtomicExpandLegacy::runOnFunction(Function &F) {
+bool ExpandAtomicLegacy::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;
 
@@ -352,17 +352,17 @@ bool AtomicExpandLegacy::runOnFunction(Function &F) {
 
   auto *TM = &TPC->getTM<TargetMachine>();
 
-  AtomicExpandImpl AE;
+  ExpandAtomicImpl AE;
   return AE.run(F, TM);
 }
 
-FunctionPass *llvm::createAtomicExpandLegacyPass() {
-  return new AtomicExpandLegacy();
+FunctionPass *llvm::createExpandAtomicLegacyPass() {
+  return new ExpandAtomicLegacy();
 }
 
-PreservedAnalyses AtomicExpandPass::run(Function &F,
+PreservedAnalyses ExpandAtomicPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
-  AtomicExpandImpl AE;
+  ExpandAtomicImpl AE;
 
   bool Changed = AE.run(F, TM);
   if (!Changed)
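
A minimal sketch of driving the renamed new-PM pass on its own, for anyone
trying this out of tree: the header path and the ExpandAtomicPass(TM)
constructor match this patch, while the helper name runExpandAtomic and the
bare-bones PassBuilder setup are illustrative only.

  #include "llvm/CodeGen/ExpandAtomic.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Target/TargetMachine.h"

  using namespace llvm;

  // Run only atomic expansion on one function under the new pass manager.
  static void runExpandAtomic(Function &F, TargetMachine *TM) {
    PassBuilder PB(TM);
    FunctionAnalysisManager FAM;
    PB.registerFunctionAnalyses(FAM); // register the default function analyses

    FunctionPassManager FPM;
    FPM.addPass(ExpandAtomicPass(TM)); // the new-PM pass is constructed with the TargetMachine
    FPM.run(F, FAM);
  }
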
@@ -373,7 +373,7 @@ PreservedAnalyses AtomicExpandPass::run(Function &F,
   return PA;
 }
 
-bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
+bool ExpandAtomicImpl::bracketInstWithFences(Instruction *I,
                                              AtomicOrdering Order) {
   ReplacementIRBuilder Builder(I, *DL);
 
@@ -390,7 +390,7 @@ bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
 
 /// Get the iX type with the same bitwidth as T.
 IntegerType *
-AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
+ExpandAtomicImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
   EVT VT = TLI->getMemValueType(DL, T);
   unsigned BitWidth = VT.getStoreSizeInBits();
   assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
@@ -400,7 +400,7 @@ AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
 /// Convert an atomic load of a non-integral type to an integer load of the
 /// equivalent bitwidth.  See the function comment on
 /// convertAtomicStoreToIntegerType for background.
-LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
+LoadInst *ExpandAtomicImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
   auto *M = LI->getModule();
   Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());
 
@@ -421,7 +421,7 @@ LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
 }
 
 AtomicRMWInst *
-AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
+ExpandAtomicImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   auto *M = RMWI->getModule();
   Type *NewTy =
       getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());
@@ -448,7 +448,7 @@ AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   return NewRMWI;
 }
 
-bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
+bool ExpandAtomicImpl::tryExpandAtomicLoad(LoadInst *LI) {
   switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -470,7 +470,7 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
   }
 }
 
-bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
+bool ExpandAtomicImpl::tryExpandAtomicStore(StoreInst *SI) {
   switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -485,7 +485,7 @@ bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
   }
 }
 
-bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
+bool ExpandAtomicImpl::expandAtomicLoadToLL(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -501,7 +501,7 @@ bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
   return true;
 }
 
-bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
+bool ExpandAtomicImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
   AtomicOrdering Order = LI->getOrdering();
   if (Order == AtomicOrdering::Unordered)
@@ -530,7 +530,7 @@ bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
 /// instruction select from the original atomic store, but as a migration
 /// mechanism, we convert back to the old format which the backends understand.
 /// Each backend will need individual work to recognize the new format.
-StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
+StoreInst *ExpandAtomicImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
   ReplacementIRBuilder Builder(SI, *DL);
   auto *M = SI->getModule();
   Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
@@ -548,7 +548,7 @@ StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
   return NewSI;
 }
 
-void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
+void ExpandAtomicImpl::expandAtomicStore(StoreInst *SI) {
   // This function is only called on atomic stores that are too large to be
   // atomic if implemented as a native store. So we replace them by an
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
@@ -595,7 +595,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
     NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
 }
 
-bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+bool ExpandAtomicImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   LLVMContext &Ctx = AI->getModule()->getContext();
   TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
   switch (Kind) {
@@ -877,7 +877,7 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
 /// way as a typical atomicrmw expansion. The only difference here is
 /// that the operation inside of the loop may operate upon only a
 /// part of the value.
-void AtomicExpandImpl::expandPartwordAtomicRMW(
+void ExpandAtomicImpl::expandPartwordAtomicRMW(
     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
   AtomicOrdering MemOpOrder = AI->getOrdering();
   SyncScope::ID SSID = AI->getSyncScopeID();
@@ -921,7 +921,7 @@ void AtomicExpandImpl::expandPartwordAtomicRMW(
 }
 
 // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
-AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
+AtomicRMWInst *ExpandAtomicImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
   AtomicRMWInst::BinOp Op = AI->getOperation();
 
@@ -956,7 +956,7 @@ AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   return NewAI;
 }
 
-bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
+bool ExpandAtomicImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   // The basic idea here is that we're expanding a cmpxchg of a
   // smaller memory size up to a word-sized cmpxchg. To do this, we
   // need to add a retry-loop for strong cmpxchg, so that
@@ -1081,7 +1081,7 @@ bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-void AtomicExpandImpl::expandAtomicOpToLLSC(
+void ExpandAtomicImpl::expandAtomicOpToLLSC(
     Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1093,7 +1093,7 @@ void AtomicExpandImpl::expandAtomicOpToLLSC(
   I->eraseFromParent();
 }
 
-void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
+void ExpandAtomicImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
 
   PartwordMaskValues PMV =
@@ -1119,7 +1119,7 @@ void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   AI->eraseFromParent();
 }
 
-void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
+void ExpandAtomicImpl::expandAtomicCmpXchgToMaskedIntrinsic(
     AtomicCmpXchgInst *CI) {
   ReplacementIRBuilder Builder(CI, *DL);
 
@@ -1147,7 +1147,7 @@ void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
   CI->eraseFromParent();
 }
 
-Value *AtomicExpandImpl::insertRMWLLSCLoop(
+Value *ExpandAtomicImpl::insertRMWLLSCLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1203,7 +1203,7 @@ Value *AtomicExpandImpl::insertRMWLLSCLoop(
 /// way to represent a pointer cmpxchg so that we can update backends one by
 /// one.
 AtomicCmpXchgInst *
-AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
+ExpandAtomicImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   auto *M = CI->getModule();
   Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                             M->getDataLayout());
@@ -1236,7 +1236,7 @@ AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   return NewCI;
 }
 
-bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool ExpandAtomicImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -1482,7 +1482,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
+bool ExpandAtomicImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
   auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
   if (!C)
     return false;
@@ -1502,7 +1502,7 @@ bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
   }
 }
 
-bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
+bool ExpandAtomicImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     tryExpandAtomicLoad(ResultingLoad);
     return true;
@@ -1510,7 +1510,7 @@ bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   return false;
 }
 
-Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
+Value *ExpandAtomicImpl::insertRMWCmpXchgLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder, SyncScope::ID SSID,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
@@ -1571,7 +1571,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
   return NewLoaded;
 }
 
-bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool ExpandAtomicImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
   unsigned ValueSize = getAtomicOpSize(CI);
 
@@ -1602,7 +1602,7 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
 
   // FIXME: If FP exceptions are observable, we should force them off for the
   // loop for the FP atomics.
-  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
+  Value *Loaded = ExpandAtomicImpl::insertRMWCmpXchgLoop(
       Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
       AI->getOrdering(), AI->getSyncScopeID(),
       [&](IRBuilderBase &Builder, Value *Loaded) {
@@ -1636,7 +1636,7 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
          Size <= LargestSize;
 }
 
-void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
+void ExpandAtomicImpl::expandAtomicLoadToLibcall(LoadInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
       RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
@@ -1649,7 +1649,7 @@ void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
 }
 
-void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
+void ExpandAtomicImpl::expandAtomicStoreToLibcall(StoreInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
       RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
@@ -1662,7 +1662,7 @@ void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
 }
 
-void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
+void ExpandAtomicImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
       RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
@@ -1740,7 +1740,7 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
   llvm_unreachable("Unexpected AtomicRMW operation.");
 }
 
-void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
+void ExpandAtomicImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
   ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
 
   unsigned Size = getAtomicOpSize(I);
@@ -1779,7 +1779,7 @@ void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
 // ATOMIC libcalls to be emitted. All of the other arguments besides
 // 'I' are extracted from the Instruction subclass by the
 // caller. Depending on the particular call, some will be null.
-bool AtomicExpandImpl::expandAtomicOpToLibcall(
+bool ExpandAtomicImpl::expandAtomicOpToLibcall(
     Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
     Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
     AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
diff --git a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
index 6c97bc0568bdee..a8fa14bebbe686 100644
--- a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
+++ b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
@@ -24,7 +24,7 @@ TargetSubtargetInfo::TargetSubtargetInfo(
 
 TargetSubtargetInfo::~TargetSubtargetInfo() = default;
 
-bool TargetSubtargetInfo::enableAtomicExpand() const {
+bool TargetSubtargetInfo::enableExpandAtomic() const {
   return true;
 }
 
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 3929b2b9181de2..c77f678e07a2d4 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -14,7 +14,7 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#include "llvm/CodeGen/AtomicExpand.h"
+#include "llvm/CodeGen/ExpandAtomic.h"
 #include "llvm/Passes/PassBuilder.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Analysis/AliasAnalysisEvaluator.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 1318221c24e65a..cd39892f94aca7 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -429,7 +429,7 @@ FUNCTION_PASS("strip-gc-relocates", StripGCRelocates())
 FUNCTION_PASS("structurizecfg", StructurizeCFGPass())
 FUNCTION_PASS("tailcallelim", TailCallElimPass())
 FUNCTION_PASS("typepromotion", TypePromotionPass(TM))
-FUNCTION_PASS("atomicexpand", AtomicExpandPass(TM))
+FUNCTION_PASS("expandatomic", ExpandAtomicPass(TM))
 FUNCTION_PASS("unify-loop-exits", UnifyLoopExitsPass())
 FUNCTION_PASS("vector-combine", VectorCombinePass())
 FUNCTION_PASS("verify", VerifierPass())
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 43ede3c1ceecc0..bfea06668e366e 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -551,7 +551,7 @@ std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
 void AArch64PassConfig::addIRPasses() {
   // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
   // ourselves.
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   // Expand any SVE vector library calls that we can't code generate directly.
   if (EnableSVEIntrinsicOpts &&
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 8e3c8f94634ce9..3974e0162e7919 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1033,7 +1033,7 @@ void AMDGPUPassConfig::addIRPasses() {
     addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
   }
 
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   if (TM.getOptLevel() > CodeGenOptLevel::None) {
     addPass(createAMDGPUPromoteAlloca());
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 5a5142f7df0d88..5ffef5f3347a97 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -417,7 +417,7 @@ void ARMPassConfig::addIRPasses() {
   if (TM->Options.ThreadModel == ThreadModel::Single)
     addPass(createLowerAtomicPass());
   else
-    addPass(createAtomicExpandLegacyPass());
+    addPass(createExpandAtomicLegacyPass());
 
   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in
diff --git a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
index 0bbfabe93147c5..223e27b49fe3df 100644
--- a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
+++ b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
@@ -118,7 +118,7 @@ TargetPassConfig *CSKYTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void CSKYPassConfig::addIRPasses() {
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index dc2ab4837f4e83..1ce3cc9bbf587a 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -338,7 +338,7 @@ void HexagonPassConfig::addIRPasses() {
     addPass(createDeadCodeEliminationPass());
   }
 
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   if (!NoOpt) {
     if (EnableInitialCFGCleanup)
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index 85fd3d291e2da2..b9b29e58c42835 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -161,7 +161,7 @@ void LoongArchPassConfig::addIRPasses() {
   // pointer values N iterations ahead.
   if (TM->getOptLevel() != CodeGenOptLevel::None && EnableLoopDataPrefetch)
     addPass(createLoopDataPrefetchPass());
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index bbbcb1556ed557..e8d399e50e6c68 100644
--- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -171,7 +171,7 @@ TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void M68kPassConfig::addIRPasses() {
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 4c4bf70e22c6c1..5113b51938e7a4 100644
--- a/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -263,7 +263,7 @@ std::unique_ptr<CSEConfigBase> MipsPassConfig::getCSEConfig() const {
 
 void MipsPassConfig::addIRPasses() {
   TargetPassConfig::addIRPasses();
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
   if (getMipsSubtarget().os16())
     addPass(createMipsOs16Pass());
   if (getMipsSubtarget().inMips16HardFloat())
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 4265ac85c4c3c1..b9f867f7c6b195 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -379,7 +379,7 @@ void NVPTXPassConfig::addIRPasses() {
     addStraightLineScalarOptimizationPasses();
   }
 
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
   addPass(createNVPTXCtorDtorLoweringLegacyPass());
 
   // === LSR and other generic IR passes ===
diff --git a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
index 904d9b7d9f1fbf..774c3eb014ba4c 100644
--- a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
@@ -23,7 +23,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "ppc-passes=atomicexpand"
+#define DEBUG_TYPE "ppc-passes=expandatomic"
 
 namespace {
 
diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index d178b5f02ae717..66b6b0f27445d4 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -457,7 +457,7 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
 void PPCPassConfig::addIRPasses() {
   if (TM->getOptLevel() != CodeGenOptLevel::None)
     addPass(createPPCBoolRetToIntPass());
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   // Lower generic MASSV routines to PowerPC subtarget-specific entries.
   addPass(createPPCLowerMASSVEntriesPass());
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 21b3863dcae79c..992e89df34139e 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -306,7 +306,7 @@ TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void RISCVPassConfig::addIRPasses() {
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   if (getOptLevel() != CodeGenOptLevel::None) {
     addPass(createRISCVGatherScatterLoweringPass());
diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index 23fdf6e47ee6a0..d70c563751df9c 100644
--- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -172,7 +172,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void SparcPassConfig::addIRPasses() {
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index 2a11921b2e7d78..91945cb5875c96 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -226,7 +226,7 @@ void SystemZPassConfig::addIRPasses() {
     addPass(createLoopDataPrefetchPass());
   }
 
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/VE/VETargetMachine.cpp b/llvm/lib/Target/VE/VETargetMachine.cpp
index 6f4e137e4d2f18..c9c6f134b61faa 100644
--- a/llvm/lib/Target/VE/VETargetMachine.cpp
+++ b/llvm/lib/Target/VE/VETargetMachine.cpp
@@ -134,7 +134,7 @@ TargetPassConfig *VETargetMachine::createPassConfig(PassManagerBase &PM) {
 
 void VEPassConfig::addIRPasses() {
   // VE requires atomic expand pass.
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index 912f61765579f8..100e3a60c5ea3b 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -45,7 +45,7 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
       TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
       TLInfo(TM, *this) {}
 
-bool WebAssemblySubtarget::enableAtomicExpand() const {
+bool WebAssemblySubtarget::enableExpandAtomic() const {
   // If atomics are disabled, atomic ops are lowered instead of expanded
   return hasAtomics();
 }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
index 85d02b087c786e..1f5bb72b027bdd 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -84,7 +84,7 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
     return &getInstrInfo()->getRegisterInfo();
   }
   const Triple &getTargetTriple() const { return TargetTriple; }
-  bool enableAtomicExpand() const override;
+  bool enableExpandAtomic() const override;
   bool enableIndirectBrExpand() const override { return true; }
   bool enableMachineScheduler() const override;
   bool useAA() const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 9f24452b21fb20..77048a336e699e 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -478,7 +478,7 @@ void WebAssemblyPassConfig::addISelPrepare() {
   addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine()));
 
   // This is a no-op if atomics are not used in the module
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addISelPrepare();
 }
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 660b43a08ed3ce..4fd1a398e58e14 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -435,7 +435,7 @@ MachineFunctionInfo *X86TargetMachine::createMachineFunctionInfo(
 }
 
 void X86PassConfig::addIRPasses() {
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   // We add both pass anyway and when these two passes run, we skip the pass
   // based on the option level and option attribute.
diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index 374e91d01bdace..c230efda852ecc 100644
--- a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -84,7 +84,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void XCorePassConfig::addIRPasses() {
-  addPass(createAtomicExpandLegacyPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
index bbd161d7dad6ce..90080da8c2af9b 100644
--- a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand < %s | FileCheck --check-prefix=OPT %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expandatomic < %s | FileCheck --check-prefix=OPT %s
 
 define i32 @global_agent_monotonic_idempotent_or(ptr addrspace(1) %in) {
 ; GFX940-LABEL: global_agent_monotonic_idempotent_or:
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index 28393b84d7341f..ce8a64bca32312 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=atomicexpand < %s | FileCheck -check-prefix=IR %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=expandatomic < %s | FileCheck -check-prefix=IR %s
 ; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s
 
 define i32 @load_atomic_private_seq_cst_i32(ptr addrspace(5) %ptr) {
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
index 77ac7ee9bda425..3b73b793900f9f 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
index 6ef22406174217..88075757606735 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=atomicexpand %s | FileCheck %s
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=atomicexpand %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=expandatomic %s | FileCheck %s
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=expandatomic %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
 
 define void @atomic_swap_f16(ptr %ptr, half %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f16(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
index a896627a0560fa..a6a912c6901c64 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expandatomic %s | FileCheck %s
 
 define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_unordered(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
index a2d242ab0fc5bb..467b31fef50c61 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expandatomic %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index 177ad2a464210a..fe45501d1fd45e 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expandatomic %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expandatomic %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
index 5eedc341eb850e..0ca689c622348b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expandatomic %s | FileCheck %s
 
 define i8 @test_atomicrmw_xchg_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index a5b4bb88fdf079..3df0edc00c1751 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s --check-prefixes=CHECK,GCN
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomicexpand %s | FileCheck %s --check-prefixes=CHECK,R600
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expandatomic %s | FileCheck %s --check-prefixes=CHECK,GCN
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expandatomic %s | FileCheck %s --check-prefixes=CHECK,R600
 
 define i8 @test_atomicrmw_xchg_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
 ; GCN-LABEL: @test_atomicrmw_xchg_i8_global_agent(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
index 92e4dccb231326..d53f7000cc65ae 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=atomicexpand %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomicexpand %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=atomicexpand %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=atomicexpand %s | FileCheck -check-prefix=GFX1100 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expandatomic %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expandatomic %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expandatomic %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expandatomic %s | FileCheck -check-prefix=GFX1100 %s
 
 define float @syncscope_system(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: @syncscope_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index 18173bd284a7f6..0a2dda31c4ddaf 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=CI %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GFX9 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=atomicexpand %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomicexpand %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=atomicexpand %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=atomicexpand %s | FileCheck -check-prefix=GFX11 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expandatomic %s | FileCheck -check-prefix=CI %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expandatomic %s | FileCheck -check-prefix=GFX9 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expandatomic %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expandatomic %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expandatomic %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expandatomic %s | FileCheck -check-prefix=GFX11 %s
 
 define void @test_atomicrmw_fadd_f32_global_no_use_unsafe(ptr addrspace(1) %ptr, float %value) #0 {
 ; CI-LABEL: @test_atomicrmw_fadd_f32_global_no_use_unsafe(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
index 102cf44d9dea96..25dd17d79b5b2b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expandatomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expandatomic %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmax_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmax_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
index 06b04d009940c5..3d86f9fec62bf5 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expandatomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expandatomic %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmin_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmin_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
index cdb6c32391e7a0..176b8259c88a27 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expandatomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expandatomic %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fsub_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fsub_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
index 69626a14ae52ed..835bea807a2a6b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomicexpand %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expandatomic %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expandatomic %s | FileCheck %s
 
 define i32 @test_atomicrmw_nand_i32_flat(ptr %ptr, i32 %value) {
 ; CHECK-LABEL: @test_atomicrmw_nand_i32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
index c1a93763429bd9..158d4e2310381a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomicexpand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expandatomic %s | FileCheck -check-prefix=GFX90A %s
 
 declare i32 @llvm.amdgcn.workitem.id.x()
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
index 87c40c0337c169..428e1b54ec2583 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomicexpand %s 2>&1 | FileCheck %s
+; RUN: not --crash opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expandatomic %s 2>&1 | FileCheck %s
 ; The AtomicExpand pass cannot handle missing libcalls (yet) so reports a fatal error.
 ; CHECK: LLVM ERROR: expandAtomicOpToLibcall shouldn't fail for Load
 
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
index 3410921d7edf55..2c5441f73f5438 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=atomicexpand -codegen-opt-level=1 %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=expandatomic -codegen-opt-level=1 %s | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
index 39729ec8cb270d..c1a2d7ba302bd9 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=atomicexpand %s -codegen-opt-level=1 | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=expandatomic %s -codegen-opt-level=1 | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
index 3cbd71bed10167..06451bfd5b7a90 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
index 3ac21e1b0ed772..d37ed612400a47 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
@@ -1,4 +1,4 @@
-; RUN: opt -passes=atomicexpand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -passes=expandatomic -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
 
 define i32 @test_cmpxchg_seq_cst(ptr %addr, i32 %desired, i32 %new) {
 ; CHECK-LABEL: @test_cmpxchg_seq_cst
diff --git a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
index d3772badcd4350..759c6c677620ff 100644
--- a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=hexagon-- -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=hexagon-- -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
index 393a822c7d529b..571a9351040828 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch64 --passes=atomicexpand --mattr=+d %s | FileCheck %s
+; RUN: opt -S --mtriple=loongarch64 --passes=expandatomic --mattr=+d %s | FileCheck %s
 
 define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
 ; CHECK-LABEL: @atomicrmw_fadd_float(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
index 8946f4f26c0902..92bdf171d09af5 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch32 --passes=atomicexpand %s | FileCheck %s --check-prefix=LA32
-; RUN: opt -S --mtriple=loongarch64 --passes=atomicexpand %s | FileCheck %s --check-prefix=LA64
+; RUN: opt -S --mtriple=loongarch32 --passes=expandatomic %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 --passes=expandatomic %s | FileCheck %s --check-prefix=LA64
 
 define i8 @load_acquire_i8(ptr %ptr) {
 ; LA32-LABEL: @load_acquire_i8(
diff --git a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
index 7299f1f71f90bb..72c265b2faa953 100644
--- a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
index aa1d80ce225ce9..2c0ebcc78f7379 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
index caed477b6cc217..df4b94922d64b3 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=expandatomic -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=expandatomic -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define double @foo(ptr %dp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
index ef1f64ed45e711..edea790ce6a778 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=expandatomic -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -passes=atomicexpand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=expandatomic -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define float @bar(ptr %fp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index 6e3f965950330f..935a4be573a249 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expandatomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr8 %s | FileCheck %s
-; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expandatomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr7 %s | FileCheck --check-prefix=PWR7 %s
 
 define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
index d06d3380fdb8f9..35bab9d925def6 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -passes=expandatomic -S -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   %s | FileCheck %s
-; RUN: opt -passes=atomicexpand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expandatomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   %s | FileCheck %s
 
 define ptr @foo(ptr %p) {
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index a92a9027c54261..d17927e927a770 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=riscv32-- -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=riscv32-- -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 4a678711de1cf9..ef323695ca1a48 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -passes=atomicexpand | FileCheck %s
+; RUN: opt -S %s -passes=expandatomic | FileCheck %s
 
 ;;; NOTE: this test is actually target-independent -- any target which
 ;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
index 6d9e32ebe75903..8b096cfd041ad8 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S %s -passes=atomicexpand | FileCheck %s
+; RUN: opt -S %s -passes=expandatomic | FileCheck %s
 
 ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
 ;; instructions are not available.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 9589009852b17e..e47b1d867da3d9 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expandatomic %s | FileCheck %s
 
 
 define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index 21117d6e013383..530d998bcbed78 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -passes=atomicexpand -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=expandatomic -mtriple=x86_64-linux-gnu | FileCheck %s
 
 ; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
 ; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this 
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
index fc2df10077d070..9669322a87f9fb 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expandatomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
index 50fd402cb38d05..1f1106c460a93e 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -passes=atomicexpand -mtriple=i686-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=expandatomic -mtriple=i686-linux-gnu | FileCheck %s
 
 ; This file tests the function `llvm::expandAtomicRMWToCmpXchg`.
 ; It isn't technically target specific, but is exposed through a pass that is.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
index 5ae8ceca4d539e..c7694b6e04f233 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomicexpand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expandatomic %s | FileCheck %s
 
 define double @atomic_xchg_f64(ptr %ptr) nounwind {
 ; CHECK-LABEL: @atomic_xchg_f64(
diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
index 047f6583ec4e88..4589490a9bc806 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
@@ -39,7 +39,7 @@ static_library("CodeGen") {
     "AllocationOrder.cpp",
     "Analysis.cpp",
     "AssignmentTrackingAnalysis.cpp",
-    "AtomicExpandPass.cpp",
+    "ExpandAtomicPass.cpp",
     "BasicBlockPathCloning.cpp",
     "BasicBlockSections.cpp",
     "BasicBlockSectionsProfileReader.cpp",



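For anyone skimming only the mechanical -passes=atomicexpand -> -passes=expandatomic test churn above: those RUN-line renames exist because this patch gives the pass a new-pass-manager entry point that opt can reach by name. As a rough sketch only (the class name and constructor here follow this revision's ExpandAtomic naming and are assumptions, not the patch text; the real declaration lives in the CodeGen header this PR adds), the new-PM interface looks roughly like:

    #include "llvm/IR/PassManager.h"

    namespace llvm {

    class Function;
    class TargetMachine;

    // Hypothetical sketch of the new-PM wrapper for the atomic expansion
    // pass; names are illustrative, not copied from the patch.
    class ExpandAtomicPass : public PassInfoMixin<ExpandAtomicPass> {
      const TargetMachine *TM; // needed to query per-target lowering choices

    public:
      explicit ExpandAtomicPass(const TargetMachine *TM) : TM(TM) {}

      // New-PM entry point: expands atomic instructions the target cannot
      // lower natively and reports which analyses remain valid afterwards.
      PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
    };

    } // namespace llvm

With a wrapper of that shape registered under the "expandatomic" pipeline name, opt -passes=expandatomic in the updated RUN lines simply instantiates it through the new pass manager's name-based lookup, which is why every test changes in lockstep with the rename.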