[clang] [llvm] [CodeGen] Port AtomicExpand to new Pass Manager (PR #71220)

Rishabh Bali via cfe-commits cfe-commits at lists.llvm.org
Wed Jan 17 09:07:20 PST 2024


https://github.com/Ris-Bali updated https://github.com/llvm/llvm-project/pull/71220

From 0948e11b508e3f978f76a639f27101c8825250c7 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Sun, 14 Jan 2024 22:50:06 +0530
Subject: [PATCH 1/8] Port AtomicExpandPass to new PM

---
 llvm/include/llvm/CodeGen/AtomicExpandUtils.h |   4 +-
 llvm/include/llvm/CodeGen/ExpandAtomic.h      |  30 ++++
 .../llvm/CodeGen/MachinePassRegistry.def      |   2 +-
 llvm/include/llvm/CodeGen/Passes.h            |   8 +-
 .../llvm/CodeGen/TargetSubtargetInfo.h        |   2 +-
 llvm/include/llvm/InitializePasses.h          |   2 +-
 llvm/lib/CodeGen/CMakeLists.txt               |   2 +-
 llvm/lib/CodeGen/CodeGen.cpp                  |   2 +-
 ...micExpandPass.cpp => ExpandAtomicPass.cpp} | 149 +++++++++++-------
 llvm/lib/CodeGen/TargetSubtargetInfo.cpp      |   2 +-
 llvm/lib/Passes/PassBuilder.cpp               |   1 +
 llvm/lib/Passes/PassRegistry.def              |   1 +
 .../Target/AArch64/AArch64TargetMachine.cpp   |   2 +-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   2 +-
 llvm/lib/Target/ARC/ARCTargetMachine.cpp      |   2 +-
 llvm/lib/Target/ARM/ARMTargetMachine.cpp      |   2 +-
 llvm/lib/Target/BPF/BPFTargetMachine.cpp      |   2 +-
 llvm/lib/Target/CSKY/CSKYTargetMachine.cpp    |   2 +-
 .../Target/Hexagon/HexagonTargetMachine.cpp   |   2 +-
 llvm/lib/Target/Lanai/LanaiTargetMachine.cpp  |   2 +-
 .../LoongArch/LoongArchTargetMachine.cpp      |   2 +-
 llvm/lib/Target/M68k/M68kTargetMachine.cpp    |   2 +-
 .../lib/Target/MSP430/MSP430TargetMachine.cpp |   2 +-
 llvm/lib/Target/Mips/MipsTargetMachine.cpp    |   2 +-
 llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp  |   2 +-
 .../PowerPC/PPCExpandAtomicPseudoInsts.cpp    |   2 +-
 llvm/lib/Target/PowerPC/PPCTargetMachine.cpp  |   2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   2 +-
 llvm/lib/Target/Sparc/SparcTargetMachine.cpp  |   2 +-
 .../Target/SystemZ/SystemZTargetMachine.cpp   |   2 +-
 llvm/lib/Target/VE/VETargetMachine.cpp        |   2 +-
 .../WebAssembly/WebAssemblySubtarget.cpp      |   2 +-
 .../Target/WebAssembly/WebAssemblySubtarget.h |   2 +-
 .../WebAssembly/WebAssemblyTargetMachine.cpp  |   2 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp      |   2 +-
 llvm/lib/Target/XCore/XCoreTargetMachine.cpp  |   2 +-
 .../test/CodeGen/AMDGPU/idemponent-atomics.ll |   2 +-
 .../CodeGen/AMDGPU/private-memory-atomics.ll  |   2 +-
 .../AtomicExpand/AArch64/atomicrmw-fp.ll      |   2 +-
 .../AArch64/expand-atomicrmw-xchg-fp.ll       |   4 +-
 .../AtomicExpand/AArch64/pcsections.ll        |   2 +-
 .../AMDGPU/expand-atomic-i16-system.ll        |   2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i16.ll  |   4 +-
 .../AMDGPU/expand-atomic-i8-system.ll         |   2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i8.ll   |   4 +-
 ...and-atomic-rmw-fadd-flat-specialization.ll |   8 +-
 .../AMDGPU/expand-atomic-rmw-fadd.ll          |  12 +-
 .../AMDGPU/expand-atomic-rmw-fmax.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-fmin.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-fsub.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-nand.ll          |   4 +-
 .../expand-atomic-simplify-cfg-CAS-block.ll   |   2 +-
 .../AtomicExpand/AMDGPU/unaligned-atomic.ll   |   2 +-
 .../AtomicExpand/ARM/atomic-expansion-v7.ll   |   2 +-
 .../AtomicExpand/ARM/atomic-expansion-v8.ll   |   2 +-
 .../AtomicExpand/ARM/atomicrmw-fp.ll          |   2 +-
 .../AtomicExpand/ARM/cmpxchg-weak.ll          |   2 +-
 .../AtomicExpand/Hexagon/atomicrmw-fp.ll      |   2 +-
 .../AtomicExpand/LoongArch/atomicrmw-fp.ll    |   2 +-
 .../LoongArch/load-store-atomic.ll            |   4 +-
 .../AtomicExpand/Mips/atomicrmw-fp.ll         |   2 +-
 .../AtomicExpand/PowerPC/atomicrmw-fp.ll      |   2 +-
 .../AtomicExpand/PowerPC/cfence-double.ll     |   4 +-
 .../AtomicExpand/PowerPC/cfence-float.ll      |   4 +-
 .../AtomicExpand/PowerPC/cmpxchg.ll           |   4 +-
 .../AtomicExpand/PowerPC/issue55983.ll        |   4 +-
 .../AtomicExpand/RISCV/atomicrmw-fp.ll        |   2 +-
 .../Transforms/AtomicExpand/SPARC/libcalls.ll |   2 +-
 .../Transforms/AtomicExpand/SPARC/partword.ll |   2 +-
 .../AtomicExpand/X86/expand-atomic-libcall.ll |   2 +-
 .../X86/expand-atomic-non-integer.ll          |   2 +-
 .../AtomicExpand/X86/expand-atomic-rmw-fp.ll  |   2 +-
 .../X86/expand-atomic-rmw-initial-load.ll     |   2 +-
 .../AtomicExpand/X86/expand-atomic-xchg-fp.ll |   2 +-
 llvm/tools/opt/opt.cpp                        |   2 -
 .../gn/secondary/llvm/lib/CodeGen/BUILD.gn    |   2 +-
 76 files changed, 219 insertions(+), 154 deletions(-)
 create mode 100644 llvm/include/llvm/CodeGen/ExpandAtomic.h
 rename llvm/lib/CodeGen/{AtomicExpandPass.cpp => ExpandAtomicPass.cpp} (95%)

diff --git a/llvm/include/llvm/CodeGen/AtomicExpandUtils.h b/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
index 1cb410a0c31c69e..851492678aeba54 100644
--- a/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
+++ b/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -34,7 +34,7 @@ using CreateCmpXchgInstFun =
 /// instructions directly into a platform specific intrinsics (because, say,
 /// those intrinsics don't exist). If such a pass is able to expand cmpxchg
 /// instructions directly however, then, with this function, it could avoid two
-/// extra module passes (avoiding passes by `-atomic-expand` and itself). A
+/// extra module passes (avoiding passes by `-expand-atomic` and itself). A
 /// specific example would be PNaCl's `RewriteAtomics` pass.
 ///
 /// Given: atomicrmw some_op iN* %addr, iN %incr ordering
@@ -46,7 +46,7 @@ using CreateCmpXchgInstFun =
 /// loop:
 ///     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
 ///     %new = some_op iN %loaded, %incr
-/// ; This is what -atomic-expand will produce using this function on i686
+/// ; This is what -expand-atomic will produce using this function on i686
 /// targets:
 ///     %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
 ///     %new_loaded = extractvalue { iN, i1 } %pair, 0
diff --git a/llvm/include/llvm/CodeGen/ExpandAtomic.h b/llvm/include/llvm/CodeGen/ExpandAtomic.h
new file mode 100644
index 000000000000000..4ba49f8886ca94c
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/ExpandAtomic.h
@@ -0,0 +1,30 @@
+//===-- ExpandAtomic.h - Expand Atomic Instructions -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXPANDATOMIC_H
+#define LLVM_CODEGEN_EXPANDATOMIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+class TargetMachine;
+
+class ExpandAtomicPass : public PassInfoMixin<ExpandAtomicPass> {
+private:
+  const TargetMachine *TM;
+
+public:
+  ExpandAtomicPass(const TargetMachine *TM) : TM(TM) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_EXPANDATOMIC_H
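
For context, here is a minimal sketch of how the ExpandAtomicPass declared above could be driven from C++ under the new pass manager. It is not part of this patch; the helper name and the way the TargetMachine is obtained are illustrative assumptions.

    #include "llvm/CodeGen/ExpandAtomic.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Target/TargetMachine.h"

    // Hypothetical helper: run only atomic expansion on a single function.
    static void expandAtomicsOn(llvm::Function &F, llvm::TargetMachine *TM) {
      llvm::PassBuilder PB(TM);
      llvm::FunctionAnalysisManager FAM;
      PB.registerFunctionAnalyses(FAM); // registers pass-instrumentation etc.

      llvm::FunctionPassManager FPM;
      FPM.addPass(llvm::ExpandAtomicPass(TM)); // pass carries the TargetMachine
      FPM.run(F, FAM);
    }
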
diff --git a/llvm/include/llvm/CodeGen/MachinePassRegistry.def b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
index e789747036ef9aa..97cb359630f736e 100644
--- a/llvm/include/llvm/CodeGen/MachinePassRegistry.def
+++ b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
@@ -135,7 +135,7 @@ MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis,
 #ifndef DUMMY_FUNCTION_PASS
 #define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
 #endif
-DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
+DUMMY_FUNCTION_PASS("expand-atomic", ExpandAtomicPass, ())
 #undef DUMMY_FUNCTION_PASS
 
 #ifndef DUMMY_MACHINE_MODULE_PASS
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index bbfb8a0dbe26a42..4f63c32d9fd2eb1 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -41,10 +41,10 @@ class FileSystem;
 // List of target independent CodeGen pass IDs.
 namespace llvm {
 
-  /// AtomicExpandPass - At IR level this pass replace atomic instructions with
+  /// ExpandAtomicPass - At IR level this pass replaces atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createAtomicExpandPass();
+  FunctionPass *createExpandAtomicLegacyPass();
 
   /// createUnreachableBlockEliminationPass - The LLVM code generator does not
   /// work well with unreachable basic blocks (what live ranges make sense for a
@@ -101,9 +101,9 @@ namespace llvm {
   /// handling of complex number arithmetic
   FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
 
-  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+  /// ExpandAtomicID -- Lowers atomic operations in terms of either cmpxchg or
   /// load-linked/store-conditional loops.
-  extern char &AtomicExpandID;
+  extern char &ExpandAtomicID;
 
   /// MachineLoopInfo - This pass is a loop analysis pass.
   extern char &MachineLoopInfoID;
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index 55ef95c28543190..da1fd6737b796fe 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -215,7 +215,7 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
   virtual bool enablePostRAMachineScheduler() const;
 
   /// True if the subtarget should run the atomic expansion pass.
-  virtual bool enableAtomicExpand() const;
+  virtual bool enableExpandAtomic() const;
 
   /// True if the subtarget should run the indirectbr expansion pass.
   virtual bool enableIndirectBrExpand() const;
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index 3db639a68724077..efcfa080912b82b 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -52,7 +52,6 @@ void initializeAAResultsWrapperPassPass(PassRegistry&);
 void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
 void initializeAssignmentTrackingAnalysisPass(PassRegistry &);
 void initializeAssumptionCacheTrackerPass(PassRegistry&);
-void initializeAtomicExpandPass(PassRegistry&);
 void initializeBasicBlockPathCloningPass(PassRegistry &);
 void initializeBasicBlockSectionsProfileReaderWrapperPassPass(PassRegistry &);
 void initializeBasicBlockSectionsPass(PassRegistry &);
@@ -101,6 +100,7 @@ void initializeEarlyMachineLICMPass(PassRegistry&);
 void initializeEarlyTailDuplicatePass(PassRegistry&);
 void initializeEdgeBundlesPass(PassRegistry&);
 void initializeEHContGuardCatchretPass(PassRegistry &);
+void initializeExpandAtomicLegacyPass(PassRegistry&);
 void initializeExpandLargeFpConvertLegacyPassPass(PassRegistry&);
 void initializeExpandLargeDivRemLegacyPassPass(PassRegistry&);
 void initializeExpandMemCmpLegacyPassPass(PassRegistry &);
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index df2d1831ee5fdbf..c237574bdf542fb 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -40,7 +40,6 @@ add_llvm_component_library(LLVMCodeGen
   AllocationOrder.cpp
   Analysis.cpp
   AssignmentTrackingAnalysis.cpp
-  AtomicExpandPass.cpp
   BasicTargetTransformInfo.cpp
   BranchFolding.cpp
   BranchRelaxation.cpp
@@ -69,6 +68,7 @@ add_llvm_component_library(LLVMCodeGen
   EdgeBundles.cpp
   EHContGuardCatchret.cpp
   ExecutionDomainFix.cpp
+  ExpandAtomicPass.cpp
   ExpandLargeDivRem.cpp
   ExpandLargeFpConvert.cpp
   ExpandMemCmp.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index 418066452c17242..230776984af0ce2 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -19,7 +19,6 @@ using namespace llvm;
 /// initializeCodeGen - Initialize all passes linked into the CodeGen library.
 void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeAssignmentTrackingAnalysisPass(Registry);
-  initializeAtomicExpandPass(Registry);
   initializeBasicBlockPathCloningPass(Registry);
   initializeBasicBlockSectionsPass(Registry);
   initializeBranchFolderPassPass(Registry);
@@ -39,6 +38,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeEarlyIfPredicatorPass(Registry);
   initializeEarlyMachineLICMPass(Registry);
   initializeEarlyTailDuplicatePass(Registry);
+  initializeExpandAtomicLegacyPass(Registry);
   initializeExpandLargeDivRemLegacyPassPass(Registry);
   initializeExpandLargeFpConvertLegacyPassPass(Registry);
   initializeExpandMemCmpLegacyPassPass(Registry);
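
As a usage note (my assumption, not something this patch changes for callers): tools that link LLVMCodeGen and still drive the legacy pipeline keep working without source changes, since the one-time registration goes through initializeCodeGen():

    // Typical one-time registration at tool startup (llc-style drivers).
    llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry();
    llvm::initializeCodeGen(Registry); // now also registers ExpandAtomicLegacy
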
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/ExpandAtomicPass.cpp
similarity index 95%
rename from llvm/lib/CodeGen/AtomicExpandPass.cpp
rename to llvm/lib/CodeGen/ExpandAtomicPass.cpp
index ccf3e9ec6492105..5f8e069bafc7a64 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/ExpandAtomicPass.cpp
@@ -1,4 +1,4 @@
-//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
+//===- ExpandAtomicPass.cpp - Expand atomic instructions ------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -19,6 +19,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/InstSimplifyFolder.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/ExpandAtomic.h"
 #include "llvm/CodeGen/AtomicExpandUtils.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/TargetLowering.h"
@@ -55,23 +56,14 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "atomic-expand"
+#define DEBUG_TYPE "expand-atomic"
 
 namespace {
 
-class AtomicExpand : public FunctionPass {
+class ExpandAtomicImpl {
   const TargetLowering *TLI = nullptr;
   const DataLayout *DL = nullptr;
 
-public:
-  static char ID; // Pass identification, replacement for typeid
-
-  AtomicExpand() : FunctionPass(ID) {
-    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnFunction(Function &F) override;
-
 private:
   bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
@@ -124,6 +116,20 @@ class AtomicExpand : public FunctionPass {
   friend bool
   llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                  CreateCmpXchgInstFun CreateCmpXchg);
+
+public:
+  bool run(Function &F, const TargetMachine *TM);
+};
+
+class ExpandAtomicLegacy : public FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  ExpandAtomicLegacy() : FunctionPass(ID) {
+    initializeExpandAtomicLegacyPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
 };
 
 // IRBuilder to be used for replacement atomic instructions.
@@ -138,14 +144,15 @@ struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
 
 } // end anonymous namespace
 
-char AtomicExpand::ID = 0;
+char ExpandAtomicLegacy::ID = 0;
 
-char &llvm::AtomicExpandID = AtomicExpand::ID;
+char &llvm::ExpandAtomicID = ExpandAtomicLegacy::ID;
 
-INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
-                false)
-
-FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
+INITIALIZE_PASS_BEGIN(ExpandAtomicLegacy, DEBUG_TYPE,
+                      "Expand Atomic instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(ExpandAtomicLegacy, DEBUG_TYPE,
+                    "Expand Atomic instructions", false, false)
 
 // Helper functions to retrieve the size of atomic instructions.
 static unsigned getAtomicOpSize(LoadInst *LI) {
@@ -179,14 +186,9 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
          Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
 }
 
-bool AtomicExpand::runOnFunction(Function &F) {
-  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
-  if (!TPC)
-    return false;
-
-  auto &TM = TPC->getTM<TargetMachine>();
-  const auto *Subtarget = TM.getSubtargetImpl(F);
-  if (!Subtarget->enableAtomicExpand())
+bool ExpandAtomicImpl::run(Function &F, const TargetMachine *TM) {
+  const auto *Subtarget = TM->getSubtargetImpl(F);
+  if (!Subtarget->enableExpandAtomic())
     return false;
   TLI = Subtarget->getTargetLowering();
   DL = &F.getParent()->getDataLayout();
@@ -340,7 +342,39 @@ bool AtomicExpand::runOnFunction(Function &F) {
   return MadeChange;
 }
 
-bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
+bool ExpandAtomicLegacy::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
+  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+  if (!TPC)
+    return false;
+
+  auto *TM = &TPC->getTM<TargetMachine>();
+
+  ExpandAtomicImpl AE;
+  return AE.run(F, TM);
+}
+
+FunctionPass *llvm::createExpandAtomicLegacyPass() {
+  return new ExpandAtomicLegacy();
+}
+
+PreservedAnalyses ExpandAtomicPass::run(Function &F,
+                                        FunctionAnalysisManager &AM) {
+  ExpandAtomicImpl AE;
+
+  bool Changed = AE.run(F, TM);
+  if (!Changed)
+    return PreservedAnalyses::all();
+
+  PreservedAnalyses PA;
+  PA.preserveSet<CFGAnalyses>();
+  return PA;
+}
+
+bool ExpandAtomicImpl::bracketInstWithFences(Instruction *I,
+                                             AtomicOrdering Order) {
   ReplacementIRBuilder Builder(I, *DL);
 
   auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
@@ -355,8 +389,8 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
 }
 
 /// Get the iX type with the same bitwidth as T.
-IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
-                                                       const DataLayout &DL) {
+IntegerType *
+ExpandAtomicImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
   EVT VT = TLI->getMemValueType(DL, T);
   unsigned BitWidth = VT.getStoreSizeInBits();
   assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
@@ -366,7 +400,7 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
 /// Convert an atomic load of a non-integral type to an integer load of the
 /// equivalent bitwidth.  See the function comment on
 /// convertAtomicStoreToIntegerType for background.
-LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
+LoadInst *ExpandAtomicImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
   auto *M = LI->getModule();
   Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());
 
@@ -387,7 +421,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
 }
 
 AtomicRMWInst *
-AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
+ExpandAtomicImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   auto *M = RMWI->getModule();
   Type *NewTy =
       getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());
@@ -414,7 +448,7 @@ AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   return NewRMWI;
 }
 
-bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
+bool ExpandAtomicImpl::tryExpandAtomicLoad(LoadInst *LI) {
   switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -436,7 +470,7 @@ bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
   }
 }
 
-bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
+bool ExpandAtomicImpl::tryExpandAtomicStore(StoreInst *SI) {
   switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -451,7 +485,7 @@ bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
   }
 }
 
-bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
+bool ExpandAtomicImpl::expandAtomicLoadToLL(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -467,7 +501,7 @@ bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
   return true;
 }
 
-bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
+bool ExpandAtomicImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
   AtomicOrdering Order = LI->getOrdering();
   if (Order == AtomicOrdering::Unordered)
@@ -496,7 +530,7 @@ bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
 /// instruction select from the original atomic store, but as a migration
 /// mechanism, we convert back to the old format which the backends understand.
 /// Each backend will need individual work to recognize the new format.
-StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
+StoreInst *ExpandAtomicImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
   ReplacementIRBuilder Builder(SI, *DL);
   auto *M = SI->getModule();
   Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
@@ -514,7 +548,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
   return NewSI;
 }
 
-void AtomicExpand::expandAtomicStore(StoreInst *SI) {
+void ExpandAtomicImpl::expandAtomicStore(StoreInst *SI) {
   // This function is only called on atomic stores that are too large to be
   // atomic if implemented as a native store. So we replace them by an
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
@@ -561,7 +595,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
     NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
 }
 
-bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+bool ExpandAtomicImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   LLVMContext &Ctx = AI->getModule()->getContext();
   TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
   switch (Kind) {
@@ -843,7 +877,7 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
 /// way as a typical atomicrmw expansion. The only difference here is
 /// that the operation inside of the loop may operate upon only a
 /// part of the value.
-void AtomicExpand::expandPartwordAtomicRMW(
+void ExpandAtomicImpl::expandPartwordAtomicRMW(
     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
   AtomicOrdering MemOpOrder = AI->getOrdering();
   SyncScope::ID SSID = AI->getSyncScopeID();
@@ -887,7 +921,7 @@ void AtomicExpand::expandPartwordAtomicRMW(
 }
 
 // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
-AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
+AtomicRMWInst *ExpandAtomicImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
   AtomicRMWInst::BinOp Op = AI->getOperation();
 
@@ -922,7 +956,7 @@ AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   return NewAI;
 }
 
-bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
+bool ExpandAtomicImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   // The basic idea here is that we're expanding a cmpxchg of a
   // smaller memory size up to a word-sized cmpxchg. To do this, we
   // need to add a retry-loop for strong cmpxchg, so that
@@ -1047,7 +1081,7 @@ bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-void AtomicExpand::expandAtomicOpToLLSC(
+void ExpandAtomicImpl::expandAtomicOpToLLSC(
     Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1059,7 +1093,7 @@ void AtomicExpand::expandAtomicOpToLLSC(
   I->eraseFromParent();
 }
 
-void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
+void ExpandAtomicImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
 
   PartwordMaskValues PMV =
@@ -1085,7 +1119,8 @@ void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   AI->eraseFromParent();
 }
 
-void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
+void ExpandAtomicImpl::expandAtomicCmpXchgToMaskedIntrinsic(
+    AtomicCmpXchgInst *CI) {
   ReplacementIRBuilder Builder(CI, *DL);
 
   PartwordMaskValues PMV = createMaskInstrs(
@@ -1112,7 +1147,7 @@ void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
   CI->eraseFromParent();
 }
 
-Value *AtomicExpand::insertRMWLLSCLoop(
+Value *ExpandAtomicImpl::insertRMWLLSCLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1168,7 +1203,7 @@ Value *AtomicExpand::insertRMWLLSCLoop(
 /// way to represent a pointer cmpxchg so that we can update backends one by
 /// one.
 AtomicCmpXchgInst *
-AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
+ExpandAtomicImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   auto *M = CI->getModule();
   Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                             M->getDataLayout());
@@ -1201,7 +1236,7 @@ AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   return NewCI;
 }
 
-bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool ExpandAtomicImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -1447,7 +1482,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
+bool ExpandAtomicImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
   auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
   if (!C)
     return false;
@@ -1467,7 +1502,7 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
   }
 }
 
-bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
+bool ExpandAtomicImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     tryExpandAtomicLoad(ResultingLoad);
     return true;
@@ -1475,7 +1510,7 @@ bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   return false;
 }
 
-Value *AtomicExpand::insertRMWCmpXchgLoop(
+Value *ExpandAtomicImpl::insertRMWCmpXchgLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder, SyncScope::ID SSID,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
@@ -1536,7 +1571,7 @@ Value *AtomicExpand::insertRMWCmpXchgLoop(
   return NewLoaded;
 }
 
-bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool ExpandAtomicImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
   unsigned ValueSize = getAtomicOpSize(CI);
 
@@ -1567,7 +1602,7 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
 
   // FIXME: If FP exceptions are observable, we should force them off for the
   // loop for the FP atomics.
-  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
+  Value *Loaded = ExpandAtomicImpl::insertRMWCmpXchgLoop(
       Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
       AI->getOrdering(), AI->getSyncScopeID(),
       [&](IRBuilderBase &Builder, Value *Loaded) {
@@ -1601,7 +1636,7 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
          Size <= LargestSize;
 }
 
-void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
+void ExpandAtomicImpl::expandAtomicLoadToLibcall(LoadInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
       RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
@@ -1614,7 +1649,7 @@ void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
 }
 
-void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
+void ExpandAtomicImpl::expandAtomicStoreToLibcall(StoreInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
       RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
@@ -1627,7 +1662,7 @@ void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
 }
 
-void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
+void ExpandAtomicImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
       RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
@@ -1705,7 +1740,7 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
   llvm_unreachable("Unexpected AtomicRMW operation.");
 }
 
-void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
+void ExpandAtomicImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
   ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
 
   unsigned Size = getAtomicOpSize(I);
@@ -1744,7 +1779,7 @@ void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
 // ATOMIC libcalls to be emitted. All of the other arguments besides
 // 'I' are extracted from the Instruction subclass by the
 // caller. Depending on the particular call, some will be null.
-bool AtomicExpand::expandAtomicOpToLibcall(
+bool ExpandAtomicImpl::expandAtomicOpToLibcall(
     Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
     Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
     AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
diff --git a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
index 6c97bc0568bdeee..a8fa14bebbe6860 100644
--- a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
+++ b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
@@ -24,7 +24,7 @@ TargetSubtargetInfo::TargetSubtargetInfo(
 
 TargetSubtargetInfo::~TargetSubtargetInfo() = default;
 
-bool TargetSubtargetInfo::enableAtomicExpand() const {
+bool TargetSubtargetInfo::enableExpandAtomic() const {
   return true;
 }
 
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index d0f3a55a12b0566..c40d8aa9bbfb166 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -77,6 +77,7 @@
 #include "llvm/CodeGen/CallBrPrepare.h"
 #include "llvm/CodeGen/CodeGenPrepare.h"
 #include "llvm/CodeGen/DwarfEHPrepare.h"
+#include "llvm/CodeGen/ExpandAtomic.h"
 #include "llvm/CodeGen/ExpandLargeDivRem.h"
 #include "llvm/CodeGen/ExpandLargeFpConvert.h"
 #include "llvm/CodeGen/ExpandMemCmp.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 0b53b59787dd20e..abf3ae0dfbbedd2 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -312,6 +312,7 @@ FUNCTION_PASS("dot-post-dom", PostDomPrinter())
 FUNCTION_PASS("dot-post-dom-only", PostDomOnlyPrinter())
 FUNCTION_PASS("dse", DSEPass())
 FUNCTION_PASS("dwarf-eh-prepare", DwarfEHPreparePass(TM))
+FUNCTION_PASS("expand-atomic", ExpandAtomicPass(TM))
 FUNCTION_PASS("expand-large-div-rem", ExpandLargeDivRemPass(TM))
 FUNCTION_PASS("expand-large-fp-convert", ExpandLargeFpConvertPass(TM))
 FUNCTION_PASS("expand-memcmp", ExpandMemCmpPass(TM))
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 144610e021c58ed..449ebb6803fb0d8 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -564,7 +564,7 @@ std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
 void AArch64PassConfig::addIRPasses() {
   // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
   // ourselves.
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   // Expand any SVE vector library calls that we can't code generate directly.
   if (EnableSVEIntrinsicOpts &&
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 0f3bb3e7b0d8d02..e802c0ad89a6985 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1054,7 +1054,7 @@ void AMDGPUPassConfig::addIRPasses() {
     addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   if (TM.getOptLevel() > CodeGenOptLevel::None) {
     addPass(createAMDGPUPromoteAlloca());
diff --git a/llvm/lib/Target/ARC/ARCTargetMachine.cpp b/llvm/lib/Target/ARC/ARCTargetMachine.cpp
index 4f612ae623b986d..6f16b328af7efa8 100644
--- a/llvm/lib/Target/ARC/ARCTargetMachine.cpp
+++ b/llvm/lib/Target/ARC/ARCTargetMachine.cpp
@@ -70,7 +70,7 @@ TargetPassConfig *ARCTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void ARCPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index a99773691df123f..a5d32793e6f57eb 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -418,7 +418,7 @@ void ARMPassConfig::addIRPasses() {
   if (TM->Options.ThreadModel == ThreadModel::Single)
     addPass(createLowerAtomicPass());
   else
-    addPass(createAtomicExpandPass());
+    addPass(createExpandAtomicLegacyPass());
 
   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in
diff --git a/llvm/lib/Target/BPF/BPFTargetMachine.cpp b/llvm/lib/Target/BPF/BPFTargetMachine.cpp
index 8a6e7ae3663e0d7..d94466c8892eb19 100644
--- a/llvm/lib/Target/BPF/BPFTargetMachine.cpp
+++ b/llvm/lib/Target/BPF/BPFTargetMachine.cpp
@@ -149,7 +149,7 @@ void BPFTargetMachine::registerPassBuilderCallbacks(
 }
 
 void BPFPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
   addPass(createBPFCheckAndAdjustIR());
 
   TargetPassConfig::addIRPasses();
diff --git a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
index 8c268dc3161413b..223e27b49fe3dfa 100644
--- a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
+++ b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
@@ -118,7 +118,7 @@ TargetPassConfig *CSKYTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void CSKYPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index e7a692d67ba015e..786641ce063435a 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -339,7 +339,7 @@ void HexagonPassConfig::addIRPasses() {
     addPass(createDeadCodeEliminationPass());
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   if (!NoOpt) {
     if (EnableInitialCFGCleanup)
diff --git a/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp b/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp
index 33479720183b433..0434bf5010e880f 100644
--- a/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp
+++ b/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp
@@ -106,7 +106,7 @@ LanaiTargetMachine::createPassConfig(PassManagerBase &PassManager) {
 }
 
 void LanaiPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index 62ae1dea00d6f84..786b845e32f4808 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -161,7 +161,7 @@ void LoongArchPassConfig::addIRPasses() {
   // pointer values N iterations ahead.
   if (TM->getOptLevel() != CodeGenOptLevel::None && EnableLoopDataPrefetch)
     addPass(createLoopDataPrefetchPass());
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index af8cb9a83a050ef..e8d399e50e6c685 100644
--- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -171,7 +171,7 @@ TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void M68kPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index 283de46e57d5c4b..a8cad1d8aefcb0a 100644
--- a/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -83,7 +83,7 @@ MachineFunctionInfo *MSP430TargetMachine::createMachineFunctionInfo(
 }
 
 void MSP430PassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 074222836929297..5113b51938e7a49 100644
--- a/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -263,7 +263,7 @@ std::unique_ptr<CSEConfigBase> MipsPassConfig::getCSEConfig() const {
 
 void MipsPassConfig::addIRPasses() {
   TargetPassConfig::addIRPasses();
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
   if (getMipsSubtarget().os16())
     addPass(createMipsOs16Pass());
   if (getMipsSubtarget().inMips16HardFloat())
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index fad69f5e80a7a8a..a3a959de2d5f90a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -380,7 +380,7 @@ void NVPTXPassConfig::addIRPasses() {
     addStraightLineScalarOptimizationPasses();
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
   addPass(createNVPTXCtorDtorLoweringLegacyPass());
 
   // === LSR and other generic IR passes ===
diff --git a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
index aee57a5075ff719..a71a1b8e8439b08 100644
--- a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
@@ -23,7 +23,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "ppc-atomic-expand"
+#define DEBUG_TYPE "ppc-expand-atomic"
 
 namespace {
 
diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index d676fa86a10e775..4dc3d1937548961 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -457,7 +457,7 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
 void PPCPassConfig::addIRPasses() {
   if (TM->getOptLevel() != CodeGenOptLevel::None)
     addPass(createPPCBoolRetToIntPass());
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   // Lower generic MASSV routines to PowerPC subtarget-specific entries.
   addPass(createPPCLowerMASSVEntriesPass());
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 3abdb6003659fa2..27133b8ca952394 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -425,7 +425,7 @@ bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
 }
 
 void RISCVPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   if (getOptLevel() != CodeGenOptLevel::None) {
     if (EnableLoopDataPrefetch)
diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index b408af2ea5943d2..ac2d73d99bc3c71 100644
--- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -175,7 +175,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void SparcPassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index 73e01e3ec184427..670e7b0a75fa971 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -228,7 +228,7 @@ void SystemZPassConfig::addIRPasses() {
     addPass(createLoopDataPrefetchPass());
   }
 
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/VE/VETargetMachine.cpp b/llvm/lib/Target/VE/VETargetMachine.cpp
index 6d102bfd3926af7..c9c6f134b61faa4 100644
--- a/llvm/lib/Target/VE/VETargetMachine.cpp
+++ b/llvm/lib/Target/VE/VETargetMachine.cpp
@@ -134,7 +134,7 @@ TargetPassConfig *VETargetMachine::createPassConfig(PassManagerBase &PM) {
 
 void VEPassConfig::addIRPasses() {
   // VE requires atomic expand pass.
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index 912f61765579f83..100e3a60c5ea3ba 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -45,7 +45,7 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
       TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
       TLInfo(TM, *this) {}
 
-bool WebAssemblySubtarget::enableAtomicExpand() const {
+bool WebAssemblySubtarget::enableExpandAtomic() const {
   // If atomics are disabled, atomic ops are lowered instead of expanded
   return hasAtomics();
 }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
index 85d02b087c786ed..1f5bb72b027bdd7 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -84,7 +84,7 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
     return &getInstrInfo()->getRegisterInfo();
   }
   const Triple &getTargetTriple() const { return TargetTriple; }
-  bool enableAtomicExpand() const override;
+  bool enableExpandAtomic() const override;
   bool enableIndirectBrExpand() const override { return true; }
   bool enableMachineScheduler() const override;
   bool useAA() const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 2db1b6493cc4768..77048a336e699e1 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -478,7 +478,7 @@ void WebAssemblyPassConfig::addISelPrepare() {
   addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine()));
 
   // This is a no-op if atomics are not used in the module
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addISelPrepare();
 }
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index b92bffbe6239bb3..70dedfbc4cff146 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -435,7 +435,7 @@ MachineFunctionInfo *X86TargetMachine::createMachineFunctionInfo(
 }
 
 void X86PassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   // We add both pass anyway and when these two passes run, we skip the pass
   // based on the option level and option attribute.
diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index 345a8365ed49b3e..c230efda852ecca 100644
--- a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -84,7 +84,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void XCorePassConfig::addIRPasses() {
-  addPass(createAtomicExpandPass());
+  addPass(createExpandAtomicLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
index f45fc22783d1fb7..0a0fe156305a971 100644
--- a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand < %s | FileCheck --check-prefix=OPT %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic < %s | FileCheck --check-prefix=OPT %s
 
 define i32 @global_agent_monotonic_idempotent_or(ptr addrspace(1) %in) {
 ; GFX940-LABEL: global_agent_monotonic_idempotent_or:
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index 81ad1604756835d..fc88f3f9f5cce38 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -atomic-expand < %s | FileCheck -check-prefix=IR %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=expand-atomic < %s | FileCheck -check-prefix=IR %s
 ; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s
 
 define i32 @load_atomic_private_seq_cst_i32(ptr addrspace(5) %ptr) {
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
index 2fc848a3a810b89..d454e125844639f 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
index 47d626261bfc432..046324bd9d917f6 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -atomic-expand %s | FileCheck %s
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -atomic-expand %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=expand-atomic %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
 
 define void @atomic_swap_f16(ptr %ptr, half %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f16(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
index 2e9efe911e6d6ca..5c6c0ae31d9dd7b 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expand-atomic %s | FileCheck %s
 
 define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_unordered(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
index b846c1f77538e68..c968c4ec5cc9612 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index 7f5d6e7cb76f82f..3f902ce2d3ef535 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
index f796d3cca3036f5..f3982fcd9897df4 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
 
 define i8 @test_atomicrmw_xchg_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index 6a6e416bdbc89de..9b725ed4cb06c2a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s --check-prefixes=CHECK,GCN
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s --check-prefixes=CHECK,R600
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s --check-prefixes=CHECK,GCN
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s --check-prefixes=CHECK,R600
 
 define i8 @test_atomicrmw_xchg_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
 ; GCN-LABEL: @test_atomicrmw_xchg_i8_global_agent(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
index 5d7825bb3788763..e601021cd3ca382 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -atomic-expand %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -atomic-expand %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -atomic-expand %s | FileCheck -check-prefix=GFX1100 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expand-atomic %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expand-atomic %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expand-atomic %s | FileCheck -check-prefix=GFX1100 %s
 
 define float @syncscope_system(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: @syncscope_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index 97c041168d147b5..6be6771ed920ac1 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=CI %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GFX9 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -atomic-expand %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -atomic-expand %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -atomic-expand %s | FileCheck -check-prefix=GFX11 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=CI %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GFX9 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expand-atomic %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expand-atomic %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expand-atomic %s | FileCheck -check-prefix=GFX11 %s
 
 define void @test_atomicrmw_fadd_f32_global_no_use_unsafe(ptr addrspace(1) %ptr, float %value) #0 {
 ; CI-LABEL: @test_atomicrmw_fadd_f32_global_no_use_unsafe(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
index 9dfbe9b4eb7413d..9df002ea0b2a871 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmax_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmax_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
index 5a732653b48b149..78cf7d0769ce39f 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmin_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmin_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
index 9805c317b9215e5..5d4a219a8fe5fad 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fsub_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fsub_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
index 5fa9dcc4ad9bf07..e1b0ea7d0f5bcf1 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s
 
 define i32 @test_atomicrmw_nand_i32_flat(ptr %ptr, i32 %value) {
 ; CHECK-LABEL: @test_atomicrmw_nand_i32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
index aceb897a7d487d0..2fc671b2624227b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s
 
 declare i32 @llvm.amdgcn.workitem.id.x()
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
index 6c84474edc05bbc..bda3cc4c57a88e9 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck --check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s 2>&1 | FileCheck --check-prefix=GCN %s
 
 define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) {
 ; GCN-LABEL: @atomic_load_global_align1(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
index 353aafb9727a5b8..786aad24c4d3482 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand -codegen-opt-level=1 %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=expand-atomic -codegen-opt-level=1 %s | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
index bad28b2b6824e53..a8a212aa2cb36db 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-expand %s -codegen-opt-level=1 | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=expand-atomic %s -codegen-opt-level=1 | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
index d0268bf3e007961..50336e82719bb39 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=armv7-apple-ios7.0 -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
index f7a210d631bf957..56583ce6aed46f4 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
@@ -1,4 +1,4 @@
-; RUN: opt -atomic-expand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -passes=expand-atomic -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
 
 define i32 @test_cmpxchg_seq_cst(ptr %addr, i32 %desired, i32 %new) {
 ; CHECK-LABEL: @test_cmpxchg_seq_cst
diff --git a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
index 8827eb5d8e1088a..4d759ecbc7c7a63 100644
--- a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=hexagon-- -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=hexagon-- -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
index 43fdd25e257b82f..18977862cfca06d 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch64 --atomic-expand --mattr=+d %s | FileCheck %s
+; RUN: opt -S --mtriple=loongarch64 -passes=expand-atomic --mattr=+d %s | FileCheck %s
 
 define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
 ; CHECK-LABEL: @atomicrmw_fadd_float(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
index b0875669bc3a212..6bb42d17bd5b953 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch32 --atomic-expand %s | FileCheck %s --check-prefix=LA32
-; RUN: opt -S --mtriple=loongarch64 --atomic-expand %s | FileCheck %s --check-prefix=LA64
+; RUN: opt -S --mtriple=loongarch32 -passes=expand-atomic %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 -passes=expand-atomic %s | FileCheck %s --check-prefix=LA64
 
 define i8 @load_acquire_i8(ptr %ptr) {
 ; LA32-LABEL: @load_acquire_i8(
diff --git a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
index 2c90a70bd0ad056..acc7e2e9869a95e 100644
--- a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=mips64-mti-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
index 7e42735feabfff5..d8ab8d9b987ef35 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=powerpc64-unknown-unknown -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
index 19e5f56821d746e..90ed8128e8898f6 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -atomic-expand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -atomic-expand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define double @foo(ptr %dp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
index 62f0db00df800bb..b5be91fca077ffe 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -atomic-expand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -atomic-expand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define float @bar(ptr %fp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index 169d73cc0308d39..19a16a43f0be81e 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr8 %s | FileCheck %s
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr7 %s | FileCheck --check-prefix=PWR7 %s
 
 define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
index 342506301d0046f..b6fe0febf1e553a 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -atomic-expand -S -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   %s | FileCheck %s
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   %s | FileCheck %s
 
 define ptr @foo(ptr %p) {
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index ceaafd89990b05a..20d0347579f3056 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=riscv32-- -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=riscv32-- -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 4427c5e7ed23dc0..7557b722d6949ad 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic | FileCheck %s
 
 ;;; NOTE: this test is actually target-independent -- any target which
 ;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
index 5bcb21105df8bba..9e2385947253948 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S %s -atomic-expand | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic | FileCheck %s
 
 ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
 ;; instructions are not available.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 8d71966c04d0396..9af61056137bea0 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s
 
 
 define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index dab7677086e91c5..6105857dcb7a806 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic -mtriple=x86_64-linux-gnu | FileCheck %s
 
 ; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
 ; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this 
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
index 69837b96a90d00e..3d561d54b1146f3 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
index fba1512368ea273..592b2e8f933dc04 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand -mtriple=i686-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic -mtriple=i686-linux-gnu | FileCheck %s
 
 ; This file tests the function `llvm::expandAtomicRMWToCmpXchg`.
 ; It isn't technically target specific, but is exposed through a pass that is.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
index 2464af3336ef3f9..ba43255b418afb0 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s
 
 define double @atomic_xchg_f64(ptr %ptr) nounwind {
 ; CHECK-LABEL: @atomic_xchg_f64(
diff --git a/llvm/tools/opt/opt.cpp b/llvm/tools/opt/opt.cpp
index c649e6ecddc080e..6ed5a71d93d12aa 100644
--- a/llvm/tools/opt/opt.cpp
+++ b/llvm/tools/opt/opt.cpp
@@ -347,7 +347,6 @@ static bool shouldPinPassToLegacyPM(StringRef Pass) {
       "interleaved-load-combine",
       "unreachableblockelim",
       "verify-safepoint-ir",
-      "atomic-expand",
       "expandvp",
       "mve-tail-predication",
       "interleaved-access",
@@ -427,7 +426,6 @@ int main(int argc, char **argv) {
   initializeSelectOptimizePass(Registry);
   initializeCallBrPreparePass(Registry);
   initializeCodeGenPrepareLegacyPassPass(Registry);
-  initializeAtomicExpandPass(Registry);
   initializeWinEHPreparePass(Registry);
   initializeDwarfEHPrepareLegacyPassPass(Registry);
   initializeSafeStackLegacyPassPass(Registry);
diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
index 047f6583ec4e88b..1d5af1bf24dd183 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
@@ -39,7 +39,6 @@ static_library("CodeGen") {
     "AllocationOrder.cpp",
     "Analysis.cpp",
     "AssignmentTrackingAnalysis.cpp",
-    "AtomicExpandPass.cpp",
     "BasicBlockPathCloning.cpp",
     "BasicBlockSections.cpp",
     "BasicBlockSectionsProfileReader.cpp",
@@ -69,6 +68,7 @@ static_library("CodeGen") {
     "EarlyIfConversion.cpp",
     "EdgeBundles.cpp",
     "ExecutionDomainFix.cpp",
+    "ExpandAtomicPass.cpp",
     "ExpandLargeDivRem.cpp",
     "ExpandLargeFpConvert.cpp",
     "ExpandMemCmp.cpp",

>From 68ae55b820134067d22cce8afe9bfacb25b87aba Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Sun, 14 Jan 2024 22:58:34 +0530
Subject: [PATCH 2/8] Minor change

---
 llvm/include/llvm/InitializePasses.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index efcfa080912b82b..4c45f4202525381 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -100,7 +100,7 @@ void initializeEarlyMachineLICMPass(PassRegistry&);
 void initializeEarlyTailDuplicatePass(PassRegistry&);
 void initializeEdgeBundlesPass(PassRegistry&);
 void initializeEHContGuardCatchretPass(PassRegistry &);
-void initializeExpandAtomicPass(PassRegistry&);
+void initializeExpandAtomicLegacyPass(PassRegistry&);
 void initializeExpandLargeFpConvertLegacyPassPass(PassRegistry&);
 void initializeExpandLargeDivRemLegacyPassPass(PassRegistry&);
 void initializeExpandMemCmpLegacyPassPass(PassRegistry &);

>From 7bcf6cb98290819c764182b034ec9909cc3294c3 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Mon, 15 Jan 2024 00:02:52 +0530
Subject: [PATCH 3/8] Port expand atomic pass to new PM

---
 llvm/include/llvm/CodeGen/Passes.h | 2 +-
 llvm/include/llvm/LinkAllPasses.h  | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 4f63c32d9fd2eb1..4642adbc5039efb 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -44,7 +44,7 @@ namespace llvm {
   /// ExpandAtomicPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createExpandAtomicPass();
+  FunctionPass *createExpandAtomicLegacyPass();
 
   /// createUnreachableBlockEliminationPass - The LLVM code generator does not
   /// work well with unreachable basic blocks (what live ranges make sense for a
diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h
index fe7fedad18bc0e5..bb49055ef19c384 100644
--- a/llvm/include/llvm/LinkAllPasses.h
+++ b/llvm/include/llvm/LinkAllPasses.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_LINKALLPASSES_H
 #define LLVM_LINKALLPASSES_H
 
+#include "CodeGen/Passes.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysisEvaluator.h"
 #include "llvm/Analysis/AliasSetTracker.h"
@@ -118,6 +119,7 @@ namespace {
       (void) llvm::createGVNPass();
       (void) llvm::createPostDomTree();
       (void) llvm::createMergeICmpsLegacyPass();
+      (void) llvm::createExpandAtomicLegacyPass();
       (void) llvm::createExpandLargeDivRemPass();
       (void)llvm::createExpandMemCmpLegacyPass();
       (void) llvm::createExpandVectorPredicationPass();
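
With the new-PM ExpandAtomicPass available (and the legacy wrapper still reachable
through createExpandAtomicLegacyPass for the old pipeline), the pass can be run from
C++ as well as through opt -passes=expand-atomic. The following is a minimal sketch
only, using the names as they stand at this point in the series (a later patch below
renames the class back to AtomicExpandPass); the helper function and its signature
are illustrative and not part of the patch:

  #include "llvm/CodeGen/ExpandAtomic.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Target/TargetMachine.h"

  using namespace llvm;

  // Hypothetical driver: builds a one-pass function pipeline and runs it.
  static void runExpandAtomicOn(Function &F, const TargetMachine *TM) {
    PassBuilder PB;
    FunctionAnalysisManager FAM;
    PB.registerFunctionAnalyses(FAM); // registers pass instrumentation and analyses

    FunctionPassManager FPM;
    FPM.addPass(ExpandAtomicPass(TM)); // TM-parameterized constructor from this patch
    FPM.run(F, FAM);
  }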

>From bae1090a41dc4606d912c063f484c2fb5e311bac Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Mon, 15 Jan 2024 13:16:29 +0530
Subject: [PATCH 4/8] Fix test failures

---
 clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu     |  2 +-
 .../test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl | 12 ++++++------
 .../CodeGen/AArch64/partial-pipeline-execution.ll    |  4 ++--
 .../CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll     |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
index 946927d88a1ee1f..6ac9c7bdef639b9 100644
--- a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
+++ b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
@@ -1,5 +1,5 @@
 // RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -fcuda-is-device \
-// RUN:   -target-cpu gfx90a -Rpass=atomic-expand -S -o - 2>&1 | \
+// RUN:   -target-cpu gfx90a -Rpass=expand-atomic -S -o - 2>&1 | \
 // RUN:   FileCheck %s --check-prefix=GFX90A-CAS
 
 // REQUIRES: amdgpu-registered-target
diff --git a/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl b/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
index a5321ea7c158dac..5d5464be04a090d 100644
--- a/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
+++ b/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
@@ -1,9 +1,9 @@
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -O0 -triple=amdgcn-amd-amdhsa -target-cpu gfx90a \
-// RUN:     -Rpass=atomic-expand -S -o - 2>&1 | \
+// RUN:     -Rpass=expand-atomic -S -o - 2>&1 | \
 // RUN:     FileCheck %s --check-prefix=REMARK
 
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -O0 -triple=amdgcn-amd-amdhsa -target-cpu gfx90a \
-// RUN:     -Rpass=atomic-expand -S -emit-llvm -o - 2>&1 | \
+// RUN:     -Rpass=expand-atomic -S -emit-llvm -o - 2>&1 | \
 // RUN:     FileCheck %s --check-prefix=GFX90A-CAS
 
 // REQUIRES: amdgpu-registered-target
@@ -26,10 +26,10 @@ typedef enum memory_scope {
 #endif
 } memory_scope;
 
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at workgroup-one-as memory scope [-Rpass=atomic-expand]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at agent-one-as memory scope [-Rpass=atomic-expand]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at one-as memory scope [-Rpass=atomic-expand]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at wavefront-one-as memory scope [-Rpass=atomic-expand]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at workgroup-one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at agent-one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at wavefront-one-as memory scope [-Rpass=expand-atomic]
 // GFX90A-CAS-LABEL: @atomic_cas
 // GFX90A-CAS: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic
 // GFX90A-CAS: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("agent-one-as") monotonic
diff --git a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
index c2ef2fa16a9a282..fc071e29c9467fa 100644
--- a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
+++ b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -O3 %s -o %t.s
-; RUN: llc -O3 -stop-after=atomic-expand %s -o %t.mir
-; RUN: llc -O3 -start-after=atomic-expand %s -o %t2.s
+; RUN: llc -O3 -stop-after=expand-atomic %s -o %t.mir
+; RUN: llc -O3 -start-after=expand-atomic %s -o %t2.s
 
 ; If we add tti pass correctly files should be identical
 ; Otherwise LSR will use default TargetTransformInfo and
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
index 2f7d1e9a6efafda..fcb32f10493d428 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs --pass-remarks=atomic-expand \
+; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs --pass-remarks=expand-atomic \
 ; RUN:      %s -o - 2>&1 | FileCheck %s --check-prefix=GFX90A-CAS
 
 ; GFX90A-CAS: A compare and swap loop was generated for an atomic fadd operation at system memory scope
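
The strings passed to -Rpass=, --pass-remarks= and -stop-after=/-start-after= have to
match the pass's registered name and DEBUG_TYPE, which this series changes from
atomic-expand to expand-atomic (and the next patch changes back), hence the test
updates above. A rough sketch of how such a remark is tied to the DEBUG_TYPE follows
(the remark name "Passed" and the helper are illustrative, not the pass's exact code):

  #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  #include "llvm/IR/DiagnosticInfo.h"
  #include "llvm/IR/Instructions.h"

  #define DEBUG_TYPE "expand-atomic" // must match the -Rpass= / --pass-remarks= filter

  using namespace llvm;

  // Hypothetical helper: reports that one atomicrmw was expanded to a CAS loop.
  static void remarkCASLoop(OptimizationRemarkEmitter &ORE, AtomicRMWInst *AI) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
             << "A compare and swap loop was generated for an atomic operation";
    });
  }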

>From b43cdf143908b26a5b37184833a8f217b0a28d1f Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Mon, 15 Jan 2024 22:29:17 +0530
Subject: [PATCH 5/8] Revert to original name

---
 .../CodeGenCUDA/atomics-remarks-gfx90a.cu     |   2 +-
 .../atomics-cas-remarks-gfx90a.cl             |  12 +--
 .../{ExpandAtomic.h => AtomicExpand.h}        |  12 +--
 llvm/include/llvm/CodeGen/AtomicExpandUtils.h |   4 +-
 .../llvm/CodeGen/MachinePassRegistry.def      |   2 +-
 llvm/include/llvm/CodeGen/Passes.h            |   8 +-
 .../llvm/CodeGen/TargetSubtargetInfo.h        |   2 +-
 llvm/include/llvm/InitializePasses.h          |   2 +-
 llvm/include/llvm/LinkAllPasses.h             |   2 +-
 ...andAtomicPass.cpp => AtomicExpandPass.cpp} | 100 +++++++++---------
 llvm/lib/CodeGen/CMakeLists.txt               |   2 +-
 llvm/lib/CodeGen/CodeGen.cpp                  |   2 +-
 llvm/lib/CodeGen/TargetSubtargetInfo.cpp      |   2 +-
 llvm/lib/Passes/PassBuilder.cpp               |   2 +-
 llvm/lib/Passes/PassRegistry.def              |   2 +-
 .../Target/AArch64/AArch64TargetMachine.cpp   |   2 +-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   2 +-
 llvm/lib/Target/ARC/ARCTargetMachine.cpp      |   2 +-
 llvm/lib/Target/ARM/ARMTargetMachine.cpp      |   2 +-
 llvm/lib/Target/BPF/BPFTargetMachine.cpp      |   2 +-
 llvm/lib/Target/CSKY/CSKYTargetMachine.cpp    |   2 +-
 .../Target/Hexagon/HexagonTargetMachine.cpp   |   2 +-
 llvm/lib/Target/Lanai/LanaiTargetMachine.cpp  |   2 +-
 .../LoongArchExpandAtomicPseudoInsts.cpp      |   2 +-
 .../LoongArch/LoongArchTargetMachine.cpp      |   2 +-
 llvm/lib/Target/M68k/M68kTargetMachine.cpp    |   2 +-
 .../lib/Target/MSP430/MSP430TargetMachine.cpp |   2 +-
 llvm/lib/Target/Mips/MipsTargetMachine.cpp    |   2 +-
 llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp  |   2 +-
 .../PowerPC/PPCExpandAtomicPseudoInsts.cpp    |   2 +-
 llvm/lib/Target/PowerPC/PPCTargetMachine.cpp  |   2 +-
 .../RISCV/RISCVExpandAtomicPseudoInsts.cpp    |   2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   2 +-
 llvm/lib/Target/Sparc/SparcTargetMachine.cpp  |   2 +-
 .../Target/SystemZ/SystemZTargetMachine.cpp   |   2 +-
 llvm/lib/Target/VE/VETargetMachine.cpp        |   2 +-
 .../WebAssembly/WebAssemblySubtarget.cpp      |   2 +-
 .../Target/WebAssembly/WebAssemblySubtarget.h |   2 +-
 .../WebAssembly/WebAssemblyTargetMachine.cpp  |   2 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp      |   2 +-
 llvm/lib/Target/XCore/XCoreTargetMachine.cpp  |   2 +-
 .../AArch64/partial-pipeline-execution.ll     |   4 +-
 .../AMDGPU/atomics-cas-remarks-gfx90a.ll      |   2 +-
 .../test/CodeGen/AMDGPU/idemponent-atomics.ll |   2 +-
 .../CodeGen/AMDGPU/private-memory-atomics.ll  |   2 +-
 .../AtomicExpand/AArch64/atomicrmw-fp.ll      |   2 +-
 .../AArch64/expand-atomicrmw-xchg-fp.ll       |   4 +-
 .../AtomicExpand/AArch64/pcsections.ll        |   2 +-
 .../AMDGPU/expand-atomic-i16-system.ll        |   2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i16.ll  |   4 +-
 .../AMDGPU/expand-atomic-i8-system.ll         |   2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i8.ll   |   4 +-
 ...and-atomic-rmw-fadd-flat-specialization.ll |   8 +-
 .../AMDGPU/expand-atomic-rmw-fadd.ll          |  12 +--
 .../AMDGPU/expand-atomic-rmw-fmax.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-fmin.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-fsub.ll          |   4 +-
 .../AMDGPU/expand-atomic-rmw-nand.ll          |   4 +-
 .../expand-atomic-simplify-cfg-CAS-block.ll   |   2 +-
 .../AtomicExpand/AMDGPU/unaligned-atomic.ll   |   2 +-
 .../AtomicExpand/ARM/atomic-expansion-v7.ll   |   2 +-
 .../AtomicExpand/ARM/atomic-expansion-v8.ll   |   2 +-
 .../AtomicExpand/ARM/atomicrmw-fp.ll          |   2 +-
 .../AtomicExpand/ARM/cmpxchg-weak.ll          |   2 +-
 .../AtomicExpand/Hexagon/atomicrmw-fp.ll      |   2 +-
 .../AtomicExpand/LoongArch/atomicrmw-fp.ll    |   2 +-
 .../LoongArch/load-store-atomic.ll            |   4 +-
 .../AtomicExpand/Mips/atomicrmw-fp.ll         |   2 +-
 .../AtomicExpand/PowerPC/atomicrmw-fp.ll      |   2 +-
 .../AtomicExpand/PowerPC/cfence-double.ll     |   4 +-
 .../AtomicExpand/PowerPC/cfence-float.ll      |   4 +-
 .../AtomicExpand/PowerPC/cmpxchg.ll           |   4 +-
 .../AtomicExpand/PowerPC/issue55983.ll        |   4 +-
 .../AtomicExpand/RISCV/atomicrmw-fp.ll        |   2 +-
 .../Transforms/AtomicExpand/SPARC/libcalls.ll |   2 +-
 .../Transforms/AtomicExpand/SPARC/partword.ll |   2 +-
 .../AtomicExpand/X86/expand-atomic-libcall.ll |   2 +-
 .../X86/expand-atomic-non-integer.ll          |   2 +-
 .../AtomicExpand/X86/expand-atomic-rmw-fp.ll  |   2 +-
 .../X86/expand-atomic-rmw-initial-load.ll     |   2 +-
 .../AtomicExpand/X86/expand-atomic-xchg-fp.ll |   2 +-
 .../gn/secondary/llvm/lib/CodeGen/BUILD.gn    |   2 +-
 82 files changed, 166 insertions(+), 166 deletions(-)
 rename llvm/include/llvm/CodeGen/{ExpandAtomic.h => AtomicExpand.h} (64%)
 rename llvm/lib/CodeGen/{ExpandAtomicPass.cpp => AtomicExpandPass.cpp} (96%)

diff --git a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
index 6ac9c7bdef639b9..946927d88a1ee1f 100644
--- a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
+++ b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
@@ -1,5 +1,5 @@
 // RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -fcuda-is-device \
-// RUN:   -target-cpu gfx90a -Rpass=expand-atomic -S -o - 2>&1 | \
+// RUN:   -target-cpu gfx90a -Rpass=atomic-expand -S -o - 2>&1 | \
 // RUN:   FileCheck %s --check-prefix=GFX90A-CAS
 
 // REQUIRES: amdgpu-registered-target
diff --git a/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl b/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
index 5d5464be04a090d..a5321ea7c158dac 100644
--- a/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
+++ b/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
@@ -1,9 +1,9 @@
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -O0 -triple=amdgcn-amd-amdhsa -target-cpu gfx90a \
-// RUN:     -Rpass=expand-atomic -S -o - 2>&1 | \
+// RUN:     -Rpass=atomic-expand -S -o - 2>&1 | \
 // RUN:     FileCheck %s --check-prefix=REMARK
 
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -O0 -triple=amdgcn-amd-amdhsa -target-cpu gfx90a \
-// RUN:     -Rpass=expand-atomic -S -emit-llvm -o - 2>&1 | \
+// RUN:     -Rpass=atomic-expand -S -emit-llvm -o - 2>&1 | \
 // RUN:     FileCheck %s --check-prefix=GFX90A-CAS
 
 // REQUIRES: amdgpu-registered-target
@@ -26,10 +26,10 @@ typedef enum memory_scope {
 #endif
 } memory_scope;
 
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at workgroup-one-as memory scope [-Rpass=expand-atomic]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at agent-one-as memory scope [-Rpass=expand-atomic]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at one-as memory scope [-Rpass=expand-atomic]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at wavefront-one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at workgroup-one-as memory scope [-Rpass=atomic-expand]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at agent-one-as memory scope [-Rpass=atomic-expand]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at one-as memory scope [-Rpass=atomic-expand]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at wavefront-one-as memory scope [-Rpass=atomic-expand]
 // GFX90A-CAS-LABEL: @atomic_cas
 // GFX90A-CAS: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic
 // GFX90A-CAS: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("agent-one-as") monotonic
diff --git a/llvm/include/llvm/CodeGen/ExpandAtomic.h b/llvm/include/llvm/CodeGen/AtomicExpand.h
similarity index 64%
rename from llvm/include/llvm/CodeGen/ExpandAtomic.h
rename to llvm/include/llvm/CodeGen/AtomicExpand.h
index 4ba49f8886ca94c..1b8a988ef486649 100644
--- a/llvm/include/llvm/CodeGen/ExpandAtomic.h
+++ b/llvm/include/llvm/CodeGen/AtomicExpand.h
@@ -1,4 +1,4 @@
-//===-- ExpandAtomic.h - Expand Atomic Instructions -------------*- C++ -*-===//
+//===-- AtomicExpand.h - Expand Atomic Instructions -------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_CODEGEN_EXPANDATOMIC_H
-#define LLVM_CODEGEN_EXPANDATOMIC_H
+#ifndef LLVM_CODEGEN_ATOMICEXPAND_H
+#define LLVM_CODEGEN_ATOMICEXPAND_H
 
 #include "llvm/IR/PassManager.h"
 
@@ -16,15 +16,15 @@ namespace llvm {
 class Function;
 class TargetMachine;
 
-class ExpandAtomicPass : public PassInfoMixin<ExpandAtomicPass> {
+class AtomicExpandPass : public PassInfoMixin<AtomicExpandPass> {
 private:
   const TargetMachine *TM;
 
 public:
-  ExpandAtomicPass(const TargetMachine *TM) : TM(TM) {}
+  AtomicExpandPass(const TargetMachine *TM) : TM(TM) {}
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
 } // end namespace llvm
 
-#endif // LLVM_CODEGEN_EXPANDATOMIC_H
+#endif // LLVM_CODEGEN_ATOMICEXPAND_H
diff --git a/llvm/include/llvm/CodeGen/AtomicExpandUtils.h b/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
index 851492678aeba54..1cb410a0c31c69e 100644
--- a/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
+++ b/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -34,7 +34,7 @@ using CreateCmpXchgInstFun =
 /// instructions directly into a platform specific intrinsics (because, say,
 /// those intrinsics don't exist). If such a pass is able to expand cmpxchg
 /// instructions directly however, then, with this function, it could avoid two
-/// extra module passes (avoiding passes by `-expand-atomic` and itself). A
+/// extra module passes (avoiding passes by `-atomic-expand` and itself). A
 /// specific example would be PNaCl's `RewriteAtomics` pass.
 ///
 /// Given: atomicrmw some_op iN* %addr, iN %incr ordering
@@ -46,7 +46,7 @@ using CreateCmpXchgInstFun =
 /// loop:
 ///     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
 ///     %new = some_op iN %loaded, %incr
-/// ; This is what -expand-atomic will produce using this function on i686
+/// ; This is what -atomic-expand will produce using this function on i686
 /// targets:
 ///     %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
 ///     %new_loaded = extractvalue { iN, i1 } %pair, 0
diff --git a/llvm/include/llvm/CodeGen/MachinePassRegistry.def b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
index 97cb359630f736e..e789747036ef9aa 100644
--- a/llvm/include/llvm/CodeGen/MachinePassRegistry.def
+++ b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
@@ -135,7 +135,7 @@ MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis,
 #ifndef DUMMY_FUNCTION_PASS
 #define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
 #endif
-DUMMY_FUNCTION_PASS("expand-atomic", ExpandAtomicPass, ())
+DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
 #undef DUMMY_FUNCTION_PASS
 
 #ifndef DUMMY_MACHINE_MODULE_PASS
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 4642adbc5039efb..3f0d81fa1d14bad 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -41,10 +41,10 @@ class FileSystem;
 // List of target independent CodeGen pass IDs.
 namespace llvm {
 
-  /// ExpandAtomicPass - At IR level this pass replace atomic instructions with
+  /// AtomicExpandPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createExpandAtomicLegacyPass();
+  FunctionPass *createAtomicExpandLegacyPass();
 
   /// createUnreachableBlockEliminationPass - The LLVM code generator does not
   /// work well with unreachable basic blocks (what live ranges make sense for a
@@ -101,9 +101,9 @@ namespace llvm {
   /// handling of complex number arithmetic
   FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
 
-  /// ExpandAtomicID -- Lowers atomic operations in terms of either cmpxchg
+  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
   /// load-linked/store-conditional loops.
-  extern char &ExpandAtomicID;
+  extern char &AtomicExpandID;
 
   /// MachineLoopInfo - This pass is a loop analysis pass.
   extern char &MachineLoopInfoID;
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index da1fd6737b796fe..55ef95c28543190 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -215,7 +215,7 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
   virtual bool enablePostRAMachineScheduler() const;
 
   /// True if the subtarget should run the atomic expansion pass.
-  virtual bool enableExpandAtomic() const;
+  virtual bool enableAtomicExpand() const;
 
   /// True if the subtarget should run the indirectbr expansion pass.
   virtual bool enableIndirectBrExpand() const;
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index 4c45f4202525381..305a2280a0e4a79 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -52,6 +52,7 @@ void initializeAAResultsWrapperPassPass(PassRegistry&);
 void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
 void initializeAssignmentTrackingAnalysisPass(PassRegistry &);
 void initializeAssumptionCacheTrackerPass(PassRegistry&);
+void initializeAtomicExpandLegacyPass(PassRegistry&);
 void initializeBasicBlockPathCloningPass(PassRegistry &);
 void initializeBasicBlockSectionsProfileReaderWrapperPassPass(PassRegistry &);
 void initializeBasicBlockSectionsPass(PassRegistry &);
@@ -100,7 +101,6 @@ void initializeEarlyMachineLICMPass(PassRegistry&);
 void initializeEarlyTailDuplicatePass(PassRegistry&);
 void initializeEdgeBundlesPass(PassRegistry&);
 void initializeEHContGuardCatchretPass(PassRegistry &);
-void initializeExpandAtomicLegacyPass(PassRegistry&);
 void initializeExpandLargeFpConvertLegacyPassPass(PassRegistry&);
 void initializeExpandLargeDivRemLegacyPassPass(PassRegistry&);
 void initializeExpandMemCmpLegacyPassPass(PassRegistry &);
diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h
index bb49055ef19c384..dc33cb29639e159 100644
--- a/llvm/include/llvm/LinkAllPasses.h
+++ b/llvm/include/llvm/LinkAllPasses.h
@@ -65,6 +65,7 @@ namespace {
       (void) llvm::createSCEVAAWrapperPass();
       (void) llvm::createTypeBasedAAWrapperPass();
       (void) llvm::createScopedNoAliasAAWrapperPass();
+      (void) llvm::createAtomicExpandLegacyPass();
       (void) llvm::createBreakCriticalEdgesPass();
       (void) llvm::createCallGraphDOTPrinterPass();
       (void) llvm::createCallGraphViewerPass();
@@ -119,7 +120,6 @@ namespace {
       (void) llvm::createGVNPass();
       (void) llvm::createPostDomTree();
       (void) llvm::createMergeICmpsLegacyPass();
-      (void) llvm::createExpandAtomicLegacyPass();
       (void) llvm::createExpandLargeDivRemPass();
       (void)llvm::createExpandMemCmpLegacyPass();
       (void) llvm::createExpandVectorPredicationPass();
diff --git a/llvm/lib/CodeGen/ExpandAtomicPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
similarity index 96%
rename from llvm/lib/CodeGen/ExpandAtomicPass.cpp
rename to llvm/lib/CodeGen/AtomicExpandPass.cpp
index 5f8e069bafc7a64..ca7e00a80ae4769 100644
--- a/llvm/lib/CodeGen/ExpandAtomicPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1,4 +1,4 @@
-//===- ExpandAtomicPass.cpp - Expand atomic instructions ------------------===//
+//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -19,7 +19,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/InstSimplifyFolder.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/CodeGen/ExpandAtomic.h"
+#include "llvm/CodeGen/AtomicExpand.h"
 #include "llvm/CodeGen/AtomicExpandUtils.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/TargetLowering.h"
@@ -56,11 +56,11 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "expand-atomic"
+#define DEBUG_TYPE "atomic-expand"
 
 namespace {
 
-class ExpandAtomicImpl {
+class AtomicExpandImpl {
   const TargetLowering *TLI = nullptr;
   const DataLayout *DL = nullptr;
 
@@ -68,7 +68,7 @@ class ExpandAtomicImpl {
   bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
   LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
   bool tryExpandAtomicLoad(LoadInst *LI);
   bool expandAtomicLoadToLL(LoadInst *LI);
   bool expandAtomicLoadToCmpXchg(LoadInst *LI);
   StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
@@ -121,12 +121,12 @@ class ExpandAtomicImpl {
   bool run(Function &F, const TargetMachine *TM);
 };
 
-class ExpandAtomicLegacy : public FunctionPass {
+class AtomicExpandLegacy : public FunctionPass {
 public:
   static char ID; // Pass identification, replacement for typeid
 
-  ExpandAtomicLegacy() : FunctionPass(ID) {
-    initializeExpandAtomicLegacyPass(*PassRegistry::getPassRegistry());
+  AtomicExpandLegacy() : FunctionPass(ID) {
+    initializeAtomicExpandLegacyPass(*PassRegistry::getPassRegistry());
   }
 
   bool runOnFunction(Function &F) override;
@@ -144,14 +144,14 @@ struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
 
 } // end anonymous namespace
 
-char ExpandAtomicLegacy::ID = 0;
+char AtomicExpandLegacy::ID = 0;
 
-char &llvm::ExpandAtomicID = ExpandAtomicLegacy::ID;
+char &llvm::AtomicExpandID = AtomicExpandLegacy::ID;
 
-INITIALIZE_PASS_BEGIN(ExpandAtomicLegacy, DEBUG_TYPE,
+INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
                       "Expand Atomic instructions", false, false)
 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_END(ExpandAtomicLegacy, DEBUG_TYPE,
+INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
                     "Expand Atomic instructions", false, false)
 
 // Helper functions to retrieve the size of atomic instructions.
@@ -186,9 +186,9 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
          Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
 }
 
-bool ExpandAtomicImpl::run(Function &F, const TargetMachine *TM) {
+bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
   const auto *Subtarget = TM->getSubtargetImpl(F);
-  if (!Subtarget->enableExpandAtomic())
+  if (!Subtarget->enableAtomicExpand())
     return false;
   TLI = Subtarget->getTargetLowering();
   DL = &F.getParent()->getDataLayout();
@@ -342,7 +342,7 @@ bool ExpandAtomicImpl::run(Function &F, const TargetMachine *TM) {
   return MadeChange;
 }
 
-bool ExpandAtomicLegacy::runOnFunction(Function &F) {
+bool AtomicExpandLegacy::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;
 
@@ -352,17 +352,17 @@ bool ExpandAtomicLegacy::runOnFunction(Function &F) {
 
   auto *TM = &TPC->getTM<TargetMachine>();
 
-  ExpandAtomicImpl AE;
+  AtomicExpandImpl AE;
   return AE.run(F, TM);
 }
 
-FunctionPass *llvm::createExpandAtomicLegacyPass() {
-  return new ExpandAtomicLegacy();
+FunctionPass *llvm::createAtomicExpandLegacyPass() {
+  return new AtomicExpandLegacy();
 }
 
-PreservedAnalyses ExpandAtomicPass::run(Function &F,
+PreservedAnalyses AtomicExpandPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
-  ExpandAtomicImpl AE;
+  AtomicExpandImpl AE;
 
   bool Changed = AE.run(F, TM);
   if (!Changed)
@@ -373,7 +373,7 @@ PreservedAnalyses ExpandAtomicPass::run(Function &F,
   return PA;
 }
 
-bool ExpandAtomicImpl::bracketInstWithFences(Instruction *I,
+bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                              AtomicOrdering Order) {
   ReplacementIRBuilder Builder(I, *DL);
 
@@ -390,7 +390,7 @@ bool ExpandAtomicImpl::bracketInstWithFences(Instruction *I,
 
 /// Get the iX type with the same bitwidth as T.
 IntegerType *
-ExpandAtomicImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
+AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
   EVT VT = TLI->getMemValueType(DL, T);
   unsigned BitWidth = VT.getStoreSizeInBits();
   assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
@@ -400,7 +400,7 @@ ExpandAtomicImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
 /// Convert an atomic load of a non-integral type to an integer load of the
 /// equivalent bitwidth.  See the function comment on
 /// convertAtomicStoreToIntegerType for background.
-LoadInst *ExpandAtomicImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
+LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
   auto *M = LI->getModule();
   Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());
 
@@ -421,7 +421,7 @@ LoadInst *ExpandAtomicImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
 }
 
 AtomicRMWInst *
-ExpandAtomicImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
+AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   auto *M = RMWI->getModule();
   Type *NewTy =
       getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());
@@ -448,7 +448,7 @@ ExpandAtomicImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
   return NewRMWI;
 }
 
-bool ExpandAtomicImpl::tryExpandAtomicLoad(LoadInst *LI) {
+bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
   switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -470,7 +470,7 @@ bool ExpandAtomicImpl::tryExpandAtomicLoad(LoadInst *LI) {
   }
 }
 
-bool ExpandAtomicImpl::tryExpandAtomicStore(StoreInst *SI) {
+bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
   switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -485,7 +485,7 @@ bool ExpandAtomicImpl::tryExpandAtomicStore(StoreInst *SI) {
   }
 }
 
-bool ExpandAtomicImpl::expandAtomicLoadToLL(LoadInst *LI) {
+bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -501,7 +501,7 @@ bool ExpandAtomicImpl::expandAtomicLoadToLL(LoadInst *LI) {
   return true;
 }
 
-bool ExpandAtomicImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
+bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
   ReplacementIRBuilder Builder(LI, *DL);
   AtomicOrdering Order = LI->getOrdering();
   if (Order == AtomicOrdering::Unordered)
@@ -530,7 +530,7 @@ bool ExpandAtomicImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
 /// instruction select from the original atomic store, but as a migration
 /// mechanism, we convert back to the old format which the backends understand.
 /// Each backend will need individual work to recognize the new format.
-StoreInst *ExpandAtomicImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
+StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
   ReplacementIRBuilder Builder(SI, *DL);
   auto *M = SI->getModule();
   Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
@@ -548,7 +548,7 @@ StoreInst *ExpandAtomicImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
   return NewSI;
 }
 
-void ExpandAtomicImpl::expandAtomicStore(StoreInst *SI) {
+void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
   // This function is only called on atomic stores that are too large to be
   // atomic if implemented as a native store. So we replace them by an
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
@@ -595,7 +595,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
     NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
 }
 
-bool ExpandAtomicImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   LLVMContext &Ctx = AI->getModule()->getContext();
   TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
   switch (Kind) {
@@ -877,7 +877,7 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
 /// way as a typical atomicrmw expansion. The only difference here is
 /// that the operation inside of the loop may operate upon only a
 /// part of the value.
-void ExpandAtomicImpl::expandPartwordAtomicRMW(
+void AtomicExpandImpl::expandPartwordAtomicRMW(
     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
   AtomicOrdering MemOpOrder = AI->getOrdering();
   SyncScope::ID SSID = AI->getSyncScopeID();
@@ -921,7 +921,7 @@ void ExpandAtomicImpl::expandPartwordAtomicRMW(
 }
 
 // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
-AtomicRMWInst *ExpandAtomicImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
+AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
   AtomicRMWInst::BinOp Op = AI->getOperation();
 
@@ -956,7 +956,7 @@ AtomicRMWInst *ExpandAtomicImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   return NewAI;
 }
 
-bool ExpandAtomicImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   // The basic idea here is that we're expanding a cmpxchg of a
   // smaller memory size up to a word-sized cmpxchg. To do this, we
   // need to add a retry-loop for strong cmpxchg, so that
@@ -1081,7 +1081,7 @@ bool ExpandAtomicImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-void ExpandAtomicImpl::expandAtomicOpToLLSC(
+void AtomicExpandImpl::expandAtomicOpToLLSC(
     Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1093,7 +1093,7 @@ void ExpandAtomicImpl::expandAtomicOpToLLSC(
   I->eraseFromParent();
 }
 
-void ExpandAtomicImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
+void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
 
   PartwordMaskValues PMV =
@@ -1119,7 +1119,7 @@ void ExpandAtomicImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
   AI->eraseFromParent();
 }
 
-void ExpandAtomicImpl::expandAtomicCmpXchgToMaskedIntrinsic(
+void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
     AtomicCmpXchgInst *CI) {
   ReplacementIRBuilder Builder(CI, *DL);
 
@@ -1147,7 +1147,7 @@ void ExpandAtomicImpl::expandAtomicCmpXchgToMaskedIntrinsic(
   CI->eraseFromParent();
 }
 
-Value *ExpandAtomicImpl::insertRMWLLSCLoop(
+Value *AtomicExpandImpl::insertRMWLLSCLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1203,7 +1203,7 @@ Value *ExpandAtomicImpl::insertRMWLLSCLoop(
 /// way to represent a pointer cmpxchg so that we can update backends one by
 /// one.
 AtomicCmpXchgInst *
-ExpandAtomicImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
+AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   auto *M = CI->getModule();
   Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                             M->getDataLayout());
@@ -1236,7 +1236,7 @@ ExpandAtomicImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
   return NewCI;
 }
 
-bool ExpandAtomicImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -1482,7 +1482,7 @@ bool ExpandAtomicImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }
 
-bool ExpandAtomicImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
+bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
   auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
   if (!C)
     return false;
@@ -1502,7 +1502,7 @@ bool ExpandAtomicImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
   }
 }
 
-bool ExpandAtomicImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
+bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     tryExpandAtomicLoad(ResultingLoad);
     return true;
@@ -1510,7 +1510,7 @@ bool ExpandAtomicImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
   return false;
 }
 
-Value *ExpandAtomicImpl::insertRMWCmpXchgLoop(
+Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder, SyncScope::ID SSID,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
@@ -1571,7 +1571,7 @@ Value *ExpandAtomicImpl::insertRMWCmpXchgLoop(
   return NewLoaded;
 }
 
-bool ExpandAtomicImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
   unsigned ValueSize = getAtomicOpSize(CI);
 
@@ -1602,7 +1602,7 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
 
   // FIXME: If FP exceptions are observable, we should force them off for the
   // loop for the FP atomics.
-  Value *Loaded = ExpandAtomicImpl::insertRMWCmpXchgLoop(
+  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
       Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
       AI->getOrdering(), AI->getSyncScopeID(),
       [&](IRBuilderBase &Builder, Value *Loaded) {
@@ -1636,7 +1636,7 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
          Size <= LargestSize;
 }
 
-void ExpandAtomicImpl::expandAtomicLoadToLibcall(LoadInst *I) {
+void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
       RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
@@ -1649,7 +1649,7 @@ void ExpandAtomicImpl::expandAtomicLoadToLibcall(LoadInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
 }
 
-void ExpandAtomicImpl::expandAtomicStoreToLibcall(StoreInst *I) {
+void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
       RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
@@ -1662,7 +1662,7 @@ void ExpandAtomicImpl::expandAtomicStoreToLibcall(StoreInst *I) {
     report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
 }
 
-void ExpandAtomicImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
+void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
   static const RTLIB::Libcall Libcalls[6] = {
       RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
       RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
@@ -1740,7 +1740,7 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
   llvm_unreachable("Unexpected AtomicRMW operation.");
 }
 
-void ExpandAtomicImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
+void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
   ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
 
   unsigned Size = getAtomicOpSize(I);
@@ -1779,7 +1779,7 @@ void ExpandAtomicImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
 // ATOMIC libcalls to be emitted. All of the other arguments besides
 // 'I' are extracted from the Instruction subclass by the
 // caller. Depending on the particular call, some will be null.
-bool ExpandAtomicImpl::expandAtomicOpToLibcall(
+bool AtomicExpandImpl::expandAtomicOpToLibcall(
     Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
     Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
     AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
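
The hunks above only rename the implementation class; the expansion strategies themselves are unchanged. As a rough C++ analogy (a sketch only, not the pass's code: the real transform is built as IR through insertRMWCmpXchgLoop with a PerformOp callback, and the helper name below is made up), an atomicrmw that has to go through cmpxchg becomes a retry loop of this shape:

    #include <atomic>

    // Returns the value observed before the update, as atomicrmw does.
    template <typename T, typename Op>
    T expandRMWViaCmpXchg(std::atomic<T> &Addr, T Operand, Op PerformOp) {
      T Loaded = Addr.load(std::memory_order_relaxed);
      T NewVal;
      do {
        NewVal = PerformOp(Loaded, Operand); // e.g. Loaded + Operand for fadd
      } while (!Addr.compare_exchange_weak(Loaded, NewVal));
      return Loaded;
    }

For sub-word values, expandPartwordCmpXchg builds the analogous retry loop on the containing word, as its comment above describes.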
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index c237574bdf542fb..df2d1831ee5fdbf 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -40,6 +40,7 @@ add_llvm_component_library(LLVMCodeGen
   AllocationOrder.cpp
   Analysis.cpp
   AssignmentTrackingAnalysis.cpp
+  AtomicExpandPass.cpp
   BasicTargetTransformInfo.cpp
   BranchFolding.cpp
   BranchRelaxation.cpp
@@ -68,7 +69,6 @@ add_llvm_component_library(LLVMCodeGen
   EdgeBundles.cpp
   EHContGuardCatchret.cpp
   ExecutionDomainFix.cpp
-  ExpandAtomicPass.cpp
   ExpandLargeDivRem.cpp
   ExpandLargeFpConvert.cpp
   ExpandMemCmp.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index 230776984af0ce2..be1813451228d68 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -19,6 +19,7 @@ using namespace llvm;
 /// initializeCodeGen - Initialize all passes linked into the CodeGen library.
 void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeAssignmentTrackingAnalysisPass(Registry);
+  initializeAtomicExpandLegacyPass(Registry);
   initializeBasicBlockPathCloningPass(Registry);
   initializeBasicBlockSectionsPass(Registry);
   initializeBranchFolderPassPass(Registry);
@@ -38,7 +39,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeEarlyIfPredicatorPass(Registry);
   initializeEarlyMachineLICMPass(Registry);
   initializeEarlyTailDuplicatePass(Registry);
-  initializeExpandAtomicLegacyPass(Registry);
   initializeExpandLargeDivRemLegacyPassPass(Registry);
   initializeExpandLargeFpConvertLegacyPassPass(Registry);
   initializeExpandMemCmpLegacyPassPass(Registry);
diff --git a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
index a8fa14bebbe6860..6c97bc0568bdeee 100644
--- a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
+++ b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp
@@ -24,7 +24,7 @@ TargetSubtargetInfo::TargetSubtargetInfo(
 
 TargetSubtargetInfo::~TargetSubtargetInfo() = default;
 
-bool TargetSubtargetInfo::enableExpandAtomic() const {
+bool TargetSubtargetInfo::enableAtomicExpand() const {
   return true;
 }
 
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index c40d8aa9bbfb166..2b5436b677be250 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -73,11 +73,11 @@
 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
 #include "llvm/Analysis/UniformityAnalysis.h"
 #include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
+#include "llvm/CodeGen/AtomicExpand.h"
 #include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
 #include "llvm/CodeGen/CallBrPrepare.h"
 #include "llvm/CodeGen/CodeGenPrepare.h"
 #include "llvm/CodeGen/DwarfEHPrepare.h"
-#include "llvm/CodeGen/ExpandAtomic.h"
 #include "llvm/CodeGen/ExpandLargeDivRem.h"
 #include "llvm/CodeGen/ExpandLargeFpConvert.h"
 #include "llvm/CodeGen/ExpandMemCmp.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index abf3ae0dfbbedd2..16eb21d0e32c5a1 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -288,6 +288,7 @@ FUNCTION_PASS("alignment-from-assumptions", AlignmentFromAssumptionsPass())
 FUNCTION_PASS("annotation-remarks", AnnotationRemarksPass())
 FUNCTION_PASS("assume-builder", AssumeBuilderPass())
 FUNCTION_PASS("assume-simplify", AssumeSimplifyPass())
+FUNCTION_PASS("atomic-expand", AtomicExpandPass(TM))
 FUNCTION_PASS("bdce", BDCEPass())
 FUNCTION_PASS("bounds-checking", BoundsCheckingPass())
 FUNCTION_PASS("break-crit-edges", BreakCriticalEdgesPass())
@@ -312,7 +313,6 @@ FUNCTION_PASS("dot-post-dom", PostDomPrinter())
 FUNCTION_PASS("dot-post-dom-only", PostDomOnlyPrinter())
 FUNCTION_PASS("dse", DSEPass())
 FUNCTION_PASS("dwarf-eh-prepare", DwarfEHPreparePass(TM))
-FUNCTION_PASS("expand-atomic", ExpandAtomicPass(TM))
 FUNCTION_PASS("expand-large-div-rem", ExpandLargeDivRemPass(TM))
 FUNCTION_PASS("expand-large-fp-convert", ExpandLargeFpConvertPass(TM))
 FUNCTION_PASS("expand-memcmp", ExpandMemCmpPass(TM))
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 449ebb6803fb0d8..598903ee91a6628 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -564,7 +564,7 @@ std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
 void AArch64PassConfig::addIRPasses() {
   // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
   // ourselves.
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   // Expand any SVE vector library calls that we can't code generate directly.
   if (EnableSVEIntrinsicOpts &&
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index e802c0ad89a6985..58f729cf8d9714d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1054,7 +1054,7 @@ void AMDGPUPassConfig::addIRPasses() {
     addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
   }
 
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   if (TM.getOptLevel() > CodeGenOptLevel::None) {
     addPass(createAMDGPUPromoteAlloca());
diff --git a/llvm/lib/Target/ARC/ARCTargetMachine.cpp b/llvm/lib/Target/ARC/ARCTargetMachine.cpp
index 6f16b328af7efa8..f50c3c0265e3490 100644
--- a/llvm/lib/Target/ARC/ARCTargetMachine.cpp
+++ b/llvm/lib/Target/ARC/ARCTargetMachine.cpp
@@ -70,7 +70,7 @@ TargetPassConfig *ARCTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void ARCPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index a5d32793e6f57eb..4ef00df57ef9ab4 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -418,7 +418,7 @@ void ARMPassConfig::addIRPasses() {
   if (TM->Options.ThreadModel == ThreadModel::Single)
     addPass(createLowerAtomicPass());
   else
-    addPass(createExpandAtomicLegacyPass());
+    addPass(createAtomicExpandLegacyPass());
 
   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in
diff --git a/llvm/lib/Target/BPF/BPFTargetMachine.cpp b/llvm/lib/Target/BPF/BPFTargetMachine.cpp
index d94466c8892eb19..08ac4b25540f70b 100644
--- a/llvm/lib/Target/BPF/BPFTargetMachine.cpp
+++ b/llvm/lib/Target/BPF/BPFTargetMachine.cpp
@@ -149,7 +149,7 @@ void BPFTargetMachine::registerPassBuilderCallbacks(
 }
 
 void BPFPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
   addPass(createBPFCheckAndAdjustIR());
 
   TargetPassConfig::addIRPasses();
diff --git a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
index 223e27b49fe3dfa..0bbfabe93147c55 100644
--- a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
+++ b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
@@ -118,7 +118,7 @@ TargetPassConfig *CSKYTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void CSKYPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 786641ce063435a..c9ab10de9c94ae0 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -339,7 +339,7 @@ void HexagonPassConfig::addIRPasses() {
     addPass(createDeadCodeEliminationPass());
   }
 
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   if (!NoOpt) {
     if (EnableInitialCFGCleanup)
diff --git a/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp b/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp
index 0434bf5010e880f..2357221b0120f95 100644
--- a/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp
+++ b/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp
@@ -106,7 +106,7 @@ LanaiTargetMachine::createPassConfig(PassManagerBase &PassManager) {
 }
 
 void LanaiPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
index 18a532b55ee5a92..06f6feffb4bbe76 100644
--- a/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
@@ -604,7 +604,7 @@ bool LoongArchExpandAtomicPseudo::expandAtomicCmpXchg(
 
 } // end namespace
 
-INITIALIZE_PASS(LoongArchExpandAtomicPseudo, "loongarch-expand-atomic-pseudo",
+INITIALIZE_PASS(LoongArchExpandAtomicPseudo, "loongarch-atomic-expand-pseudo",
                 LoongArch_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
 
 namespace llvm {
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index 786b845e32f4808..e5494488e113578 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -161,7 +161,7 @@ void LoongArchPassConfig::addIRPasses() {
   // pointer values N iterations ahead.
   if (TM->getOptLevel() != CodeGenOptLevel::None && EnableLoopDataPrefetch)
     addPass(createLoopDataPrefetchPass());
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index e8d399e50e6c685..bbbcb1556ed557e 100644
--- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -171,7 +171,7 @@ TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void M68kPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index a8cad1d8aefcb0a..ed0fcf7110b78ce 100644
--- a/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -83,7 +83,7 @@ MachineFunctionInfo *MSP430TargetMachine::createMachineFunctionInfo(
 }
 
 void MSP430PassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 5113b51938e7a49..4c4bf70e22c6c1f 100644
--- a/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -263,7 +263,7 @@ std::unique_ptr<CSEConfigBase> MipsPassConfig::getCSEConfig() const {
 
 void MipsPassConfig::addIRPasses() {
   TargetPassConfig::addIRPasses();
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
   if (getMipsSubtarget().os16())
     addPass(createMipsOs16Pass());
   if (getMipsSubtarget().inMips16HardFloat())
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index a3a959de2d5f90a..69d4596f7843e4a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -380,7 +380,7 @@ void NVPTXPassConfig::addIRPasses() {
     addStraightLineScalarOptimizationPasses();
   }
 
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
   addPass(createNVPTXCtorDtorLoweringLegacyPass());
 
   // === LSR and other generic IR passes ===
diff --git a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
index a71a1b8e8439b08..aee57a5075ff719 100644
--- a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp
@@ -23,7 +23,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "ppc-expand-atomic"
+#define DEBUG_TYPE "ppc-atomic-expand"
 
 namespace {
 
diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index 4dc3d1937548961..714cf69827a1e70 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -457,7 +457,7 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
 void PPCPassConfig::addIRPasses() {
   if (TM->getOptLevel() != CodeGenOptLevel::None)
     addPass(createPPCBoolRetToIntPass());
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   // Lower generic MASSV routines to PowerPC subtarget-specific entries.
   addPass(createPPCLowerMASSVEntriesPass());
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index bb772fc5da92244..b12cbda6e93c04f 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -730,7 +730,7 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
 
 } // end of anonymous namespace
 
-INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
+INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-atomic-expand-pseudo",
                 RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
 
 namespace llvm {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 27133b8ca952394..6a7d0c191dcafe6 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -425,7 +425,7 @@ bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
 }
 
 void RISCVPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   if (getOptLevel() != CodeGenOptLevel::None) {
     if (EnableLoopDataPrefetch)
diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index ac2d73d99bc3c71..20ddafb0e43d07f 100644
--- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -175,7 +175,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void SparcPassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index 670e7b0a75fa971..121512d5a7e589d 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -228,7 +228,7 @@ void SystemZPassConfig::addIRPasses() {
     addPass(createLoopDataPrefetchPass());
   }
 
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/lib/Target/VE/VETargetMachine.cpp b/llvm/lib/Target/VE/VETargetMachine.cpp
index c9c6f134b61faa4..6f4e137e4d2f186 100644
--- a/llvm/lib/Target/VE/VETargetMachine.cpp
+++ b/llvm/lib/Target/VE/VETargetMachine.cpp
@@ -134,7 +134,7 @@ TargetPassConfig *VETargetMachine::createPassConfig(PassManagerBase &PM) {
 
 void VEPassConfig::addIRPasses() {
   // VE requires atomic expand pass.
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index 100e3a60c5ea3ba..912f61765579f83 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -45,7 +45,7 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
       TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
       TLInfo(TM, *this) {}
 
-bool WebAssemblySubtarget::enableExpandAtomic() const {
+bool WebAssemblySubtarget::enableAtomicExpand() const {
   // If atomics are disabled, atomic ops are lowered instead of expanded
   return hasAtomics();
 }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
index 1f5bb72b027bdd7..85d02b087c786ed 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -84,7 +84,7 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
     return &getInstrInfo()->getRegisterInfo();
   }
   const Triple &getTargetTriple() const { return TargetTriple; }
-  bool enableExpandAtomic() const override;
+  bool enableAtomicExpand() const override;
   bool enableIndirectBrExpand() const override { return true; }
   bool enableMachineScheduler() const override;
   bool useAA() const override;
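
The base-class default in TargetSubtargetInfo keeps the expansion enabled everywhere; WebAssembly overrides the renamed hook to gate the pass on hasAtomics(). A hypothetical helper (the real check lives inside the pass; the name and call site here are assumptions) showing the query this hook serves:

    #include "llvm/CodeGen/TargetSubtargetInfo.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Target/TargetMachine.h"

    // Ask the per-function subtarget whether atomic expansion should run at all.
    bool shouldRunAtomicExpand(const llvm::Function &F,
                               const llvm::TargetMachine &TM) {
      return TM.getSubtargetImpl(F)->enableAtomicExpand();
    }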
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 77048a336e699e1..9f24452b21fb207 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -478,7 +478,7 @@ void WebAssemblyPassConfig::addISelPrepare() {
   addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine()));
 
   // This is a no-op if atomics are not used in the module
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addISelPrepare();
 }
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 70dedfbc4cff146..027481db398a6a6 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -435,7 +435,7 @@ MachineFunctionInfo *X86TargetMachine::createMachineFunctionInfo(
 }
 
 void X86PassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   // We add both pass anyway and when these two passes run, we skip the pass
   // based on the option level and option attribute.
diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index c230efda852ecca..374e91d01bdacec 100644
--- a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -84,7 +84,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 void XCorePassConfig::addIRPasses() {
-  addPass(createExpandAtomicLegacyPass());
+  addPass(createAtomicExpandLegacyPass());
 
   TargetPassConfig::addIRPasses();
 }
diff --git a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
index fc071e29c9467fa..c2ef2fa16a9a282 100644
--- a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
+++ b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -O3 %s -o %t.s
-; RUN: llc -O3 -stop-after=expand-atomic %s -o %t.mir
-; RUN: llc -O3 -start-after=expand-atomic %s -o %t2.s
+; RUN: llc -O3 -stop-after=atomic-expand %s -o %t.mir
+; RUN: llc -O3 -start-after=atomic-expand %s -o %t2.s
 
 ; If we add tti pass correctly files should be identical
 ; Otherwise LSR will use default TargetTransformInfo and
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
index fcb32f10493d428..2f7d1e9a6efafda 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs --pass-remarks=expand-atomic \
+; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs --pass-remarks=atomic-expand \
 ; RUN:      %s -o - 2>&1 | FileCheck %s --check-prefix=GFX90A-CAS
 
 ; GFX90A-CAS: A compare and swap loop was generated for an atomic fadd operation at system memory scope
diff --git a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
index 0a0fe156305a971..6385466e1341601 100644
--- a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic < %s | FileCheck --check-prefix=OPT %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand < %s | FileCheck --check-prefix=OPT %s
 
 define i32 @global_agent_monotonic_idempotent_or(ptr addrspace(1) %in) {
 ; GFX940-LABEL: global_agent_monotonic_idempotent_or:
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index fc88f3f9f5cce38..6fdc0d5834ef6e6 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=expand-atomic < %s | FileCheck -check-prefix=IR %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=atomic-expand < %s | FileCheck -check-prefix=IR %s
 ; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s
 
 define i32 @load_atomic_private_seq_cst_i32(ptr addrspace(5) %ptr) {
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
index d454e125844639f..ba6802f85c03cd3 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
index 046324bd9d917f6..ef2b5fe3672be08 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=expand-atomic %s | FileCheck %s
-; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=expand-atomic %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=atomic-expand %s | FileCheck %s
+; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=atomic-expand %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
 
 define void @atomic_swap_f16(ptr %ptr, half %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f16(
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
index 5c6c0ae31d9dd7b..cc42407c0210e7a 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=atomic-expand %s | FileCheck %s
 
 define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_unordered(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
index c968c4ec5cc9612..08d990fb58039e6 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index 3f902ce2d3ef535..94f1b733877ed7a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomic-expand %s | FileCheck %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
index f3982fcd9897df4..80cf19ed8c636a7 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand %s | FileCheck %s
 
 define i8 @test_atomicrmw_xchg_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index 9b725ed4cb06c2a..711580158e7251b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s --check-prefixes=CHECK,GCN
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s --check-prefixes=CHECK,R600
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand %s | FileCheck %s --check-prefixes=CHECK,GCN
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomic-expand %s | FileCheck %s --check-prefixes=CHECK,R600
 
 define i8 @test_atomicrmw_xchg_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
 ; GCN-LABEL: @test_atomicrmw_xchg_i8_global_agent(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
index e601021cd3ca382..c2f7057dc26f3d2 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expand-atomic %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expand-atomic %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expand-atomic %s | FileCheck -check-prefix=GFX1100 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=atomic-expand %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomic-expand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=atomic-expand %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=atomic-expand %s | FileCheck -check-prefix=GFX1100 %s
 
 define float @syncscope_system(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: @syncscope_system(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index 6be6771ed920ac1..b6e6b260249523a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=CI %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GFX9 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expand-atomic %s | FileCheck -check-prefix=GFX908 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expand-atomic %s | FileCheck -check-prefix=GFX940 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expand-atomic %s | FileCheck -check-prefix=GFX11 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomic-expand %s | FileCheck -check-prefix=CI %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomic-expand %s | FileCheck -check-prefix=GFX9 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=atomic-expand %s | FileCheck -check-prefix=GFX908 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomic-expand %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=atomic-expand %s | FileCheck -check-prefix=GFX940 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=atomic-expand %s | FileCheck -check-prefix=GFX11 %s
 
 define void @test_atomicrmw_fadd_f32_global_no_use_unsafe(ptr addrspace(1) %ptr, float %value) #0 {
 ; CI-LABEL: @test_atomicrmw_fadd_f32_global_no_use_unsafe(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
index 9df002ea0b2a871..5a79bc26807696b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomic-expand %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmax_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmax_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
index 78cf7d0769ce39f..e3d3bfde3be6853 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomic-expand %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fmin_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fmin_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
index 5d4a219a8fe5fad..bbcc6b8a2724f99 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomic-expand %s | FileCheck -check-prefix=GCN %s
 
 define float @test_atomicrmw_fsub_f32_flat(ptr %ptr, float %value) {
 ; GCN-LABEL: @test_atomicrmw_fsub_f32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
index e1b0ea7d0f5bcf1..ac88ff1dd88078f 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s
-; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=atomic-expand %s | FileCheck %s
 
 define i32 @test_atomicrmw_nand_i32_flat(ptr %ptr, i32 %value) {
 ; CHECK-LABEL: @test_atomicrmw_nand_i32_flat(
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
index 2fc671b2624227b..e92651978359425 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomic-expand %s | FileCheck -check-prefix=GFX90A %s
 
 declare i32 @llvm.amdgcn.workitem.id.x()
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
index bda3cc4c57a88e9..acf726a7de5e0ee 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s 2>&1 | FileCheck --check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=atomic-expand %s 2>&1 | FileCheck --check-prefix=GCN %s
 
 define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) {
 ; GCN-LABEL: @atomic_load_global_align1(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
index 786aad24c4d3482..2e72d26ed45666d 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=expand-atomic -codegen-opt-level=1 %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=atomic-expand -codegen-opt-level=1 %s | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
index a8a212aa2cb36db..10073e23f5d4621 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=expand-atomic %s -codegen-opt-level=1 | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=atomic-expand %s -codegen-opt-level=1 | FileCheck %s
 
 define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
index 50336e82719bb39..9c4ce50da691705 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
index 56583ce6aed46f4..23aa57e18ecc5ae 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
@@ -1,4 +1,4 @@
-; RUN: opt -passes=expand-atomic -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -passes=atomic-expand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
 
 define i32 @test_cmpxchg_seq_cst(ptr %addr, i32 %desired, i32 %new) {
 ; CHECK-LABEL: @test_cmpxchg_seq_cst
diff --git a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
index 4d759ecbc7c7a63..9e64db0a5e31d3e 100644
--- a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=hexagon-- -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=hexagon-- -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
index 18977862cfca06d..0e6585ed4279eb6 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch64 -passes=expand-atomic --mattr=+d %s | FileCheck %s
+; RUN: opt -S --mtriple=loongarch64 -passes=atomic-expand --mattr=+d %s | FileCheck %s
 
 define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
 ; CHECK-LABEL: @atomicrmw_fadd_float(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
index 6bb42d17bd5b953..72bdcf5f3576c3e 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch32 -passes=expand-atomic %s | FileCheck %s --check-prefix=LA32
-; RUN: opt -S --mtriple=loongarch64 -passes=expand-atomic %s | FileCheck %s --check-prefix=LA64
+; RUN: opt -S --mtriple=loongarch32 -passes=atomic-expand %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 -passes=atomic-expand %s | FileCheck %s --check-prefix=LA64
 
 define i8 @load_acquire_i8(ptr %ptr) {
 ; LA32-LABEL: @load_acquire_i8(
diff --git a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
index acc7e2e9869a95e..0fd1cbe8bdd0e90 100644
--- a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
index d8ab8d9b987ef35..a3d62e06a7cd60f 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
index 90ed8128e8898f6..1bd2c6cb5607f13 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=atomic-expand -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=atomic-expand -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define double @foo(ptr %dp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
index b5be91fca077ffe..2233ce0fcf68925 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=atomic-expand -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=atomic-expand -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
 
 define float @bar(ptr %fp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index 19a16a43f0be81e..b94023b97a29504 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=atomic-expand -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr8 %s | FileCheck %s
-; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=atomic-expand -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -mcpu=pwr7 %s | FileCheck --check-prefix=PWR7 %s
 
 define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
index b6fe0febf1e553a..4a0df4c6739e43c 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -passes=atomic-expand -S -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   %s | FileCheck %s
-; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=atomic-expand -S -mtriple=powerpc64-unknown-unknown \
 ; RUN:   %s | FileCheck %s
 
 define ptr @foo(ptr %p) {
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index 20d0347579f3056..7e41583189c3d37 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=riscv32-- -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=riscv32-- -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 7557b722d6949ad..682c1e6848b3134 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -passes=expand-atomic | FileCheck %s
+; RUN: opt -S %s -passes=atomic-expand | FileCheck %s
 
 ;;; NOTE: this test is actually target-independent -- any target which
 ;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
index 9e2385947253948..e8be1bc18748362 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S %s -passes=expand-atomic | FileCheck %s
+; RUN: opt -S %s -passes=atomic-expand | FileCheck %s
 
 ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
 ;; instructions are not available.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 9af61056137bea0..20a9e9f6cb86a88 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomic-expand %s | FileCheck %s
 
 
 define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index 6105857dcb7a806..5929c153d5961d9 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -passes=expand-atomic -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s
 
 ; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
 ; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this 
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
index 3d561d54b1146f3..3866530abb79666 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomic-expand %s | FileCheck %s
 
 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
index 592b2e8f933dc04..316660ddfc49e41 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -passes=expand-atomic -mtriple=i686-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=atomic-expand -mtriple=i686-linux-gnu | FileCheck %s
 
 ; This file tests the function `llvm::expandAtomicRMWToCmpXchg`.
 ; It isn't technically target specific, but is exposed through a pass that is.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
index ba43255b418afb0..211c6c5886413ac 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=atomic-expand %s | FileCheck %s
 
 define double @atomic_xchg_f64(ptr %ptr) nounwind {
 ; CHECK-LABEL: @atomic_xchg_f64(
diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
index 1d5af1bf24dd183..047f6583ec4e88b 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
@@ -39,6 +39,7 @@ static_library("CodeGen") {
     "AllocationOrder.cpp",
     "Analysis.cpp",
     "AssignmentTrackingAnalysis.cpp",
+    "AtomicExpandPass.cpp",
     "BasicBlockPathCloning.cpp",
     "BasicBlockSections.cpp",
     "BasicBlockSectionsProfileReader.cpp",
@@ -68,7 +69,6 @@ static_library("CodeGen") {
     "EarlyIfConversion.cpp",
     "EdgeBundles.cpp",
     "ExecutionDomainFix.cpp",
-    "ExpandAtomicPass.cpp",
     "ExpandLargeDivRem.cpp",
     "ExpandLargeFpConvert.cpp",
     "ExpandMemCmp.cpp",

>From 3cfe22484dab9270b237d233445d6b529bdb1b53 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Mon, 15 Jan 2024 22:42:59 +0530
Subject: [PATCH 6/8] Minor correction

---
 llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp | 2 +-
 llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
index 06f6feffb4bbe76..18a532b55ee5a92 100644
--- a/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
@@ -604,7 +604,7 @@ bool LoongArchExpandAtomicPseudo::expandAtomicCmpXchg(
 
 } // end namespace
 
-INITIALIZE_PASS(LoongArchExpandAtomicPseudo, "loongarch-atomic-expand-pseudo",
+INITIALIZE_PASS(LoongArchExpandAtomicPseudo, "loongarch-expand-atomic-pseudo",
                 LoongArch_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
 
 namespace llvm {
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index b12cbda6e93c04f..bb772fc5da92244 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -730,7 +730,7 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
 
 } // end of anonymous namespace
 
-INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-atomic-expand-pseudo",
+INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
                 RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
 
 namespace llvm {

>From 1364f67851fa4b5e6285c25ad97a5f715c095532 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Mon, 15 Jan 2024 22:57:08 +0530
Subject: [PATCH 7/8] Minor correction

---
 llvm/lib/CodeGen/AtomicExpandPass.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ca7e00a80ae4769..d756b972596c51b 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -68,7 +68,7 @@ class AtomicExpandImpl {
   bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
   LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
-  bool tryAtomicExpandLoad(LoadInst *LI);
+  bool tryExpandAtomicLoad(LoadInst *LI);
   bool expandAtomicLoadToLL(LoadInst *LI);
   bool expandAtomicLoadToCmpXchg(LoadInst *LI);
   StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);

>From 516800aa1ef1363f1ccc3ed406c16552b78e7cb0 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsbali at gmail.com>
Date: Wed, 17 Jan 2024 22:36:26 +0530
Subject: [PATCH 8/8] Changes

---
 llvm/lib/CodeGen/AtomicExpandPass.cpp | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index d756b972596c51b..ed73f8090018b54 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -346,11 +346,7 @@ bool AtomicExpandLegacy::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;
 
-  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
-  if (!TPC)
-    return false;
-
-  auto *TM = &TPC->getTM<TargetMachine>();
+  auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
 
   AtomicExpandImpl AE;
   return AE.run(F, TM);
@@ -368,9 +364,7 @@ PreservedAnalyses AtomicExpandPass::run(Function &F,
   if (!Changed)
     return PreservedAnalyses::all();
 
-  PreservedAnalyses PA;
-  PA.preserveSet<CFGAnalyses>();
-  return PA;
+  return PreservedAnalyses::none();
 }
 
 bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
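
Consolidating the two hunks above, the entry points now look roughly like this (a sketch: the Changed local and the TM member are assumed from context, and only the lines visible in the hunks are verbatim):

    bool AtomicExpandLegacy::runOnFunction(Function &F) {
      if (skipFunction(F))
        return false;
      // TargetPassConfig is now a hard dependency instead of an optional one.
      auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
      AtomicExpandImpl AE;
      return AE.run(F, TM);
    }

    PreservedAnalyses AtomicExpandPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
      AtomicExpandImpl AE;
      bool Changed = AE.run(F, TM);
      if (!Changed)
        return PreservedAnalyses::all();
      return PreservedAnalyses::none();
    }

Returning PreservedAnalyses::none() instead of preserving CFGAnalyses is the conservative choice here, since the expansions can introduce new basic blocks (for example the cmpxchg retry loops), so even CFG-only analyses cannot be kept.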


