[llvm] [RFC] implement convergence control in MIR using SelectionDAG (PR #71785)

Sameer Sahasrabuddhe via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 8 22:54:02 PST 2024


https://github.com/ssahasra updated https://github.com/llvm/llvm-project/pull/71785

From 0581fec4508ed761184a6efffab198f7de60d56b Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Thu, 9 Nov 2023 10:24:36 +0530
Subject: [PATCH 1/8] [RFC] implement convergence control in MIR using
 SelectionDAG

LLVM function calls carry convergence control tokens as "convergencectrl"
operand bundles, where the tokens themselves are produced by convergence
control intrinsics. This patch implements convergence control tokens in MIR
as follows:

1. Introduce target-independent ISD opcodes and MIR opcodes for convergence
   control intrinsics.
2. Model token values as untyped virtual registers in MIR.

The actual lowering of controlled convergent operations (including function
calls) and convergence control intrinsics is target-specific.
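
As a concrete illustration (adapted from the convergence-tokens.ll test added
by this patch), this is the kind of IR being lowered:

  declare token @llvm.experimental.convergence.entry()
  declare i32 @foo(i32) convergent

  define i32 @basic_call(i32 %src) convergent {
    %t = call token @llvm.experimental.convergence.entry()
    ; The "convergencectrl" bundle attaches the token %t to the call.
    %r = call i32 @foo(i32 %src) [ "convergencectrl"(token %t) ]
    ret i32 %r
  }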

On AMDGPU, the convergence control operand bundle at a non-intrinsic call is
translated to an explicit argument of the SI_CALL_ISEL instruction. The
post-selection adjustment hook then converts this explicit argument into an
implicit argument on the SI_CALL instruction. For intrinsics, AMDGPU
introduces an AMDGPUISD opcode CONVERGENCECTRL_GLUE and a corresponding
machine opcode with the same spelling. The glue node is attached as a glued
operand to the selected SDNode and is later translated into an implicit
register argument in the MIR.
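
For the basic_call example above, the call is lowered roughly as follows
(register numbers are illustrative; see the convergence-tokens.ll test):

  ; After instruction selection:
  %0:sreg_64 = CONVERGENCECTRL_ENTRY
  $sgpr30_sgpr31 = SI_CALL_ISEL ..., @foo, killed %0, csr_amdgpu, ...

  ; After the post-selection adjustment:
  $sgpr30_sgpr31 = SI_CALL ..., @foo, csr_amdgpu, ..., implicit killed %0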

All convergent intrinsics on AMDGPU must set hasPostISelHook in their
instruction description, since that hook is what attaches the implicit token
operand after instruction selection.
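
For a controlled intrinsic such as llvm.amdgcn.readfirstlane, the token
reaches the selected instruction through the glue pseudo, which becomes dead
once the implicit operand has been attached and is cleaned up later, e.g. by
dead-mi-elimination (register numbers and classes are illustrative):

  %0:sreg_64 = CONVERGENCECTRL_ANCHOR
  CONVERGENCECTRL_GLUE %0
  %2:sgpr_32 = V_READFIRSTLANE_B32 %1:vgpr_32, implicit %0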
---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  4 ++
 llvm/include/llvm/CodeGen/SelectionDAGISel.h  |  4 ++
 llvm/include/llvm/CodeGen/TargetLowering.h    | 17 ++++--
 llvm/include/llvm/Support/TargetOpcodes.def   |  4 ++
 llvm/include/llvm/Target/Target.td            | 28 +++++++++
 .../include/llvm/Target/TargetSelectionDAG.td |  8 +++
 .../lib/CodeGen/SelectionDAG/InstrEmitter.cpp |  4 +-
 .../SelectionDAG/SelectionDAGBuilder.cpp      | 39 +++++++++++-
 .../SelectionDAG/SelectionDAGBuilder.h        |  1 +
 .../SelectionDAG/SelectionDAGDumper.cpp       |  4 ++
 .../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 29 ++++++++-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  4 +-
 llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 28 +++++++--
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 15 +++++
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |  6 ++
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td     |  3 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 46 ++++++++++++--
 llvm/lib/Target/AMDGPU/SIISelLowering.h       |  5 +-
 llvm/lib/Target/AMDGPU/SIInstructions.td      | 22 +++++--
 llvm/lib/Target/AMDGPU/VOP1Instructions.td    |  1 +
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  5 +-
 llvm/lib/Target/ARM/ARMISelLowering.h         |  5 +-
 .../Target/Hexagon/HexagonISelLowering.cpp    |  5 +-
 llvm/lib/Target/Hexagon/HexagonISelLowering.h |  5 +-
 llvm/lib/Target/Mips/MipsISelLowering.cpp     |  5 +-
 llvm/lib/Target/Mips/MipsISelLowering.h       |  5 +-
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |  6 +-
 llvm/lib/Target/PowerPC/PPCISelLowering.h     |  6 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  5 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  5 +-
 llvm/lib/Target/Sparc/SparcISelLowering.cpp   |  5 +-
 llvm/lib/Target/Sparc/SparcISelLowering.h     |  5 +-
 .../test/CodeGen/AMDGPU/convergence-tokens.ll | 40 +++++++++++++
 .../CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll | 18 ++++++
 .../kernel-vgpr-spill-mubuf-with-voffset.ll   |  1 +
 .../AMDGPU/need-fp-from-vgpr-spills.ll        | 34 ++++++-----
 .../AMDGPU/no-source-locations-in-prologue.ll |  1 +
 .../AMDGPU/sgpr-spills-split-regalloc.ll      | 15 +++--
 .../CodeGen/AMDGPU/stacksave_stackrestore.ll  | 52 ++++++++--------
 llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll | 26 ++++----
 .../CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll   |  1 +
 .../AMDGPU/whole-wave-register-spill.ll       |  1 +
 .../test/CodeGen/AMDGPU/wwm-reserved-spill.ll |  2 +
 llvm/test/CodeGen/AMDGPU/wwm-reserved.ll      |  4 ++
 llvm/test/CodeGen/PowerPC/fmf-propagation.ll  | 12 ++--
 .../builtins/match-table-replacerreg.td       |  2 +-
 .../match-table-imms.td                       | 29 ++++-----
 .../match-table-patfrag-root.td               |  2 +-
 .../match-table-variadics.td                  |  2 +-
 .../GlobalISelCombinerEmitter/match-table.td  | 60 +++++++++----------
 50 files changed, 472 insertions(+), 164 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/convergence-tokens.ll

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 349d1286c8dc4f..ecda88d9abd677 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1378,6 +1378,10 @@ enum NodeType {
 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
 #include "llvm/IR/VPIntrinsics.def"
 
+  CONVERGENCECTRL_ANCHOR,
+  CONVERGENCECTRL_ENTRY,
+  CONVERGENCECTRL_LOOP,
+
   /// BUILTIN_OP_END - This must be the last enum value in this list.
   /// The target-specific pre-isel opcode values start here.
   BUILTIN_OP_END
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index 884fdfadfcbef7..8f7f19fd3aa942 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -324,6 +324,10 @@ class SelectionDAGISel : public MachineFunctionPass {
   void Select_ARITH_FENCE(SDNode *N);
   void Select_MEMBARRIER(SDNode *N);
 
+  void Select_CONVERGENCECTRL_ANCHOR(SDNode *N);
+  void Select_CONVERGENCECTRL_ENTRY(SDNode *N);
+  void Select_CONVERGENCECTRL_LOOP(SDNode *N);
+
   void pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops, SDValue Operand,
                                 SDLoc DL);
   void Select_STACKMAP(SDNode *N);
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 58aad70c4bb36e..4bb937b3cfeaec 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4369,6 +4369,7 @@ class TargetLowering : public TargetLoweringBase {
     SmallVector<ISD::InputArg, 32> Ins;
     SmallVector<SDValue, 4> InVals;
     const ConstantInt *CFIType = nullptr;
+    SDValue ConvergenceControlToken;
 
     CallLoweringInfo(SelectionDAG &DAG)
         : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
@@ -4502,6 +4503,11 @@ class TargetLowering : public TargetLoweringBase {
       return *this;
     }
 
+    CallLoweringInfo &setConvergenceControlToken(SDValue Token) {
+      ConvergenceControlToken = Token;
+      return *this;
+    }
+
     ArgListTy &getArgs() {
       return Args;
     }
@@ -4907,9 +4913,9 @@ class TargetLowering : public TargetLoweringBase {
 
   // Targets may override this function to collect operands from the CallInst
   // and for example, lower them into the SelectionDAG operands.
-  virtual void CollectTargetIntrinsicOperands(const CallInst &I,
-                                              SmallVectorImpl<SDValue> &Ops,
-                                              SelectionDAG &DAG) const;
+  virtual void CollectTargetIntrinsicOperands(
+      const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG,
+      function_ref<SDValue(const Value *)> getValue) const;
 
   //===--------------------------------------------------------------------===//
   // Div utility functions
@@ -5337,8 +5343,9 @@ class TargetLowering : public TargetLoweringBase {
   /// the 'hasPostISelHook' flag. These instructions must be adjusted after
   /// instruction selection by target hooks.  e.g. To fill in optional defs for
   /// ARM 's' setting instructions.
-  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                             SDNode *Node) const;
+  virtual void
+  AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node,
+                                function_ref<Register(SDValue)> getVR) const;
 
   /// If this function returns true, SelectionDAGBuilder emits a
   /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 941c6d5f8cad8c..0cda85a8944a8c 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -230,6 +230,10 @@ HANDLE_TARGET_OPCODE(MEMBARRIER)
 // using.
 HANDLE_TARGET_OPCODE(JUMP_TABLE_DEBUG_INFO)
 
+HANDLE_TARGET_OPCODE(CONVERGENCECTRL_ENTRY)
+HANDLE_TARGET_OPCODE(CONVERGENCECTRL_ANCHOR)
+HANDLE_TARGET_OPCODE(CONVERGENCECTRL_LOOP)
+
 /// The following generic opcodes are not supposed to appear after ISel.
 /// This is something we might want to relax, but for now, this is convenient
 /// to produce diagnostics.
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index 3b1d2f45267e9e..621ac6048d0c76 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -1437,6 +1437,34 @@ def JUMP_TABLE_DEBUG_INFO : StandardPseudoInstruction {
   let isMeta = true;
 }
 
+def CONVERGENCECTRL_ANCHOR : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins);
+  let AsmString = "";
+  let hasSideEffects = false;
+  let Size = 0;
+  let isMeta = true;
+  let isConvergent = true;
+}
+def CONVERGENCECTRL_ENTRY : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins);
+  let AsmString = "";
+  let hasSideEffects = false;
+  let Size = 0;
+  let isMeta = true;
+  let isConvergent = true;
+}
+def CONVERGENCECTRL_LOOP : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let AsmString = "";
+  let hasSideEffects = false;
+  let Size = 0;
+  let isMeta = true;
+  let isConvergent = true;
+}
+
 // Generic opcodes used in GlobalISel.
 include "llvm/Target/GenericOpcodes.td"
 
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index fa5761c3a199a5..e71cd165543fd1 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -770,6 +770,14 @@ def assertsext : SDNode<"ISD::AssertSext", SDT_assert>;
 def assertzext : SDNode<"ISD::AssertZext", SDT_assert>;
 def assertalign : SDNode<"ISD::AssertAlign", SDT_assert>;
 
+def convergencectrl_anchor : SDNode<"ISD::CONVERGENCECTRL_ANCHOR",
+                                    SDTypeProfile<1, 0, [SDTCisVT<0,untyped>]>>;
+def convergencectrl_entry  : SDNode<"ISD::CONVERGENCECTRL_ENTRY",
+                                    SDTypeProfile<1, 0, [SDTCisVT<0,untyped>]>>;
+def convergencectrl_loop   : SDNode<"ISD::CONVERGENCECTRL_LOOP",
+                                    SDTypeProfile<1, 1,
+                                      [SDTCisVT<0,untyped>, SDTCisVT<1,untyped>]>>;
+
 //===----------------------------------------------------------------------===//
 // Selection DAG Condition Codes
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index a27febe15db832..10e01d2086cb9e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -1193,7 +1193,9 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
 
   // Run post-isel target hook to adjust this instruction if needed.
   if (II.hasPostISelHook())
-    TLI->AdjustInstrPostInstrSelection(*MIB, Node);
+    TLI->AdjustInstrPostInstrSelection(
+        *MIB, Node,
+        [this, &VRBaseMap](SDValue Op) { return getVR(Op, VRBaseMap); });
 }
 
 /// EmitSpecialNode - Generate machine code for a target-independent node and
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index aab0d5c5a348bf..32251d240d457b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4969,7 +4969,8 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
   // Create the node.
   SDValue Result;
   // In some cases, custom collection of operands from CallInst I may be needed.
-  TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
+  TLI.CollectTargetIntrinsicOperands(
+      I, Ops, DAG, [this](const Value *V) -> SDValue { return getValue(V); });
   if (IsTgtIntrinsic) {
     // This is target intrinsic that touches memory
     //
@@ -5969,6 +5970,24 @@ bool SelectionDAGBuilder::visitEntryValueDbgValue(const DbgValueInst &DI) {
   return true;
 }
 
+/// Lower a call to one of the experimental convergence control intrinsics.
+void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
+                                                  unsigned Intrinsic) {
+  SDLoc sdl = getCurSDLoc();
+  switch (Intrinsic) {
+  case Intrinsic::experimental_convergence_anchor:
+    setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
+    break;
+  case Intrinsic::experimental_convergence_entry:
+    setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
+    break;
+  case Intrinsic::experimental_convergence_loop: {
+    llvm_unreachable("convergence.loop lowering not implemented yet");
+    break;
+  }
+  }
+}
+
 /// Lower the call to the specified intrinsic function.
 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                              unsigned Intrinsic) {
@@ -7669,6 +7688,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
   case Intrinsic::experimental_vector_deinterleave2:
     visitVectorDeinterleave(I);
     return;
+  case Intrinsic::experimental_convergence_anchor:
+  case Intrinsic::experimental_convergence_entry:
+  case Intrinsic::experimental_convergence_loop:
+    visitConvergenceControl(I, Intrinsic);
   }
 }
 
@@ -8343,6 +8366,14 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
     }
   }
 
+  SDValue ConvControlToken;
+  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+    auto *Token = Bundle->Inputs[0].get();
+    ConvControlToken = getValue(Token);
+  } else {
+    ConvControlToken = DAG.getUNDEF(MVT::Untyped);
+  }
+
   TargetLowering::CallLoweringInfo CLI(DAG);
   CLI.setDebugLoc(getCurSDLoc())
       .setChain(getRoot())
@@ -8351,7 +8382,8 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
       .setConvergent(CB.isConvergent())
       .setIsPreallocated(
           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
-      .setCFIType(CFIType);
+      .setCFIType(CFIType)
+      .setConvergenceControlToken(ConvControlToken);
   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
 
   if (Result.first.getNode()) {
@@ -8903,7 +8935,8 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
   assert(!I.hasOperandBundlesOtherThan(
              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
-              LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
+              LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
+              LLVMContext::OB_convergencectrl}) &&
          "Cannot lower calls with arbitrary operand bundles!");
 
   SDValue Callee = getValue(I.getCalledOperand());
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index a97884f0efb9a9..0640236f5ac693 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -631,6 +631,7 @@ class SelectionDAGBuilder {
   void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
   void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
   void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
+  void visitConvergenceControl(const CallInst &I, unsigned Intrinsic);
   void visitVPLoad(const VPIntrinsic &VPIntrin, EVT VT,
                    const SmallVectorImpl<SDValue> &OpValues);
   void visitVPStore(const VPIntrinsic &VPIntrin,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 78cc60084068a5..ea28a2432c70e0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -446,6 +446,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::SET_FPMODE:                 return "set_fpmode";
   case ISD::RESET_FPMODE:               return "reset_fpmode";
 
+  case ISD::CONVERGENCECTRL_ANCHOR: return "convergencectrl_anchor";
+  case ISD::CONVERGENCECTRL_ENTRY:  return "convergencectrl_entry";
+  case ISD::CONVERGENCECTRL_LOOP:   return "convergencectrl_loop";
+
   // Bit manipulation
   case ISD::ABS:                        return "abs";
   case ISD::BITREVERSE:                 return "bitreverse";
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 0cc426254806dd..0324022d979296 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -303,8 +303,9 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   llvm_unreachable(nullptr);
 }
 
-void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                                   SDNode *Node) const {
+void TargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   assert(!MI.hasPostISelHook() &&
          "If a target marks an instruction with 'hasPostISelHook', "
          "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
@@ -2324,6 +2325,21 @@ void SelectionDAGISel::Select_MEMBARRIER(SDNode *N) {
                        N->getOperand(0));
 }
 
+void SelectionDAGISel::Select_CONVERGENCECTRL_ANCHOR(SDNode *N) {
+  CurDAG->SelectNodeTo(N, TargetOpcode::CONVERGENCECTRL_ANCHOR,
+                       N->getValueType(0));
+}
+
+void SelectionDAGISel::Select_CONVERGENCECTRL_ENTRY(SDNode *N) {
+  CurDAG->SelectNodeTo(N, TargetOpcode::CONVERGENCECTRL_ENTRY,
+                       N->getValueType(0));
+}
+
+void SelectionDAGISel::Select_CONVERGENCECTRL_LOOP(SDNode *N) {
+  CurDAG->SelectNodeTo(N, TargetOpcode::CONVERGENCECTRL_LOOP,
+                       N->getValueType(0), N->getOperand(0));
+}
+
 void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
                                                 SDValue OpVal, SDLoc DL) {
   SDNode *OpNode = OpVal.getNode();
@@ -3003,6 +3019,15 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
   case ISD::JUMP_TABLE_DEBUG_INFO:
     Select_JUMP_TABLE_DEBUG_INFO(NodeToMatch);
     return;
+  case ISD::CONVERGENCECTRL_ANCHOR:
+    Select_CONVERGENCECTRL_ANCHOR(NodeToMatch);
+    return;
+  case ISD::CONVERGENCECTRL_ENTRY:
+    Select_CONVERGENCECTRL_ENTRY(NodeToMatch);
+    return;
+  case ISD::CONVERGENCECTRL_LOOP:
+    Select_CONVERGENCECTRL_LOOP(NodeToMatch);
+    return;
   }
 
   assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 80f595ac4d4d9c..8454e9a3009db0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5517,8 +5517,8 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
 }
 
 void TargetLowering::CollectTargetIntrinsicOperands(
-    const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const {
-}
+    const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG,
+    function_ref<SDValue(const Value *)> getValue) const {}
 
 std::pair<unsigned, const TargetRegisterClass *>
 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index cd810f0b43e50d..cfbbb6683023e3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2610,7 +2610,19 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
 
 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
-  unsigned Opcode;
+  unsigned Opcode = 0;
+  SDNode *ConvGlueNode = N->getGluedNode();
+  if (ConvGlueNode) {
+    // FIXME: Possibly iterate over multiple glue nodes?
+    assert(ConvGlueNode->getOpcode() == AMDGPUISD::CONVERGENCECTRL_GLUE);
+    ConvGlueNode = ConvGlueNode->getOperand(0).getNode();
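+    // Wrap the token in a target glue pseudo so that it survives selection
+    // and can later become an implicit use in AdjustInstrPostInstrSelection.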
+    ConvGlueNode = CurDAG->getMachineNode(AMDGPU::CONVERGENCECTRL_GLUE, {},
+                                          MVT::Glue, SDValue(ConvGlueNode, 0));
+  } else {
+    ConvGlueNode = nullptr;
+  }
   switch (IntrID) {
   case Intrinsic::amdgcn_wqm:
     Opcode = AMDGPU::WQM;
@@ -2627,7 +2637,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
     break;
   case Intrinsic::amdgcn_interp_p1_f16:
     SelectInterpP1F16(N);
-    return;
+    break;
   case Intrinsic::amdgcn_inverse_ballot:
     switch (N->getOperand(1).getValueSizeInBits()) {
     case 32:
@@ -2642,11 +2652,19 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
     break;
   default:
     SelectCode(N);
-    return;
+    break;
+  }
+
+  if (Opcode) {
+    SDValue Src = N->getOperand(1);
+    CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
   }
 
-  SDValue Src = N->getOperand(1);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
+  if (ConvGlueNode) {
+    SmallVector<SDValue, 4> NewOps(N->op_begin(), N->op_end());
+    NewOps.push_back(SDValue(ConvGlueNode, 0));
+    CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), NewOps);
+  }
 }
 
 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d4df9b6f49eb11..d882df72081de5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -901,6 +901,20 @@ bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const
   return true;
 }
 
+void AMDGPUTargetLowering::CollectTargetIntrinsicOperands(
+    const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG,
+    function_ref<SDValue(const Value *)> getValue) const {
+  if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+    auto *Token = Bundle->Inputs[0].get();
+    SDValue ConvControlToken = getValue(Token);
+    // FIXME: Possibly handle the case where the last node in Ops is already a
+    // glue node?
+    ConvControlToken = DAG.getNode(AMDGPUISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue,
+                                   ConvControlToken);
+    Ops.push_back(ConvControlToken);
+  }
+}
+
 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
   // Truncate is just accessing a subregister.
 
@@ -5209,6 +5223,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(IF)
   NODE_NAME_CASE(ELSE)
   NODE_NAME_CASE(LOOP)
+  NODE_NAME_CASE(CONVERGENCECTRL_GLUE)
   NODE_NAME_CASE(CALL)
   NODE_NAME_CASE(TC_RETURN)
   NODE_NAME_CASE(TC_RETURN_GFX)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 51f95ef97a3308..98a02ebce9d25e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -192,6 +192,10 @@ class AMDGPUTargetLowering : public TargetLowering {
   bool isZExtFree(Type *Src, Type *Dest) const override;
   bool isZExtFree(EVT Src, EVT Dest) const override;
 
+  void CollectTargetIntrinsicOperands(
+      const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG,
+      function_ref<SDValue(const Value *)> getValue) const override;
+
   SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                bool LegalOperations, bool ForCodeSize,
                                NegatibleCost &Cost,
@@ -397,6 +401,8 @@ enum NodeType : unsigned {
   ELSE,
   LOOP,
 
+  CONVERGENCECTRL_GLUE,
+
   // A uniform kernel return that terminates the wavefront.
   ENDPGM,
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 324285e580bbaf..0ac91c9d235a2d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -66,6 +66,9 @@ def AMDGPUif : SDNode<"AMDGPUISD::IF", AMDGPUIfOp, [SDNPHasChain]>;
 def AMDGPUelse : SDNode<"AMDGPUISD::ELSE", AMDGPUElseOp, [SDNPHasChain]>;
 def AMDGPUloop : SDNode<"AMDGPUISD::LOOP", AMDGPULoopOp, [SDNPHasChain]>;
 
+def AMDGPUConvGlue : SDNode<"AMDGPUISD::CONVERGENCECTRL_GLUE",
+  SDTypeProfile<0, 1, [SDTCisVT<0, untyped>]>>;
+
 def callseq_start : SDNode<"ISD::CALLSEQ_START",
   SDCallSeqStart<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>,
   [SDNPHasChain, SDNPOutGlue]
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4681004d3ba74f..9b77425646a780 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -98,6 +98,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
 
   addRegisterClass(MVT::f64, V64RegClass);
   addRegisterClass(MVT::v2f32, V64RegClass);
+  addRegisterClass(MVT::Untyped, V64RegClass);
 
   addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
   addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));
@@ -3640,6 +3641,11 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
     Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
   }
 
+  if (!IsTailCall)
+    Ops.push_back(CLI.ConvergenceControlToken);
+
   if (IsTailCall) {
     // Each tail call may have to adjust the stack by a different amount, so
     // this information must travel along with the operation for eventual
@@ -4877,8 +4881,24 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     MachineInstrBuilder MIB;
     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
 
-    for (const MachineOperand &MO : MI.operands())
-      MIB.add(MO);
+    // Copy all operands except the convergence token, which is operand 2 by
+    // construction and is handled separately below.
+    for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+      MachineOperand &MO = MI.getOperand(I);
+      if (I != 2)
+        MIB.add(MO);
+    }
+
+    MachineOperand &MO = MI.getOperand(2);
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    // The token operand is always a register, whose definition is IMPLICIT_DEF
+    // iff there was no token on the call.
+    if (MachineInstr *Def = MRI.getVRegDef(MO.getReg())) {
+      if (Def->getOpcode() != TargetOpcode::IMPLICIT_DEF) {
+        MO.setImplicit();
+        MIB.add(MO);
+      }
+    }
 
     MIB.cloneMemRefs(MI);
     MI.eraseFromParent();
@@ -14241,8 +14263,9 @@ void SITargetLowering::AddIMGInit(MachineInstr &MI) const {
 
 /// Assign the register class depending on the number of
 /// bits set in the writemask
-void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                                     SDNode *Node) const {
+void SITargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
 
   MachineFunction *MF = MI.getParent()->getParent();
@@ -14310,6 +14333,22 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
       AddIMGInit(MI);
     TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::vaddr);
   }
+
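+  // Do not mark the token register as killed at the glue pseudo; the
+  // convergent instruction that follows still reads it as an implicit use.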
+  if (MI.getOpcode() == AMDGPU::CONVERGENCECTRL_GLUE) {
+    MI.getOperand(0).setIsKill(false);
+  }
+
+  if (SDNode *GluedNode = Node->getGluedNode()) {
+    // FIXME: Possibly iterate over multiple glue nodes?
+    if (GluedNode->getOpcode() == ~(unsigned)AMDGPU::CONVERGENCECTRL_GLUE) {
+      Register VReg = getVR(GluedNode->getOperand(0));
+      MachineOperand MO = MachineOperand::CreateReg(VReg, /* isDef = */ false,
+                                                    /* isImp = */ true);
+      MI.addOperand(MO);
+    }
+  }
 }
 
 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 746a88c5ea13a3..f6444e790803ea 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -455,8 +455,9 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
   SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
   void AddIMGInit(MachineInstr &MI) const;
-  void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                     SDNode *Node) const override;
+  void AdjustInstrPostInstrSelection(
+      MachineInstr &MI, SDNode *Node,
+      function_ref<Register(SDValue)> getVR) const override;
 
   SDNode *legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
 
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 9362fe5d9678b4..2de75704c9bb0b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -612,8 +612,8 @@ def SI_RETURN : SPseudoInstSI <
 // This version is only needed so we can fill in the output register
 // in the custom inserter.
 def SI_CALL_ISEL : SPseudoInstSI <
-  (outs), (ins SSrc_b64:$src0, unknown:$callee),
-  [(AMDGPUcall i64:$src0, tglobaladdr:$callee)]> {
+  (outs), (ins SSrc_b64:$src0, unknown:$callee, unknown:$token),
+  [(AMDGPUcall i64:$src0, tglobaladdr:$callee, untyped:$token)]> {
   let Size = 4;
   let isCall = 1;
   let SchedRW = [WriteBranch];
@@ -623,10 +623,15 @@ def SI_CALL_ISEL : SPseudoInstSI <
 }
 
 def : GCNPat<
-  (AMDGPUcall i64:$src0, (i64 0)),
-  (SI_CALL_ISEL $src0, (i64 0))
+  (AMDGPUcall i64:$src0, (i64 0), untyped:$token),
+  (SI_CALL_ISEL $src0, (i64 0), untyped:$token)
 >;
 
+// def : GCNPat<
+//   (AMDGPUcall i64:$src0, (i64 0)),
+//   (SI_CALL_ISEL $src0, (i64 0), undef_tied_input)
+// >;
+
 // Wrapper around s_swappc_b64 with extra $callee parameter to track
 // the called function after regalloc.
 def SI_CALL : SPseudoInstSI <
@@ -715,6 +720,15 @@ multiclass si_cs_chain_tc_patterns<
 defm : si_cs_chain_tc_patterns<i32>;
 defm : si_cs_chain_tc_patterns<i64>;
 
+def CONVERGENCECTRL_GLUE : VPseudoInstSI <
+  (outs), (ins unknown:$token)> {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let isConvergent = 1;
+  let hasPostISelHook = 1;
+}
+
 def ADJCALLSTACKUP : SPseudoInstSI<
   (outs), (ins i32imm:$amt0, i32imm:$amt1),
   [(callseq_start timm:$amt0, timm:$amt1)],
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 9c19091e3d583f..ac04ebcf2a7c43 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -246,6 +246,7 @@ def V_READFIRSTLANE_B32 :
   let VALU = 1;
   let Uses = [EXEC];
   let isConvergent = 1;
+  let hasPostISelHook = 1;
 
   bits<8> vdst;
   bits<9> src0;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 45d04c0669aea0..e22a34b2f37cbf 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -12324,8 +12324,9 @@ static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
   }
 }
 
-void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                                      SDNode *Node) const {
+void ARMTargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   if (MI.getOpcode() == ARM::MEMCPY) {
     attachMEMCPYScratchRegs(Subtarget, MI, Node);
     return;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 6c2b92de7a1df2..f7756a3b4805e1 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -419,8 +419,9 @@ class VectorType;
     EmitInstrWithCustomInserter(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const override;
 
-    void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                       SDNode *Node) const override;
+    void AdjustInstrPostInstrSelection(
+        MachineInstr &MI, SDNode *Node,
+        function_ref<Register(SDValue)> getVR) const override;
 
     SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
     SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index e950b44341c922..17e152e36b9316 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3831,8 +3831,9 @@ bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
   return true;
 }
 
-void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-      SDNode *Node) const {
+void HexagonTargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   AdjustHvxInstrPostInstrSelection(MI, Node);
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 8c7d0b70f38578..8a04d4fbffb7ee 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -342,8 +342,9 @@ class HexagonTargetLowering : public TargetLowering {
   bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                              EVT NewVT) const override;
 
-  void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                     SDNode *Node) const override;
+  void AdjustInstrPostInstrSelection(
+      MachineInstr &MI, SDNode *Node,
+      function_ref<Register(SDValue)> getVR) const override;
 
   // Handling of atomic RMW instructions.
   Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index f09d52aa5fd641..88ad4a72ffeba6 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3102,8 +3102,9 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
     Ops.push_back(InGlue);
 }
 
-void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                                       SDNode *Node) const {
+void MipsTargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   switch (MI.getOpcode()) {
     default:
       return;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 81d37ff2f065fe..288d57800c148b 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -340,8 +340,9 @@ class TargetRegisterClass;
     EmitInstrWithCustomInserter(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const override;
 
-    void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                       SDNode *Node) const override;
+    void AdjustInstrPostInstrSelection(
+        MachineInstr &MI, SDNode *Node,
+        function_ref<Register(SDValue)> getVR) const override;
 
     void HandleByVal(CCState *, unsigned &, Align) const override;
 
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1c6460f8149b4b..b2d5748446cf7c 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -16793,9 +16793,9 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
 }
 
-void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
-                                              SmallVectorImpl<SDValue> &Ops,
-                                              SelectionDAG &DAG) const {
+void PPCTargetLowering::CollectTargetIntrinsicOperands(
+    const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG,
+    function_ref<SDValue(const Value *)> getValue) const {
   if (I.getNumOperands() <= 1)
     return;
   if (!isa<ConstantSDNode>(Ops[1].getNode()))
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index d8679dcf401808..0cc2494494e8c1 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -992,9 +992,9 @@ namespace llvm {
       return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
     }
 
-    void CollectTargetIntrinsicOperands(const CallInst &I,
-                                 SmallVectorImpl<SDValue> &Ops,
-                                 SelectionDAG &DAG) const override;
+    void CollectTargetIntrinsicOperands(
+        const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG,
+        function_ref<SDValue(const Value *)> getValue) const override;
 
     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 392ceeb537b692..4c9a384fc4e95b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16599,8 +16599,9 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   }
 }
 
-void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                                        SDNode *Node) const {
+void RISCVTargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   // Add FRM dependency to any instructions with dynamic rounding mode.
   int Idx = RISCV::getNamedOperandIdx(MI.getOpcode(), RISCV::OpName::frm);
   if (Idx < 0) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index f5764e7d4ba8cf..3de9a6c6578bba 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -580,8 +580,9 @@ class RISCVTargetLowering : public TargetLowering {
   EmitInstrWithCustomInserter(MachineInstr &MI,
                               MachineBasicBlock *BB) const override;
 
-  void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                     SDNode *Node) const override;
+  void AdjustInstrPostInstrSelection(
+      MachineInstr &MI, SDNode *Node,
+      function_ref<Register(SDValue)> getVR) const override;
 
   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                          EVT VT) const override;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 4f080147921105..9fccdf99a82102 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -3642,8 +3642,9 @@ void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
     return TargetLowering::insertSSPDeclarations(M);
 }
 
-void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                                        SDNode *Node) const {
+void SparcTargetLowering::AdjustInstrPostInstrSelection(
+    MachineInstr &MI, SDNode *Node,
+    function_ref<Register(SDValue)> getVR) const {
   assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
   // If the result is dead, replace it with %g0.
   if (!Node->hasAnyUseOfValue(0))
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 15d09bc9309754..7e09c4373995bb 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -223,8 +223,9 @@ namespace llvm {
     MachineBasicBlock *expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned BROpcode) const;
 
-    void AdjustInstrPostInstrSelection(MachineInstr &MI,
-                                       SDNode *Node) const override;
+    void AdjustInstrPostInstrSelection(
+        MachineInstr &MI, SDNode *Node,
+        function_ref<Register(SDValue)> getVR) const override;
   };
 } // end namespace llvm
 
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
new file mode 100644
index 00000000000000..844671d98eae21
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -0,0 +1,40 @@
+; RUN: llc -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
+; RUN: llc -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
+
+; CHECK-LABEL: name:            basic_call
+;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
+;        ISEL:    {{.*}} SI_CALL_ISEL {{.*}}, @foo, killed [[TOKEN]], csr_amdgpu, {{.*}}
+;      DEADMI:    {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit killed [[TOKEN]]
+define i32 @basic_call(i32 %src) #0 {
+  %t = call token @llvm.experimental.convergence.entry()
+  %r = call i32 @foo(i32 %src) [ "convergencectrl"(token %t) ]
+  ret i32 %r
+}
+
+; CHECK-LABEL: name:            basic_intrinsic
+;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+;        ISEL:    CONVERGENCECTRL_GLUE [[TOKEN]]
+;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
+;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+
+define i32 @basic_intrinsic(i32 %src) #0 {
+  %t = call token @llvm.experimental.convergence.anchor()
+  %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+  ret i32 %r
+}
+
+define i32 @uncontrolled_call(i32 %src) #0 {
+  %r = call i32 @foo(i32 %src)
+  ret i32 %r
+}
+
+declare i32 @foo(i32 %x) #0
+
+declare i32 @llvm.amdgcn.readfirstlane(i32) #0
+
+declare token @llvm.experimental.convergence.entry()
+declare token @llvm.experimental.convergence.anchor()
+declare token @llvm.experimental.convergence.loop()
+
+attributes #0 = { nounwind readnone convergent }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
index f75d83ab6cbb54..5c7e77a530073e 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
@@ -92,6 +92,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
   ; DAGISEL-GFX11-NEXT:   $vgpr5 = COPY [[COPY2]]
   ; DAGISEL-GFX11-NEXT:   $vgpr6 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr7 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -121,6 +122,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
   ; DAGISEL-GFX10-NEXT:   $vgpr5 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr6 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr7 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -232,6 +234,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
   ; DAGISEL-GFX11-NEXT:   $vgpr9 = COPY [[COPY2]]
   ; DAGISEL-GFX11-NEXT:   $vgpr10 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr11 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -269,6 +272,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
   ; DAGISEL-GFX10-NEXT:   $vgpr9 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr10 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr11 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -400,6 +404,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
   ; DAGISEL-GFX11-NEXT:   $vgpr11 = COPY [[COPY2]]
   ; DAGISEL-GFX11-NEXT:   $vgpr12 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr13 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -449,6 +454,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
   ; DAGISEL-GFX10-NEXT:   $vgpr11 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr12 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr13 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -500,6 +506,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
   ; DAGISEL-GFX11-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
   ; DAGISEL-GFX11-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -517,6 +524,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
   ; DAGISEL-GFX10-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -568,6 +576,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
   ; DAGISEL-GFX11-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
   ; DAGISEL-GFX11-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -585,6 +594,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
   ; DAGISEL-GFX10-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -636,6 +646,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
   ; DAGISEL-GFX11-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
   ; DAGISEL-GFX11-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -653,6 +664,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
   ; DAGISEL-GFX10-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -704,6 +716,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
   ; DAGISEL-GFX11-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
   ; DAGISEL-GFX11-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -721,6 +734,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
   ; DAGISEL-GFX10-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr0 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr1 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -856,6 +870,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
   ; DAGISEL-GFX11-NEXT:   $vgpr13 = COPY [[COPY2]]
   ; DAGISEL-GFX11-NEXT:   $vgpr14 = COPY [[COPY1]]
   ; DAGISEL-GFX11-NEXT:   $vgpr15 = COPY [[COPY]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -901,6 +916,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
   ; DAGISEL-GFX10-NEXT:   $vgpr13 = COPY [[COPY2]]
   ; DAGISEL-GFX10-NEXT:   $vgpr14 = COPY [[COPY1]]
   ; DAGISEL-GFX10-NEXT:   $vgpr15 = COPY [[COPY]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
@@ -2464,6 +2480,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
   ; DAGISEL-GFX11-NEXT:   $vgpr29 = COPY [[COPY134]]
   ; DAGISEL-GFX11-NEXT:   $vgpr30 = COPY [[COPY133]]
   ; DAGISEL-GFX11-NEXT:   $vgpr31 = COPY [[COPY132]]
+  ; DAGISEL-GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX11-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   ; DAGISEL-GFX11-NEXT:   ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX11-NEXT:   S_ENDPGM 0
@@ -2810,6 +2827,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
   ; DAGISEL-GFX10-NEXT:   $vgpr29 = COPY [[COPY134]]
   ; DAGISEL-GFX10-NEXT:   $vgpr30 = COPY [[COPY133]]
   ; DAGISEL-GFX10-NEXT:   $vgpr31 = COPY [[COPY132]]
+  ; DAGISEL-GFX10-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; DAGISEL-GFX10-NEXT:   $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   ; DAGISEL-GFX10-NEXT:   ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
   ; DAGISEL-GFX10-NEXT:   S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
index 03c85b4470628f..406d2fa9922557 100644
--- a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
+++ b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
@@ -67,6 +67,7 @@ define amdgpu_kernel void @test_kernel(i32 %val) #0 {
 ; CHECK-NEXT:    ; implicit-def: $sgpr15
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    s_or_saveexec_b64 s[34:35], -1
diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
index f4114a01e9b486..c12723c1eb5334 100644
--- a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
@@ -27,7 +27,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
 ; CHECK-LABEL: csr_vgpr_spill_fp_callee:
 ; CHECK:       ; %bb.0: ; %bb
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_mov_b32 s18, s33
+; CHECK-NEXT:    s_mov_b32 s24, s33
 ; CHECK-NEXT:    s_mov_b32 s33, s32
 ; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
 ; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
@@ -43,6 +43,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
 ; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; clobber csr v40
@@ -54,7 +55,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
 ; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT:    s_mov_b32 s33, s18
+; CHECK-NEXT:    s_mov_b32 s33, s24
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
 bb:
@@ -73,9 +74,9 @@ define amdgpu_kernel void @kernel_call() {
 ; CHECK-NEXT:    s_addc_u32 s1, s1, 0
 ; CHECK-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
 ; CHECK-NEXT:    v_writelane_b32 v3, s16, 0
-; CHECK-NEXT:    s_or_saveexec_b64 s[24:25], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[26:27], -1
 ; CHECK-NEXT:    buffer_store_dword v3, off, s[0:3], 0 offset:4 ; 4-byte Folded Spill
-; CHECK-NEXT:    s_mov_b64 exec, s[24:25]
+; CHECK-NEXT:    s_mov_b64 exec, s[26:27]
 ; CHECK-NEXT:    s_mov_b32 s13, s15
 ; CHECK-NEXT:    s_mov_b32 s12, s14
 ; CHECK-NEXT:    v_readlane_b32 s14, v3, 0
@@ -92,10 +93,11 @@ define amdgpu_kernel void @kernel_call() {
 ; CHECK-NEXT:    ; implicit-def: $sgpr15
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; CHECK-NEXT:    s_or_saveexec_b64 s[24:25], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[26:27], -1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], 0 offset:4 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_mov_b64 exec, s[24:25]
+; CHECK-NEXT:    s_mov_b64 exec, s[26:27]
 ; CHECK-NEXT:    ; kill: killed $vgpr0
 ; CHECK-NEXT:    s_endpgm
 bb:
@@ -160,6 +162,7 @@ define amdgpu_kernel void @kernel_tailcall() {
 ; CHECK-NEXT:    ; implicit-def: $sgpr15
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    s_or_saveexec_b64 s[24:25], -1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], 0 offset:4 ; 4-byte Folded Reload
@@ -188,7 +191,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
 ; CHECK-LABEL: caller_save_vgpr_spill_fp_tail_call:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_mov_b32 s18, s33
+; CHECK-NEXT:    s_mov_b32 s24, s33
 ; CHECK-NEXT:    s_mov_b32 s33, s32
 ; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
 ; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -203,6 +206,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
 ; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
 ; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
@@ -210,7 +214,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
 ; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT:    s_mov_b32 s33, s18
+; CHECK-NEXT:    s_mov_b32 s33, s24
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
 entry:
@@ -222,7 +226,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
 ; CHECK-LABEL: caller_save_vgpr_spill_fp:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_mov_b32 s19, s33
+; CHECK-NEXT:    s_mov_b32 s25, s33
 ; CHECK-NEXT:    s_mov_b32 s33, s32
 ; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
 ; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -237,6 +241,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
 ; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    v_readlane_b32 s31, v2, 1
 ; CHECK-NEXT:    v_readlane_b32 s30, v2, 0
@@ -244,7 +249,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
 ; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s33 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT:    s_mov_b32 s33, s19
+; CHECK-NEXT:    s_mov_b32 s33, s25
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
 entry:
@@ -262,9 +267,9 @@ define protected amdgpu_kernel void @kernel() {
 ; CHECK-NEXT:    s_addc_u32 s1, s1, 0
 ; CHECK-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
 ; CHECK-NEXT:    v_writelane_b32 v3, s16, 0
-; CHECK-NEXT:    s_or_saveexec_b64 s[24:25], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[26:27], -1
 ; CHECK-NEXT:    buffer_store_dword v3, off, s[0:3], 0 offset:4 ; 4-byte Folded Spill
-; CHECK-NEXT:    s_mov_b64 exec, s[24:25]
+; CHECK-NEXT:    s_mov_b64 exec, s[26:27]
 ; CHECK-NEXT:    s_mov_b32 s13, s15
 ; CHECK-NEXT:    s_mov_b32 s12, s14
 ; CHECK-NEXT:    v_readlane_b32 s14, v3, 0
@@ -281,11 +286,12 @@ define protected amdgpu_kernel void @kernel() {
 ; CHECK-NEXT:    ; implicit-def: $sgpr15
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    ; kill: def $vgpr1 killed $vgpr0 killed $exec
-; CHECK-NEXT:    s_or_saveexec_b64 s[24:25], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[26:27], -1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], 0 offset:4 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_mov_b64 exec, s[24:25]
+; CHECK-NEXT:    s_mov_b64 exec, s[26:27]
 ; CHECK-NEXT:    ; kill: killed $vgpr0
 ; CHECK-NEXT:    s_endpgm
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
index 9999cb9173b5db..34e67d0993fb7a 100644
--- a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
@@ -32,6 +32,7 @@ define hidden void @_ZL3barv() #0 !dbg !1644 {
 ; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:  .Ltmp1:
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
index 16550fc9588aef..9ca4dae0993b84 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
@@ -16,7 +16,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
 ; GCN-LABEL: spill_sgpr_with_no_lower_vgpr_available:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s18, s33
+; GCN-NEXT:    s_mov_b32 s24, s33
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_or_saveexec_b64 s[16:17], -1
 ; GCN-NEXT:    buffer_store_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Spill
@@ -150,6 +150,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
 ; GCN-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; GCN-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; GCN-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; GCN-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GCN-NEXT:    v_readlane_b32 s31, v255, 1
@@ -269,7 +270,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
 ; GCN-NEXT:    buffer_load_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT:    s_mov_b32 s33, s18
+; GCN-NEXT:    s_mov_b32 s33, s24
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %alloca = alloca i32, align 4, addrspace(5)
@@ -310,7 +311,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
 ; GCN-LABEL: spill_to_lowest_available_vgpr:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s18, s33
+; GCN-NEXT:    s_mov_b32 s24, s33
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_or_saveexec_b64 s[16:17], -1
 ; GCN-NEXT:    buffer_store_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -443,6 +444,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
 ; GCN-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; GCN-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; GCN-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; GCN-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GCN-NEXT:    v_readlane_b32 s31, v254, 1
@@ -561,7 +563,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
 ; GCN-NEXT:    buffer_load_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT:    s_mov_b32 s33, s18
+; GCN-NEXT:    s_mov_b32 s33, s24
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %alloca = alloca i32, align 4, addrspace(5)
@@ -1528,7 +1530,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
 ; GCN-LABEL: spill_sgpr_no_free_vgpr_ipra:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s18, s33
+; GCN-NEXT:    s_mov_b32 s24, s33
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_add_i32 s32, s32, 0x7400
 ; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -1666,6 +1668,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
 ; GCN-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; GCN-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; GCN-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; GCN-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GCN-NEXT:    s_mov_b64 s[4:5], exec
 ; GCN-NEXT:    s_mov_b64 exec, 1
@@ -1798,7 +1801,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
 ; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:440 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT:    s_mov_b32 s33, s18
+; GCN-NEXT:    s_mov_b32 s33, s24
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   call void @child_function_ipra()
diff --git a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
index 36b5e2a00f6d4d..19418aaf7cef8a 100644
--- a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
+++ b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
@@ -752,13 +752,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
 ; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
 ; WAVE32-O0:       ; %bb.0:
 ; WAVE32-O0-NEXT:    s_mov_b32 s32, 0x1200
-; WAVE32-O0-NEXT:    s_getpc_b64 s[20:21]
-; WAVE32-O0-NEXT:    s_mov_b32 s20, s0
-; WAVE32-O0-NEXT:    s_load_dwordx4 s[20:23], s[20:21], 0x0
+; WAVE32-O0-NEXT:    s_getpc_b64 s[24:25]
+; WAVE32-O0-NEXT:    s_mov_b32 s24, s0
+; WAVE32-O0-NEXT:    s_load_dwordx4 s[24:27], s[24:25], 0x0
 ; WAVE32-O0-NEXT:    s_waitcnt lgkmcnt(0)
-; WAVE32-O0-NEXT:    s_bitset0_b32 s23, 21
-; WAVE32-O0-NEXT:    s_add_u32 s20, s20, s11
-; WAVE32-O0-NEXT:    s_addc_u32 s21, s21, 0
+; WAVE32-O0-NEXT:    s_bitset0_b32 s27, 21
+; WAVE32-O0-NEXT:    s_add_u32 s24, s24, s11
+; WAVE32-O0-NEXT:    s_addc_u32 s25, s25, 0
 ; WAVE32-O0-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
 ; WAVE32-O0-NEXT:    s_mov_b32 s14, s10
 ; WAVE32-O0-NEXT:    s_mov_b32 s13, s9
@@ -771,17 +771,17 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
 ; WAVE32-O0-NEXT:    v_writelane_b32 v3, s0, 0
 ; WAVE32-O0-NEXT:    s_lshr_b32 s0, s0, 5
 ; WAVE32-O0-NEXT:    v_writelane_b32 v3, s0, 1
-; WAVE32-O0-NEXT:    s_or_saveexec_b32 s19, -1
-; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[20:23], 0 offset:132 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s19
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s20, -1
+; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[24:27], 0 offset:132 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s20
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v3, 42
-; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[20:23], 0 offset:4
+; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[24:27], 0 offset:4
 ; WAVE32-O0-NEXT:    s_waitcnt_vscnt null, 0x0
-; WAVE32-O0-NEXT:    s_mov_b64 s[0:1], s[20:21]
-; WAVE32-O0-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; WAVE32-O0-NEXT:    s_mov_b64 s[0:1], s[24:25]
+; WAVE32-O0-NEXT:    s_mov_b64 s[2:3], s[26:27]
 ; WAVE32-O0-NEXT:    s_mov_b32 s15, s32
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v3, 17
-; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[20:23], s15 offset:4
+; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[24:27], s15 offset:4
 ; WAVE32-O0-NEXT:    s_mov_b32 s15, stack_passed_argument@abs32@hi
 ; WAVE32-O0-NEXT:    s_mov_b32 s16, stack_passed_argument@abs32@lo
 ; WAVE32-O0-NEXT:    ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -854,10 +854,11 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v29, s18
 ; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; WAVE32-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT:    s_or_saveexec_b32 s19, -1
-; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[20:23], 0 offset:132 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s19
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s20, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[24:27], 0 offset:132 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s20
 ; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
 ; WAVE32-O0-NEXT:    v_readlane_b32 s1, v0, 1
 ; WAVE32-O0-NEXT:    v_readlane_b32 s0, v0, 0
@@ -972,6 +973,7 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
 ; WAVE64-O0-NEXT:    v_mov_b32_e32 v29, s18
 ; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
 ; WAVE64-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; WAVE64-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
 ; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[24:27], 0 offset:132 ; 4-byte Folded Reload
@@ -1068,7 +1070,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
 ; WAVE32-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
 ; WAVE32-O0:       ; %bb.0:
 ; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-O0-NEXT:    s_mov_b32 s25, s33
+; WAVE32-O0-NEXT:    s_mov_b32 s26, s33
 ; WAVE32-O0-NEXT:    s_mov_b32 s33, s32
 ; WAVE32-O0-NEXT:    s_xor_saveexec_b32 s16, -1
 ; WAVE32-O0-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1082,9 +1084,9 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
 ; WAVE32-O0-NEXT:    v_writelane_b32 v0, s16, 0
 ; WAVE32-O0-NEXT:    s_lshr_b32 s16, s16, 5
 ; WAVE32-O0-NEXT:    v_writelane_b32 v0, s16, 1
-; WAVE32-O0-NEXT:    s_or_saveexec_b32 s24, -1
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s25, -1
 ; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s24
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s25
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v0, 42
 ; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33
 ; WAVE32-O0-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -1161,10 +1163,11 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v29, s18
 ; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
 ; WAVE32-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; WAVE32-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT:    s_or_saveexec_b32 s24, -1
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s25, -1
 ; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s24
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s25
 ; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
 ; WAVE32-O0-NEXT:    v_readlane_b32 s5, v0, 1
 ; WAVE32-O0-NEXT:    v_readlane_b32 s4, v0, 0
@@ -1180,14 +1183,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
 ; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
 ; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s4
 ; WAVE32-O0-NEXT:    s_add_i32 s32, s32, 0xffffee00
-; WAVE32-O0-NEXT:    s_mov_b32 s33, s25
+; WAVE32-O0-NEXT:    s_mov_b32 s33, s26
 ; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
 ; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; WAVE64-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
 ; WAVE64-O0:       ; %bb.0:
 ; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE64-O0-NEXT:    s_mov_b32 s19, s33
+; WAVE64-O0-NEXT:    s_mov_b32 s28, s33
 ; WAVE64-O0-NEXT:    s_mov_b32 s33, s32
 ; WAVE64-O0-NEXT:    s_xor_saveexec_b64 s[16:17], -1
 ; WAVE64-O0-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1280,6 +1283,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
 ; WAVE64-O0-NEXT:    v_mov_b32_e32 v29, s18
 ; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
 ; WAVE64-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; WAVE64-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[26:27], -1
 ; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
@@ -1299,7 +1303,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
 ; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
 ; WAVE64-O0-NEXT:    s_mov_b64 exec, s[4:5]
 ; WAVE64-O0-NEXT:    s_add_i32 s32, s32, 0xffffdc00
-; WAVE64-O0-NEXT:    s_mov_b32 s33, s19
+; WAVE64-O0-NEXT:    s_mov_b32 s33, s28
 ; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
 ; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
   %alloca = alloca [32 x i32], addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
index 963575d2acc51d..d73cc0a9deea68 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
@@ -233,10 +233,10 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
   ; SI-NEXT: bb.1.Flow:
   ; SI-NEXT:   successors: %bb.2(0x40000000), %bb.10(0x40000000)
   ; SI-NEXT: {{  $}}
-  ; SI-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %47:vgpr_32, %bb.0, %4, %bb.9
-  ; SI-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %49:vgpr_32, %bb.9
-  ; SI-NEXT:   [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %51:vgpr_32, %bb.9
-  ; SI-NEXT:   [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %53:vgpr_32, %bb.9
+  ; SI-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %49:vgpr_32, %bb.0, %4, %bb.9
+  ; SI-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %51:vgpr_32, %bb.9
+  ; SI-NEXT:   [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %53:vgpr_32, %bb.9
+  ; SI-NEXT:   [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %55:vgpr_32, %bb.9
   ; SI-NEXT:   [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
   ; SI-NEXT:   S_BRANCH %bb.2
   ; SI-NEXT: {{  $}}
@@ -249,8 +249,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
   ; SI-NEXT: bb.3:
   ; SI-NEXT:   successors: %bb.4(0x80000000)
   ; SI-NEXT: {{  $}}
-  ; SI-NEXT:   [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %55:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
-  ; SI-NEXT:   [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %57:vgpr_32, %bb.4, [[PHI1]], %bb.2
+  ; SI-NEXT:   [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %57:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+  ; SI-NEXT:   [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %59:vgpr_32, %bb.4, [[PHI1]], %bb.2
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
   ; SI-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -286,8 +286,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
   ; SI-NEXT: bb.7:
   ; SI-NEXT:   successors: %bb.8(0x80000000)
   ; SI-NEXT: {{  $}}
-  ; SI-NEXT:   [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %59:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
-  ; SI-NEXT:   [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %61:vgpr_32, %bb.8, [[COPY4]], %bb.6
+  ; SI-NEXT:   [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %61:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+  ; SI-NEXT:   [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %63:vgpr_32, %bb.8, [[COPY4]], %bb.6
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub0, implicit $exec
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub1, implicit $exec
   ; SI-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
@@ -356,9 +356,9 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
   ; SI-NEXT: bb.1.Flow:
   ; SI-NEXT:   successors: %bb.2(0x40000000), %bb.10(0x40000000)
   ; SI-NEXT: {{  $}}
-  ; SI-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %48:vgpr_32, %bb.0, %4, %bb.9
-  ; SI-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %50:vgpr_32, %bb.9
-  ; SI-NEXT:   [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %52:vgpr_32, %bb.9
+  ; SI-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %50:vgpr_32, %bb.0, %4, %bb.9
+  ; SI-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %52:vgpr_32, %bb.9
+  ; SI-NEXT:   [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %54:vgpr_32, %bb.9
   ; SI-NEXT:   [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
   ; SI-NEXT:   S_BRANCH %bb.2
   ; SI-NEXT: {{  $}}
@@ -371,7 +371,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
   ; SI-NEXT: bb.3:
   ; SI-NEXT:   successors: %bb.4(0x80000000)
   ; SI-NEXT: {{  $}}
-  ; SI-NEXT:   [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %54:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+  ; SI-NEXT:   [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub0, implicit $exec
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub1, implicit $exec
   ; SI-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -407,7 +407,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
   ; SI-NEXT: bb.7:
   ; SI-NEXT:   successors: %bb.8(0x80000000)
   ; SI-NEXT: {{  $}}
-  ; SI-NEXT:   [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+  ; SI-NEXT:   [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %58:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
   ; SI-NEXT:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
   ; SI-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
index 1f286b766102c7..42736feb723a76 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
@@ -54,6 +54,7 @@ define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id
 ; CHECK-NEXT:    s_mov_b32 s15, 42
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    s_or_saveexec_b32 s33, -1
diff --git a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
index 7eabe982ff2bcd..3a33194f17c875 100644
--- a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
@@ -101,6 +101,7 @@ define void @test() #0 {
 ; GCN-O0-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; GCN-O0-NEXT:    s_mov_b64 s[0:1], s[20:21]
 ; GCN-O0-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; GCN-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GCN-O0-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GCN-O0-NEXT:    s_or_saveexec_b64 s[28:29], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index ab7d3ca0ab425f..0c60e0bde24f6e 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -406,6 +406,7 @@ define amdgpu_gfx void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 inreg
 ; GFX9-O0-NEXT:    s_mov_b64 s[0:1], s[44:45]
 ; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[46:47]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr44_sgpr45
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[42:43]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v0
 ; GFX9-O0-NEXT:    v_add_u32_e64 v1, v1, v3
@@ -632,6 +633,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 i
 ; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[38:39]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr36_sgpr37
 ; GFX9-O0-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[46:47], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index 7a489f343d6f42..0fad0f380a43fa 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -412,6 +412,7 @@ define amdgpu_kernel void @call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr15
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v31, v3
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v6
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
 ; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[24:27], 0 offset:4 ; 4-byte Folded Reload
@@ -654,6 +655,7 @@ define amdgpu_kernel void @call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %ar
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v31, v3
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v6
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GFX9-O0-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
@@ -1280,6 +1282,7 @@ define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 in
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr15
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v31, v3
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v6
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
 ; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[24:27], 0 offset:4 ; 4-byte Folded Reload
@@ -1522,6 +1525,7 @@ define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i6
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v31, v3
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v6
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GFX9-O0-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
diff --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
index 58b3ee485ea4b1..4e72a5ac5ede31 100644
--- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
+++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
@@ -577,15 +577,15 @@ define double @fcmp_nnan(double %a, double %y, double %z) {
 ; FP library calls can have fast-math-flags.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
-; FMFDEBUG:         ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
-; FMFDEBUG:         ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
-; FMFDEBUG:         f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
+; FMFDEBUG:         ch,glue = PPCISD::CALL_NOP {{t[0-9]+}}, TargetGlobalAddress:i64<ptr @log2>
+; FMFDEBUG:         ch,glue = callseq_end [[T15:t[0-9]+]], TargetConstant:i64<32>, TargetConstant:i64<0>, [[T15]]:1
+; FMFDEBUG:         f64,ch,glue = CopyFromReg [[T16:t[0-9]+]], Register:f64 $f1, [[T16]]:1
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'log2_approx:'
 
 ; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
-; GLOBALDEBUG:         ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
-; GLOBALDEBUG:         ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
-; GLOBALDEBUG:         f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
+; GLOBALDEBUG:         ch,glue = PPCISD::CALL_NOP {{t[0-9]+}}, TargetGlobalAddress:i64<ptr @log2>
+; GLOBALDEBUG:         ch,glue = callseq_end [[T15:t[0-9]+]], TargetConstant:i64<32>, TargetConstant:i64<0>, [[T15]]:1
+; GLOBALDEBUG:         f64,ch,glue = CopyFromReg [[T16:t[0-9]+]], Register:f64 $f1, [[T16]]:1
 ; GLOBALDEBUG:       Type-legalized selection DAG: %bb.0 'log2_approx:'
 
 declare double @log2(double)
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td
index 2d968977701fda..e8432564a0448a 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td
@@ -28,7 +28,7 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 
 // CHECK:      const int64_t *GenMyCombiner::getMatchTable() const {
 // CHECK-NEXT:   constexpr static int64_t MatchTable0[] = {
-// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/65, 180, /*)*//*default:*//*Label 2*/ 192,
+// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/68, 183, /*)*//*default:*//*Label 2*/ 192,
 // CHECK-NEXT:     /*TargetOpcode::G_UNMERGE_VALUES*//*Label 0*/ 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 // CHECK-NEXT:     /*TargetOpcode::G_FNEG*//*Label 1*/ 165,
 // CHECK-NEXT:     // Label 0: @120
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td
index 0495a66a7c577d..c8f46a97e3bf82 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td
@@ -32,14 +32,15 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
   CImmInstTest1
 ]>;
 
+
 // CHECK:      const int64_t *GenMyCombiner::getMatchTable() const {
 // CHECK-NEXT:   constexpr static int64_t MatchTable0[] = {
-// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/19, 126, /*)*//*default:*//*Label 3*/ 194,
-// CHECK-NEXT:     /*TargetOpcode::COPY*//*Label 0*/ 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_CONSTANT*//*Label 1*/ 138, 0, 0, 0, 0, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_ZEXT*//*Label 2*/ 165,
-// CHECK-NEXT:     // Label 0: @112
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 4*/ 137, // Rule ID 0 //
+// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/19, 129, /*)*//*default:*//*Label 3*/ 197,
+// CHECK-NEXT:     /*TargetOpcode::COPY*//*Label 0*/ 115, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_CONSTANT*//*Label 1*/ 141, 0, 0, 0, 0, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_ZEXT*//*Label 2*/ 168,
+// CHECK-NEXT:     // Label 0: @115
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 4*/ 140, // Rule ID 0 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule0Enabled,
 // CHECK-NEXT:       GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
 // CHECK-NEXT:       // MIs[0] a
@@ -51,10 +52,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       GIR_AddImm, /*InsnID*/0, /*Imm*/0,
 // CHECK-NEXT:       GIR_EraseFromParent, /*InsnID*/0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 4: @137
+// CHECK-NEXT:     // Label 4: @140
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 1: @138
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 5*/ 164, // Rule ID 2 //
+// CHECK-NEXT:     // Label 1: @141
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 5*/ 167, // Rule ID 2 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule2Enabled,
 // CHECK-NEXT:       GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
 // CHECK-NEXT:       // MIs[0] a
@@ -66,10 +67,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       GIR_AddCImm, /*InsnID*/0, /*Type*/GILLT_s32, /*Imm*/42,
 // CHECK-NEXT:       GIR_EraseFromParent, /*InsnID*/0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 5: @164
+// CHECK-NEXT:     // Label 5: @167
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 2: @165
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 6*/ 193, // Rule ID 1 //
+// CHECK-NEXT:     // Label 2: @168
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 6*/ 196, // Rule ID 1 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule1Enabled,
 // CHECK-NEXT:       // MIs[0] a
 // CHECK-NEXT:       // No operand predicates
@@ -83,9 +84,9 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       GIR_AddTempRegister, /*InsnID*/0, /*TempRegID*/0, /*TempRegFlags*/0,
 // CHECK-NEXT:       GIR_EraseFromParent, /*InsnID*/0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 6: @193
+// CHECK-NEXT:     // Label 6: @196
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 3: @194
+// CHECK-NEXT:     // Label 3: @197
 // CHECK-NEXT:     GIM_Reject,
 // CHECK-NEXT:     };
 // CHECK-NEXT:   return MatchTable0;
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td
index 5cb9206ca5f2ca..ad7cf69a8adb78 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td
@@ -28,7 +28,7 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 
 // CHECK:      const int64_t *GenMyCombiner::getMatchTable() const {
 // CHECK-NEXT:   constexpr static int64_t MatchTable0[] = {
-// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/118, 181, /*)*//*default:*//*Label 3*/ 152,
+// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/121, 184, /*)*//*default:*//*Label 3*/ 152,
 // CHECK-NEXT:     /*TargetOpcode::G_TRUNC*//*Label 0*/ 68, 0, 0, 0, 0, 0, 0,
 // CHECK-NEXT:     /*TargetOpcode::G_ZEXT*//*Label 1*/ 93, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 // CHECK-NEXT:     /*TargetOpcode::G_FPEXT*//*Label 2*/ 127,
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-variadics.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-variadics.td
index 5226795cd9d352..ccf946bb539551 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-variadics.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-variadics.td
@@ -37,7 +37,7 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 
 // CHECK:      const int64_t *GenMyCombiner::getMatchTable() const {
 // CHECK-NEXT:   constexpr static int64_t MatchTable0[] = {
-// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/65, 69, /*)*//*default:*//*Label 2*/ 51,
+// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/68, 72, /*)*//*default:*//*Label 2*/ 51,
 // CHECK-NEXT:     /*TargetOpcode::G_UNMERGE_VALUES*//*Label 0*/ 9, 0, 0,
 // CHECK-NEXT:     /*TargetOpcode::G_BUILD_VECTOR*//*Label 1*/ 30,
 // CHECK-NEXT:     // Label 0: @9
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
index a74f7fbbe1cce6..6f272d2f979928 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
@@ -132,15 +132,15 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // Verify match table.
 // CHECK:      const int64_t *GenMyCombiner::getMatchTable() const {
 // CHECK-NEXT:   constexpr static int64_t MatchTable0[] = {
-// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/19, 126, /*)*//*default:*//*Label 6*/ 267,
-// CHECK-NEXT:     /*TargetOpcode::COPY*//*Label 0*/ 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_AND*//*Label 1*/ 141, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_STORE*//*Label 2*/ 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_TRUNC*//*Label 3*/ 216, 0, 0, 0, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_SEXT*//*Label 4*/ 231, 0,
-// CHECK-NEXT:     /*TargetOpcode::G_ZEXT*//*Label 5*/ 239,
-// CHECK-NEXT:     // Label 0: @112
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 7*/ 133, // Rule ID 4 //
+// CHECK-NEXT:     GIM_SwitchOpcode, /*MI*/0, /*[*/19, 129, /*)*//*default:*//*Label 6*/ 270,
+// CHECK-NEXT:     /*TargetOpcode::COPY*//*Label 0*/ 115, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_AND*//*Label 1*/ 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_STORE*//*Label 2*/ 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_TRUNC*//*Label 3*/ 219, 0, 0, 0, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_SEXT*//*Label 4*/ 234, 0,
+// CHECK-NEXT:     /*TargetOpcode::G_ZEXT*//*Label 5*/ 242,
+// CHECK-NEXT:     // Label 0: @115
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 7*/ 136, // Rule ID 4 //
 // CHECK-NEXT:       GIM_CheckFeatures, GIFBS_HasAnswerToEverything,
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule3Enabled,
 // CHECK-NEXT:       // MIs[0] a
@@ -155,8 +155,8 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       // Combiner Rule #3: InstTest1
 // CHECK-NEXT:       GIR_CustomAction, GICXXCustomAction_CombineApplyGICombiner0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 7: @133
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 8*/ 140, // Rule ID 3 //
+// CHECK-NEXT:     // Label 7: @136
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 8*/ 143, // Rule ID 3 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule2Enabled,
 // CHECK-NEXT:       // MIs[0] a
 // CHECK-NEXT:       // No operand predicates
@@ -165,10 +165,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       // Combiner Rule #2: InstTest0
 // CHECK-NEXT:       GIR_CustomAction, GICXXCustomAction_CombineApplyGICombiner1,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 8: @140
+// CHECK-NEXT:     // Label 8: @143
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 1: @141
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 9*/ 180, // Rule ID 6 //
+// CHECK-NEXT:     // Label 1: @144
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 9*/ 183, // Rule ID 6 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule5Enabled,
 // CHECK-NEXT:       GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
 // CHECK-NEXT:       // MIs[0] dst
@@ -186,10 +186,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // z
 // CHECK-NEXT:       GIR_EraseFromParent, /*InsnID*/0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 9: @180
+// CHECK-NEXT:     // Label 9: @183
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 2: @181
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 10*/ 215, // Rule ID 5 //
+// CHECK-NEXT:     // Label 2: @184
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 10*/ 218, // Rule ID 5 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule4Enabled,
 // CHECK-NEXT:       // MIs[0] tmp
 // CHECK-NEXT:       GIM_RecordInsnIgnoreCopies, /*DefineMI*/1, /*MI*/0, /*OpIdx*/0, // MIs[1]
@@ -207,32 +207,32 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       GIR_EraseFromParent, /*InsnID*/0,
 // CHECK-NEXT:       GIR_CustomAction, GICXXCustomAction_CombineApplyGICombiner2,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 10: @215
+// CHECK-NEXT:     // Label 10: @218
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 3: @216
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 11*/ 223, // Rule ID 0 //
+// CHECK-NEXT:     // Label 3: @219
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 11*/ 226, // Rule ID 0 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule0Enabled,
 // CHECK-NEXT:       // Combiner Rule #0: WipOpcodeTest0; wip_match_opcode 'G_TRUNC'
 // CHECK-NEXT:       GIR_CustomAction, GICXXCustomAction_CombineApplyGICombiner0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 11: @223
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 12*/ 230, // Rule ID 1 //
+// CHECK-NEXT:     // Label 11: @226
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 12*/ 233, // Rule ID 1 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule1Enabled,
 // CHECK-NEXT:       // Combiner Rule #1: WipOpcodeTest1; wip_match_opcode 'G_TRUNC'
 // CHECK-NEXT:       GIR_CustomAction, GICXXCustomAction_CombineApplyGICombiner0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 12: @230
+// CHECK-NEXT:     // Label 12: @233
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 4: @231
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 13*/ 238, // Rule ID 2 //
+// CHECK-NEXT:     // Label 4: @234
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 13*/ 241, // Rule ID 2 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule1Enabled,
 // CHECK-NEXT:       // Combiner Rule #1: WipOpcodeTest1; wip_match_opcode 'G_SEXT'
 // CHECK-NEXT:       GIR_CustomAction, GICXXCustomAction_CombineApplyGICombiner0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 13: @238
+// CHECK-NEXT:     // Label 13: @241
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 5: @239
-// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 14*/ 266, // Rule ID 7 //
+// CHECK-NEXT:     // Label 5: @242
+// CHECK-NEXT:     GIM_Try, /*On fail goto*//*Label 14*/ 269, // Rule ID 7 //
 // CHECK-NEXT:       GIM_CheckSimplePredicate, GICXXPred_Simple_IsRule6Enabled,
 // CHECK-NEXT:       // MIs[0] dst
 // CHECK-NEXT:       // No operand predicates
@@ -247,9 +247,9 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
 // CHECK-NEXT:       GIR_AddTempRegister, /*InsnID*/0, /*TempRegID*/0, /*TempRegFlags*/0,
 // CHECK-NEXT:       GIR_EraseFromParent, /*InsnID*/0,
 // CHECK-NEXT:       GIR_Done,
-// CHECK-NEXT:     // Label 14: @266
+// CHECK-NEXT:     // Label 14: @269
 // CHECK-NEXT:     GIM_Reject,
-// CHECK-NEXT:     // Label 6: @267
+// CHECK-NEXT:     // Label 6: @270
 // CHECK-NEXT:     GIM_Reject,
 // CHECK-NEXT:     };
 // CHECK-NEXT:   return MatchTable0;

>From 68dff42c9befc5a7dc870104d03c3a48b2c9ecdf Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Wed, 3 Jan 2024 14:21:51 +0530
Subject: [PATCH 2/8] cleanup for clang-format

---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 12 +++++++++---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp        |  4 ++--
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index ea28a2432c70e0..393898b362092e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -164,6 +164,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
     if (cast<ConstantSDNode>(this)->isOpaque())
       return "OpaqueTargetConstant";
     return "TargetConstant";
+
+    // clang-format off
+
   case ISD::TargetConstantFP:           return "TargetConstantFP";
   case ISD::TargetGlobalAddress:        return "TargetGlobalAddress";
   case ISD::TargetGlobalTLSAddress:     return "TargetGlobalTLSAddress";
@@ -446,9 +449,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::SET_FPMODE:                 return "set_fpmode";
   case ISD::RESET_FPMODE:               return "reset_fpmode";
 
-  case ISD::CONVERGENCECTRL_ANCHOR: return "convergencectrl_anchor";
-  case ISD::CONVERGENCECTRL_ENTRY:  return "convergencectrl_entry";
-  case ISD::CONVERGENCECTRL_LOOP:   return "convergencectrl_loop";
+  // Convergence control instructions
+  case ISD::CONVERGENCECTRL_ANCHOR:     return "convergencectrl_anchor";
+  case ISD::CONVERGENCECTRL_ENTRY:      return "convergencectrl_entry";
+  case ISD::CONVERGENCECTRL_LOOP:       return "convergencectrl_loop";
 
   // Bit manipulation
   case ISD::ABS:                        return "abs";
@@ -465,6 +469,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::INIT_TRAMPOLINE:            return "init_trampoline";
   case ISD::ADJUST_TRAMPOLINE:          return "adjust_trampoline";
 
+    // clang-format on
+
   case ISD::CONDCODE:
     switch (cast<CondCodeSDNode>(this)->get()) {
     default: llvm_unreachable("Unknown setcc condition!");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d882df72081de5..ddd3e20c441df5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -909,8 +909,8 @@ void AMDGPUTargetLowering::CollectTargetIntrinsicOperands(
     SDValue ConvControlToken = getValue(Token);
     // FIXME: Possibly handle the case where the last node in Ops is already a
     // glue node?
-    ConvControlToken = DAG.getNode(AMDGPUISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue,
-                                   ConvControlToken);
+    ConvControlToken = DAG.getNode(AMDGPUISD::CONVERGENCECTRL_GLUE, {},
+                                   MVT::Glue, ConvControlToken);
     Ops.push_back(ConvControlToken);
   }
 }

>From f25ce5a8ce133e068a3b771feb5ea10836d438f9 Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Mon, 18 Dec 2023 14:06:22 +0530
Subject: [PATCH 3/8] minor review comments

---
 llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index cfbbb6683023e3..fe99061a741c0b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2610,7 +2610,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
 
 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
-  unsigned Opcode = 0;
+  unsigned Opcode = AMDGPU::INSTRUCTION_LIST_END;
   SDNode *ConvGlueNode = N->getGluedNode();
   if (ConvGlueNode) {
     // FIXME: Possibly iterate over multiple glue nodes?
@@ -2637,7 +2637,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
     break;
   case Intrinsic::amdgcn_interp_p1_f16:
     SelectInterpP1F16(N);
-    break;
+    return;
   case Intrinsic::amdgcn_inverse_ballot:
     switch (N->getOperand(1).getValueSizeInBits()) {
     case 32:
@@ -2655,7 +2655,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
     break;
   }
 
-  if (Opcode) {
+  if (Opcode != AMDGPU::INSTRUCTION_LIST_END) {
     SDValue Src = N->getOperand(1);
     CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
   }

>From 5c53c2fa461a3b2e1c9c8ee75c5b5247b3237788 Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Tue, 2 Jan 2024 13:42:56 +0530
Subject: [PATCH 4/8] [AMDGPU] Do not preserve UniformityInfo (#76696)

UniformityInfo has a transitive dependence on CycleInfo. A transform may
change the CFG in trivial ways that do not affect uniformity, but that
can leave cycles in a slightly inconsistent state. In the absence of
updates to CycleInfo, it's cleaner to just invalidate both analyses.
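
For illustration, a minimal sketch (hypothetical pass name; not part of this
patch) of what that policy means for a CFG-rewriting legacy pass: it simply
omits UniformityInfo from its preserved set, so both UniformityInfo and the
underlying CycleInfo are recomputed on demand.

    #include "llvm/Analysis/UniformityAnalysis.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/Pass.h"

    namespace {
    // Hypothetical transform that changes blocks and branch edges.
    struct HypotheticalCFGRewrite : public llvm::FunctionPass {
      static char ID;
      HypotheticalCFGRewrite() : llvm::FunctionPass(ID) {}

      bool runOnFunction(llvm::Function &F) override { return false; }

      void getAnalysisUsage(llvm::AnalysisUsage &AU) const override {
        AU.addRequired<llvm::DominatorTreeWrapperPass>();
        AU.addPreserved<llvm::DominatorTreeWrapperPass>();
        // Deliberately no AU.addPreserved<llvm::UniformityInfoWrapperPass>():
        // neither UniformityInfo nor the CycleInfo it transitively depends on
        // can be updated incrementally, so both are invalidated here.
      }
    };
    char HypotheticalCFGRewrite::ID = 0;
    } // end anonymous namespace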
---
 llvm/include/llvm/ADT/GenericUniformityImpl.h            | 6 ++++++
 llvm/lib/Target/AMDGPU/AMDGPURewriteUndefForPHI.cpp      | 1 -
 llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp | 3 ---
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/llvm/include/llvm/ADT/GenericUniformityImpl.h b/llvm/include/llvm/ADT/GenericUniformityImpl.h
index b7d0a1228ebfc0..d397b937d78ccc 100644
--- a/llvm/include/llvm/ADT/GenericUniformityImpl.h
+++ b/llvm/include/llvm/ADT/GenericUniformityImpl.h
@@ -33,6 +33,12 @@
 /// the propagation of the impact of divergent control flow on the divergence of
 /// values (sync dependencies).
 ///
+/// NOTE: In general, no interface exists for a transform to update
+/// (Machine)UniformityInfo. Additionally, (Machine)CycleAnalysis is a
+/// transitive dependence, but it also does not provide an interface for
+/// updating itself. Given that, transforms should not preserve uniformity in
+/// their getAnalysisUsage() callback.
+///
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_ADT_GENERICUNIFORMITYIMPL_H
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteUndefForPHI.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteUndefForPHI.cpp
index 459400e3359ca1..79e9312034da45 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteUndefForPHI.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteUndefForPHI.cpp
@@ -85,7 +85,6 @@ class AMDGPURewriteUndefForPHILegacy : public FunctionPass {
     AU.addRequired<DominatorTreeWrapperPass>();
 
     AU.addPreserved<DominatorTreeWrapperPass>();
-    AU.addPreserved<UniformityInfoWrapperPass>();
     AU.setPreservesCFG();
   }
 };
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
index 9bc3ba161c9ebe..1bfb7c0edd80a9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
@@ -109,9 +109,6 @@ void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
     // FIXME: preserve PostDominatorTreeWrapperPass
   }
 
-  // No divergent values are changed, only blocks and branch edges.
-  AU.addPreserved<UniformityInfoWrapperPass>();
-
   // We preserve the non-critical-edgeness property
   AU.addPreservedID(BreakCriticalEdgesID);
 

>From a4863c1e4e8372e3ec00d3b078e7e45e5ee1498b Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Sat, 23 Dec 2023 07:58:43 +0530
Subject: [PATCH 5/8] [LLVM] ConvergenceControlInst as a derived class of
 IntrinsicInst (#76230)

---
 .../llvm/IR/GenericConvergenceVerifierImpl.h  | 13 +---------
 llvm/include/llvm/IR/IntrinsicInst.h          | 24 +++++++++++++++++++
 2 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h b/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
index e2ece30b186418..f6eb5066d5535d 100644
--- a/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
+++ b/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
@@ -29,7 +29,7 @@
 #include "llvm/ADT/GenericConvergenceVerifier.h"
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/Twine.h"
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
 
 #define Check(C, ...)                                                          \
   do {                                                                         \
@@ -48,17 +48,6 @@
   } while (false)
 
 namespace llvm {
-static bool isConvergenceControlIntrinsic(unsigned IntrinsicID) {
-  switch (IntrinsicID) {
-  default:
-    return false;
-  case Intrinsic::experimental_convergence_anchor:
-  case Intrinsic::experimental_convergence_entry:
-  case Intrinsic::experimental_convergence_loop:
-    return true;
-  }
-}
-
 template <class ContextT> void GenericConvergenceVerifier<ContextT>::clear() {
   Tokens.clear();
   CI.clear();
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index c26ecef6eaaee1..8c078c750a4cfd 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1712,6 +1712,30 @@ class AssumeInst : public IntrinsicInst {
   }
 };
 
+/// Check if \p ID corresponds to a convergence control intrinsic.
+static inline bool isConvergenceControlIntrinsic(unsigned IntrinsicID) {
+  switch (IntrinsicID) {
+  default:
+    return false;
+  case Intrinsic::experimental_convergence_anchor:
+  case Intrinsic::experimental_convergence_entry:
+  case Intrinsic::experimental_convergence_loop:
+    return true;
+  }
+}
+
+/// Represents calls to the llvm.experimental.convergence.* intrinsics.
+class ConvergenceControlInst : public IntrinsicInst {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    return isConvergenceControlIntrinsic(I->getIntrinsicID());
+  }
+
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 } // end namespace llvm
 
 #endif // LLVM_IR_INTRINSICINST_H

>From 0a1104effd5eb2124f7963871d507bcced0cc7de Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Fri, 22 Dec 2023 15:57:47 +0530
Subject: [PATCH 6/8] handle .loop intrinsic and add more tests
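
Besides lowering llvm.experimental.convergence.loop to
ISD::CONVERGENCECTRL_LOOP, this maps the IR token type to MVT::Untyped so
that tokens used for convergence control can live in virtual registers. A
minimal sketch of the resulting EVT behavior (the helper name is
hypothetical):

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    // With this patch, EVT::getEVT() maps the IR token type to MVT::Untyped
    // instead of treating it as an unknown type.
    static bool tokenLowersToUntyped(LLVMContext &Ctx) {
      return EVT::getEVT(Type::getTokenTy(Ctx)) == MVT::Untyped;
    }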

---
 .../llvm/CodeGen/FunctionLoweringInfo.h       | 10 +---
 .../SelectionDAG/FunctionLoweringInfo.cpp     | 10 ++++
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  5 +-
 llvm/lib/CodeGen/ValueTypes.cpp               |  2 +
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 12 ++++-
 llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h  |  1 +
 .../test/CodeGen/AMDGPU/convergence-tokens.ll | 49 +++++++++++++++++--
 7 files changed, 74 insertions(+), 15 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index 4c17e8dcc41a66..394ee1afb7df65 100644
--- a/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -214,15 +214,7 @@ class FunctionLoweringInfo {
 
   Register CreateRegs(Type *Ty, bool isDivergent = false);
 
-  Register InitializeRegForValue(const Value *V) {
-    // Tokens never live in vregs.
-    if (V->getType()->isTokenTy())
-      return 0;
-    Register &R = ValueMap[V];
-    assert(R == 0 && "Already initialized this value register!");
-    assert(VirtReg2Value.empty());
-    return R = CreateRegs(V);
-  }
+  Register InitializeRegForValue(const Value *V);
 
   /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
   /// register is a PHI destination and the PHI's LiveOutInfo is not valid.
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 1d0a03ccfcdc6a..d54ab6861fa926 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -387,6 +387,16 @@ Register FunctionLoweringInfo::CreateRegs(const Value *V) {
                                       !TLI->requiresUniformRegister(*MF, V));
 }
 
+Register FunctionLoweringInfo::InitializeRegForValue(const Value *V) {
+  // Tokens live in vregs only when used for convergence control.
+  if (V->getType()->isTokenTy() && !isa<ConvergenceControlInst>(V))
+    return 0;
+  Register &R = ValueMap[V];
+  assert(R == 0 && "Already initialized this value register!");
+  assert(VirtReg2Value.empty());
+  return R = CreateRegs(V);
+}
+
 /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
 /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
 /// the register's LiveOutInfo is for a smaller bit width, it is extended to
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 32251d240d457b..0d797dae302c1f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5982,7 +5982,10 @@ void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
     break;
   case Intrinsic::experimental_convergence_loop: {
-    assert(false && "coming soon");
+    auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
+    auto *Token = Bundle->Inputs[0].get();
+    setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
+                             getValue(Token)));
     break;
   }
   }
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index 2d16ff2dfb2fbf..99d1d19cf155d1 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -617,6 +617,8 @@ EVT EVT::getEVT(Type *Ty, bool HandleUnknown){
   switch (Ty->getTypeID()) {
   default:
     return MVT::getVT(Ty, HandleUnknown);
+  case Type::TokenTyID:
+    return MVT::Untyped;
   case Type::IntegerTyID:
     return getIntegerVT(Ty->getContext(), cast<IntegerType>(Ty)->getBitWidth());
   case Type::FixedVectorTyID:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 951ed9420594b1..6130f75e034461 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -245,6 +245,13 @@ static cl::opt<bool, true> LateCFGStructurize(
   cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
   cl::Hidden);
 
+// Disable structurizer-based control-flow lowering in order to test convergence
+// control tokens. This should eventually be replaced by the wave-transform.
+static cl::opt<bool, true> DisableStructurizer(
+    "amdgpu-disable-structurizer",
+    cl::desc("Disable structurizer for experiments; produces unusable code"),
+    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::Hidden);
+
 // Enable lib calls simplifications
 static cl::opt<bool> EnableLibCallSimplify(
   "amdgpu-simplify-libcall",
@@ -576,6 +583,7 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
 bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
 bool AMDGPUTargetMachine::EnableFunctionCalls = false;
 bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
+bool AMDGPUTargetMachine::DisableStructurizer = false;
 
 AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
 
@@ -1167,7 +1175,7 @@ bool GCNPassConfig::addPreISel() {
   // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
   // regions formed by them.
   addPass(&AMDGPUUnifyDivergentExitNodesID);
-  if (!LateCFGStructurize) {
+  if (!LateCFGStructurize && !DisableStructurizer) {
     if (EnableStructurizerWorkarounds) {
       addPass(createFixIrreduciblePass());
       addPass(createUnifyLoopExitsPass());
@@ -1175,7 +1183,7 @@ bool GCNPassConfig::addPreISel() {
     addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
   }
   addPass(createAMDGPUAnnotateUniformValues());
-  if (!LateCFGStructurize) {
+  if (!LateCFGStructurize && !DisableStructurizer) {
     addPass(createSIAnnotateControlFlowPass());
     // TODO: Move this right after structurizeCFG to avoid extra divergence
     // analysis. This depends on stopping SIAnnotateControlFlow from making
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 9051a61e65570c..cdc6f604a375a1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -37,6 +37,7 @@ class AMDGPUTargetMachine : public LLVMTargetMachine {
   static bool EnableLateStructurizeCFG;
   static bool EnableFunctionCalls;
   static bool EnableLowerModuleLDS;
+  static bool DisableStructurizer;
 
   AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
                       StringRef FS, TargetOptions Options,
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
index 844671d98eae21..5c7fd8d4f16cb9 100644
--- a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -1,5 +1,5 @@
-; RUN: llc -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
-; RUN: llc -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
+; RUN: llc --amdgpu-disable-structurizer -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
+; RUN: llc --amdgpu-disable-structurizer -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
 
 ; CHECK-LABEL: name:            basic_call
 ;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
@@ -16,18 +16,61 @@ define i32 @basic_call(i32 %src) #0 {
 ;        ISEL:    CONVERGENCECTRL_GLUE [[TOKEN]]
 ;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
 ;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
-
 define i32 @basic_intrinsic(i32 %src) #0 {
   %t = call token @llvm.experimental.convergence.anchor()
   %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
   ret i32 %r
 }
 
+; There's nothing to check here. The test is just meant to catch any crashes
+; when a convergent call has no token.
 define i32 @uncontrolled_call(i32 %src) #0 {
   %r = call i32 @foo(i32 %src)
   ret i32 %r
 }
 
+; CHECK-LABEL: name:            basic_branch
+;       CHECK:  bb.0.entry:
+;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+;       CHECK:  bb.1.then:
+;        ISEL:    CONVERGENCECTRL_GLUE [[TOKEN]]
+;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
+;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+define i32 @basic_branch(i32 %src, i1 %cond) #0 {
+entry:
+  %t = call token @llvm.experimental.convergence.anchor()
+  %x = add i32 %src, 1
+  br i1 %cond, label %then, label %else
+
+then:
+  %r = call i32 @llvm.amdgcn.readfirstlane(i32 %x) [ "convergencectrl"(token %t) ]
+  br label %else
+
+else:
+  %p = phi i32 [%r, %then], [%x, %entry]
+  ret i32 %p
+}
+
+; CHECK-LABEL: name:            basic_loop
+;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+;       CHECK:  bb.1.loop:
+;       CHECK:    [[LOOP:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_LOOP [[TOKEN]]
+;        ISEL:    CONVERGENCECTRL_GLUE [[LOOP]]
+;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
+;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+define i32 @basic_loop(i32 %src, i1 %cond) #0 {
+  %t1 = call token @llvm.experimental.convergence.anchor()
+  br label %loop
+
+loop:
+  %t2 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %t1) ]
+  %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+  br i1 %cond, label %loop, label %end
+
+end:
+  ret i32 %r
+}
+
 declare i32 @foo(i32 %x) #0
 
 declare i32 @llvm.amdgcn.readfirstlane(i32) #0

>From cf81594eaf33dfcabc724cdcf8114d3e7c01936e Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Wed, 3 Jan 2024 13:13:41 +0530
Subject: [PATCH 7/8] machine verifier for convergence tokens
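
The generic convergence verifier is specialized for MachineSSAContext and
driven from the machine verifier. A condensed sketch of the driver loop added
here (a fragment, not standalone code; FailureCB, MF and DT are as in
verifyConvergenceControl in the diff below): tokens are only allowed while
the function is still in SSA form, and the dominator tree is recomputed
lazily, only if any tokens were actually seen.

    MachineConvergenceVerifier CV;
    CV.initialize(&errs(), FailureCB, MF,
                  /*TokensAllowed=*/MF.getProperties().hasProperty(
                      MachineFunctionProperties::Property::IsSSA));
    for (const MachineBasicBlock &MBB : MF) {
      CV.visit(MBB);
      for (const MachineInstr &MI : MBB.instrs())
        CV.visit(MI);
    }
    if (CV.sawTokens()) {
      DT.recalculate(const_cast<MachineFunction &>(MF));
      CV.verify(DT);
    }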

---
 .../llvm/ADT/GenericConvergenceVerifier.h     |  9 +-
 .../llvm/CodeGen/MachineConvergenceVerifier.h | 28 ++++++
 .../llvm/IR/GenericConvergenceVerifierImpl.h  | 25 +++---
 llvm/lib/CodeGen/CMakeLists.txt               |  1 +
 .../CodeGen/MachineConvergenceVerifier.cpp    | 86 +++++++++++++++++++
 llvm/lib/CodeGen/MachineVerifier.cpp          | 34 ++++++++
 llvm/lib/IR/ConvergenceVerifier.cpp           | 31 +++++--
 llvm/lib/IR/Verifier.cpp                      |  2 +-
 .../AMDGPU/verify-convergencectrl/basic.mir   | 37 ++++++++
 .../AMDGPU/verify-convergencectrl/cycles.mir  | 52 +++++++++++
 .../AMDGPU/verify-convergencectrl/mixed2.mir  | 15 ++++
 .../AMDGPU/verify-convergencectrl/not-ssa.mir | 11 +++
 .../verify-convergencectrl/region-nesting.mir | 24 ++++++
 13 files changed, 335 insertions(+), 20 deletions(-)
 create mode 100644 llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h
 create mode 100644 llvm/lib/CodeGen/MachineConvergenceVerifier.cpp
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/not-ssa.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir

diff --git a/llvm/include/llvm/ADT/GenericConvergenceVerifier.h b/llvm/include/llvm/ADT/GenericConvergenceVerifier.h
index 0810a070132290..fe51b09650551f 100644
--- a/llvm/include/llvm/ADT/GenericConvergenceVerifier.h
+++ b/llvm/include/llvm/ADT/GenericConvergenceVerifier.h
@@ -32,11 +32,12 @@ template <typename ContextT> class GenericConvergenceVerifier {
 
   void initialize(raw_ostream *OS,
                   function_ref<void(const Twine &Message)> FailureCB,
-                  const FunctionT &F) {
+                  const FunctionT &F, bool TokensAllowed_) {
     clear();
     this->OS = OS;
     this->FailureCB = FailureCB;
     Context = ContextT(&F);
+    TokensAllowed = TokensAllowed_;
   }
 
   void clear();
@@ -52,6 +53,7 @@ template <typename ContextT> class GenericConvergenceVerifier {
   DominatorTreeT *DT;
   CycleInfoT CI;
   ContextT Context;
+  bool TokensAllowed;
 
   /// Whether the current function has convergencectrl operand bundles.
   enum {
@@ -60,6 +62,10 @@ template <typename ContextT> class GenericConvergenceVerifier {
     NoConvergence
   } ConvergenceKind = NoConvergence;
 
+  /// The control token operation performed by a convergence control intrinsic
+  /// in LLVM IR, or by a CONVERGENCECTRL* instruction in MIR.
+  enum ConvOpKind { CONV_ANCHOR, CONV_ENTRY, CONV_LOOP, CONV_NONE };
+
   // Cache token uses found so far. Note that we track the unique definitions
   // and not the token values.
   DenseMap<const InstructionT *, const InstructionT *> Tokens;
@@ -68,6 +74,7 @@ template <typename ContextT> class GenericConvergenceVerifier {
 
   static bool isInsideConvergentFunction(const InstructionT &I);
   static bool isConvergent(const InstructionT &I);
+  static ConvOpKind getConvOp(const InstructionT &I);
   const InstructionT *findAndCheckConvergenceTokenUsed(const InstructionT &I);
 
   void reportFailure(const Twine &Message, ArrayRef<Printable> Values);
diff --git a/llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h b/llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h
new file mode 100644
index 00000000000000..b2faa30816c682
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h
@@ -0,0 +1,28 @@
+//===- MachineConvergenceVerifier.h - Verify convergencectrl ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares the MIR specialization of the GenericConvergenceVerifier
+/// template.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECONVERGENCEVERIFIER_H
+#define LLVM_CODEGEN_MACHINECONVERGENCEVERIFIER_H
+
+#include "llvm/ADT/GenericConvergenceVerifier.h"
+#include "llvm/CodeGen/MachineSSAContext.h"
+
+namespace llvm {
+
+using MachineConvergenceVerifier =
+    GenericConvergenceVerifier<MachineSSAContext>;
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINECONVERGENCEVERIFIER_H
diff --git a/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h b/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
index f6eb5066d5535d..fd9c272632dae8 100644
--- a/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
+++ b/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
@@ -52,6 +52,7 @@ template <class ContextT> void GenericConvergenceVerifier<ContextT>::clear() {
   Tokens.clear();
   CI.clear();
   ConvergenceKind = NoConvergence;
+  TokensAllowed = false;
 }
 
 template <class ContextT>
@@ -61,12 +62,16 @@ void GenericConvergenceVerifier<ContextT>::visit(const BlockT &BB) {
 
 template <class ContextT>
 void GenericConvergenceVerifier<ContextT>::visit(const InstructionT &I) {
-  auto ID = ContextT::getIntrinsicID(I);
+  ConvOpKind ConvOp = getConvOp(I);
+  if (!TokensAllowed) {
+    Check(ConvOp == CONV_NONE, "Convergence control requires SSA.",
+          {Context.print(&I)});
+    return;
+  }
   auto *TokenDef = findAndCheckConvergenceTokenUsed(I);
-  bool IsCtrlIntrinsic = true;
 
-  switch (ID) {
-  case Intrinsic::experimental_convergence_entry:
+  switch (ConvOp) {
+  case CONV_ENTRY:
     Check(isInsideConvergentFunction(I),
           "Entry intrinsic can occur only in a convergent function.",
           {Context.print(&I)});
@@ -78,13 +83,13 @@ void GenericConvergenceVerifier<ContextT>::visit(const InstructionT &I) {
           "same basic block.",
           {Context.print(&I)});
     LLVM_FALLTHROUGH;
-  case Intrinsic::experimental_convergence_anchor:
+  case CONV_ANCHOR:
     Check(!TokenDef,
           "Entry or anchor intrinsic cannot have a convergencectrl token "
           "operand.",
           {Context.print(&I)});
     break;
-  case Intrinsic::experimental_convergence_loop:
+  case CONV_LOOP:
     Check(TokenDef, "Loop intrinsic must have a convergencectrl token operand.",
           {Context.print(&I)});
     Check(!SeenFirstConvOp,
@@ -93,14 +98,13 @@ void GenericConvergenceVerifier<ContextT>::visit(const InstructionT &I) {
           {Context.print(&I)});
     break;
   default:
-    IsCtrlIntrinsic = false;
     break;
   }
 
   if (isConvergent(I))
     SeenFirstConvOp = true;
 
-  if (TokenDef || IsCtrlIntrinsic) {
+  if (TokenDef || ConvOp != CONV_NONE) {
     Check(isConvergent(I),
           "Convergence control token can only be used in a convergent call.",
           {Context.print(&I)});
@@ -161,8 +165,7 @@ void GenericConvergenceVerifier<ContextT>::verify(const DominatorTreeT &DT) {
       return;
     }
 
-    Check(ContextT::getIntrinsicID(*User) ==
-              Intrinsic::experimental_convergence_loop,
+    Check(getConvOp(*User) == CONV_LOOP,
           "Convergence token used by an instruction other than "
           "llvm.experimental.convergence.loop in a cycle that does "
           "not contain the token's definition.",
@@ -199,7 +202,7 @@ void GenericConvergenceVerifier<ContextT>::verify(const DominatorTreeT &DT) {
     for (auto &I : *BB) {
       if (auto *Token = Tokens.lookup(&I))
         checkToken(Token, &I, LiveTokens);
-      if (isConvergenceControlIntrinsic(ContextT::getIntrinsicID(I)))
+      if (getConvOp(I) != CONV_NONE)
         LiveTokens.push_back(&I);
     }
 
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index df2d1831ee5fdb..88682e12a2b6a3 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -122,6 +122,7 @@ add_llvm_component_library(LLVMCodeGen
   MachineBranchProbabilityInfo.cpp
   MachineCFGPrinter.cpp
   MachineCombiner.cpp
+  MachineConvergenceVerifier.cpp
   MachineCopyPropagation.cpp
   MachineCSE.cpp
   MachineCheckDebugify.cpp
diff --git a/llvm/lib/CodeGen/MachineConvergenceVerifier.cpp b/llvm/lib/CodeGen/MachineConvergenceVerifier.cpp
new file mode 100644
index 00000000000000..2f384fe6204d1b
--- /dev/null
+++ b/llvm/lib/CodeGen/MachineConvergenceVerifier.cpp
@@ -0,0 +1,86 @@
+//===- MachineConvergenceVerifier.cpp - Verify convergencectrl -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Implements the MIR specialization of the GenericConvergenceVerifier.
+
+#include "llvm/CodeGen/MachineConvergenceVerifier.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineSSAContext.h"
+#include "llvm/IR/GenericConvergenceVerifierImpl.h"
+
+using namespace llvm;
+
+template <>
+auto GenericConvergenceVerifier<MachineSSAContext>::getConvOp(
+    const MachineInstr &MI) -> ConvOpKind {
+  switch (MI.getOpcode()) {
+  default:
+    return CONV_NONE;
+  case TargetOpcode::CONVERGENCECTRL_ENTRY:
+    return CONV_ENTRY;
+  case TargetOpcode::CONVERGENCECTRL_ANCHOR:
+    return CONV_ANCHOR;
+  case TargetOpcode::CONVERGENCECTRL_LOOP:
+    return CONV_LOOP;
+  }
+}
+
+template <>
+const MachineInstr *
+GenericConvergenceVerifier<MachineSSAContext>::findAndCheckConvergenceTokenUsed(
+    const MachineInstr &MI) {
+  const MachineRegisterInfo &MRI = Context.getFunction()->getRegInfo();
+  const MachineInstr *TokenDef = nullptr;
+
+  for (const MachineOperand &MO : MI.uses()) {
+    if (!MO.isReg())
+      continue;
+    Register OpReg = MO.getReg();
+    if (!OpReg.isVirtual())
+      continue;
+
+    const MachineInstr *Def = MRI.getVRegDef(OpReg);
+    if (!Def)
+      continue;
+    if (getConvOp(*Def) == CONV_NONE)
+      continue;
+
+    CheckOrNull(
+        MI.isConvergent(),
+        "Convergence control tokens can only be used by convergent operations.",
+        {Context.print(OpReg), Context.print(&MI)});
+
+    CheckOrNull(!TokenDef,
+                "An operation can use at most one convergence control token.",
+                {Context.print(OpReg), Context.print(&MI)});
+
+    TokenDef = Def;
+  }
+
+  if (TokenDef)
+    Tokens[&MI] = TokenDef;
+
+  return TokenDef;
+}
+
+template <>
+bool GenericConvergenceVerifier<MachineSSAContext>::isInsideConvergentFunction(
+    const MachineInstr &MI) {
+  // MachineFunction does not have any property to indicate whether it is
+  // convergent. Trivially return true so that the check always passes.
+  return true;
+}
+
+template <>
+bool GenericConvergenceVerifier<MachineSSAContext>::isConvergent(
+    const MachineInstr &MI) {
+  return MI.isConvergent();
+}
+
+template class llvm::GenericConvergenceVerifier<MachineSSAContext>;
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index da1d9c6f0679c7..13176d167ae03f 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -40,6 +40,8 @@
 #include "llvm/CodeGen/LiveVariables.h"
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineConvergenceVerifier.h"
+#include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -220,6 +222,11 @@ namespace {
     LiveStacks *LiveStks = nullptr;
     SlotIndexes *Indexes = nullptr;
 
+    // This is calculated only when trying to verify convergence control tokens.
+    // Similar to the LLVM IR verifier, we calculate this locally instead of
+    // relying on the pass manager.
+    MachineDomTree DT;
+
     void visitMachineFunctionBefore();
     void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
     void visitMachineBundleBefore(const MachineInstr *MI);
@@ -2934,7 +2941,34 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
   }
 }
 
+static void
+verifyConvergenceControl(const MachineFunction &MF, MachineDomTree &DT,
+                         std::function<void(const Twine &Message)> FailureCB) {
+  using MFP = MachineFunctionProperties::Property;
+  const MachineFunctionProperties &Properties = MF.getProperties();
+  bool TokensAllowed = Properties.hasProperty(MFP::IsSSA);
+
+  MachineConvergenceVerifier CV;
+  CV.initialize(&errs(), FailureCB, MF, TokensAllowed);
+
+  for (const auto &MBB : MF) {
+    CV.visit(MBB);
+    for (const auto &MI : MBB.instrs())
+      CV.visit(MI);
+  }
+
+  if (CV.sawTokens()) {
+    DT.recalculate(const_cast<MachineFunction &>(MF));
+    CV.verify(DT);
+  }
+}
+
 void MachineVerifier::visitMachineFunctionAfter() {
+  auto FailureCB = [this](const Twine &Message) {
+    report(Message.str().c_str(), MF);
+  };
+  verifyConvergenceControl(*MF, DT, FailureCB);
+
   calcRegsPassed();
 
   for (const MachineBasicBlock &MBB : *MF)
diff --git a/llvm/lib/IR/ConvergenceVerifier.cpp b/llvm/lib/IR/ConvergenceVerifier.cpp
index 336c202b6f94ce..03dfe674fd699d 100644
--- a/llvm/lib/IR/ConvergenceVerifier.cpp
+++ b/llvm/lib/IR/ConvergenceVerifier.cpp
@@ -14,6 +14,24 @@
 
 using namespace llvm;
 
+template <>
+auto GenericConvergenceVerifier<SSAContext>::getConvOp(const Instruction &I)
+    -> ConvOpKind {
+  const auto *CB = dyn_cast<CallBase>(&I);
+  if (!CB)
+    return CONV_NONE;
+  switch (CB->getIntrinsicID()) {
+  default:
+    return CONV_NONE;
+  case Intrinsic::experimental_convergence_anchor:
+    return CONV_ANCHOR;
+  case Intrinsic::experimental_convergence_entry:
+    return CONV_ENTRY;
+  case Intrinsic::experimental_convergence_loop:
+    return CONV_LOOP;
+  }
+}
+
 template <>
 const Instruction *
 GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
@@ -38,11 +56,10 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
   auto *Token = Bundle->Inputs[0].get();
   auto *Def = dyn_cast<Instruction>(Token);
 
-  CheckOrNull(
-      Def && isConvergenceControlIntrinsic(SSAContext::getIntrinsicID(*Def)),
-      "Convergence control tokens can only be produced by calls to the "
-      "convergence control intrinsics.",
-      {Context.print(Token), Context.print(&I)});
+  CheckOrNull(Def && getConvOp(*Def) != CONV_NONE,
+              "Convergence control tokens can only be produced by calls to the "
+              "convergence control intrinsics.",
+              {Context.print(Token), Context.print(&I)});
 
   if (Def)
     Tokens[&I] = Def;
@@ -52,14 +69,14 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
 
 template <>
 bool GenericConvergenceVerifier<SSAContext>::isInsideConvergentFunction(
-    const InstructionT &I) {
+    const Instruction &I) {
   auto *F = I.getFunction();
   return F->isConvergent();
 }
 
 template <>
 bool GenericConvergenceVerifier<SSAContext>::isConvergent(
-    const InstructionT &I) {
+    const Instruction &I) {
   if (auto *CB = dyn_cast<CallBase>(&I)) {
     return CB->isConvergent();
   }
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index b1d1075285c221..160eeebf40b527 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -408,7 +408,7 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
     auto FailureCB = [this](const Twine &Message) {
       this->CheckFailed(Message);
     };
-    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
+    ConvergenceVerifyHelper.initialize(OS, FailureCB, F, true);
 
     Broken = false;
     // FIXME: We strip const here because the inst visitor strips const.
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
new file mode 100644
index 00000000000000..7cb68865f9ba29
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
@@ -0,0 +1,37 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            basic
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    successors: %bb.1, %bb.2;
+    %0:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    ; CHECK: Entry intrinsic cannot be preceded by a convergent operation in the same basic block.
+    ; CHECK: CONVERGENCECTRL_ENTRY
+    %1:sgpr_64 = CONVERGENCECTRL_ENTRY
+    ; CHECK: Loop intrinsic cannot be preceded by a convergent operation in the same basic block.
+    ; CHECK: CONVERGENCECTRL_LOOP
+    %2:sgpr_64 = CONVERGENCECTRL_LOOP %0:sgpr_64
+    S_CBRANCH_EXECZ %bb.1, implicit $exec
+    S_BRANCH %bb.2
+
+  bb.1:
+    successors: %bb.2;
+    ; CHECK: Entry intrinsic can occur only in the entry block.
+    ; CHECK: CONVERGENCECTRL_ENTRY
+    %5:sgpr_64 = CONVERGENCECTRL_ENTRY
+
+  bb.2:
+    ; CHECK: Convergence control tokens can only be used by convergent operations.
+    ; CHECK: G_PHI
+    %6:sgpr_64 = G_PHI %0:sgpr_64, %bb.0, %0:sgpr_64, %bb.1
+    %7:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    %8:sgpr_64 = IMPLICIT_DEF
+    %4:sgpr_64 = SI_CALL %8:sgpr_64, 1, implicit %7:sgpr_64
+    ; CHECK: An operation can use at most one convergence control token.
+    ; CHECK: SI_CALL %{{[0-9]}}:sgpr_64, 2
+    %9:sgpr_64 = SI_CALL %8:sgpr_64, 2, implicit %7:sgpr_64, implicit %7:sgpr_64
+    ; CHECK: Cannot mix controlled and uncontrolled convergence in the same function.
+    ; CHECK: SI_CALL %{{[0-9]}}:sgpr_64, 3
+    %10:sgpr_64 = SI_CALL %8:sgpr_64, 3
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
new file mode 100644
index 00000000000000..34b152de9a97a6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
@@ -0,0 +1,52 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            cycles
+body:             |
+  bb.0:
+    %0:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    %1:sgpr_64 = IMPLICIT_DEF
+    S_CBRANCH_EXECZ %bb.9, implicit $exec
+    S_BRANCH %bb.1
+
+  bb.1:
+    S_CBRANCH_EXECZ %bb.8, implicit $exec
+    S_BRANCH %bb.5
+
+  bb.2:
+    S_CBRANCH_EXECZ %bb.3, implicit $exec
+    S_BRANCH %bb.4
+
+  bb.3:
+    ; CHECK: Cycle heart must dominate all blocks in the cycle.
+    ; Irreducible cycle: entries(bb.4 bb.3)
+    %3:sgpr_64 = CONVERGENCECTRL_LOOP %0:sgpr_64
+    S_BRANCH %bb.4
+
+  bb.4:
+    S_BRANCH %bb.3
+
+  bb.5:
+    S_CBRANCH_EXECZ %bb.6, implicit $exec
+    S_BRANCH %bb.2
+
+  bb.6:
+    S_BRANCH %bb.7
+
+  bb.7:
+    ; CHECK: Cycle heart must dominate all blocks in the cycle.
+    ; Reducible cycle: entries(bb.6) bb.7
+    %4:sgpr_64 = CONVERGENCECTRL_LOOP %0:sgpr_64
+    S_BRANCH %bb.6
+
+  bb.8:
+    ; CHECK: Two static convergence token uses in a cycle that does not contain either token's definition.
+    %5:sgpr_64 = CONVERGENCECTRL_LOOP %0:sgpr_64
+    %6:sgpr_64 = CONVERGENCECTRL_LOOP %0:sgpr_64
+    S_BRANCH %bb.8
+
+  bb.9:
+    ; CHECK: Convergence token used by an instruction other than llvm.experimental.convergence.loop in a cycle that does not contain the token's definition.
+    %7:sgpr_64 = G_SI_CALL %1:sgpr_64, 3, implicit %0:sgpr_64
+    S_BRANCH %bb.9
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
new file mode 100644
index 00000000000000..5339f86024e69a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
@@ -0,0 +1,15 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            mixed2
+body:             |
+  bb.0:
+    %0:sgpr_64 = IMPLICIT_DEF
+    %1:sgpr_64 = SI_CALL %0, 1
+    ; CHECK: Cannot mix controlled and uncontrolled convergence in the same function.
+    ; CHECK: CONVERGENCECTRL_ANCHOR
+    %2:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    ; CHECK: Cannot mix controlled and uncontrolled convergence in the same function.
+    ; CHECK: SI_CALL %{{[0-9]}}:sgpr_64, 2
+    %3:sgpr_64 = SI_CALL %0, 2, implicit %2:sgpr_64
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/not-ssa.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/not-ssa.mir
new file mode 100644
index 00000000000000..ad12cf3d100e25
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/not-ssa.mir
@@ -0,0 +1,11 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            not_ssa
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; CHECK: Convergence control requires SSA.
+    %0:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    %8:sgpr_64 = IMPLICIT_DEF
+    %8:sgpr_64 = IMPLICIT_DEF
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir
new file mode 100644
index 00000000000000..2503af8ab11115
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir
@@ -0,0 +1,24 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            region_nesting
+body:             |
+  bb.0:
+    %0:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    %1:sgpr_64 = CONVERGENCECTRL_ANCHOR
+    %2:sgpr_64 = IMPLICIT_DEF
+    %3:sgpr_64 = SI_CALL %2, 1, implicit %0:sgpr_64
+    ; CHECK: Convergence region is not well-nested.
+    ; CHECK: SI_CALL %{{[0-9]}}:sgpr_64, 2
+    %4:sgpr_64 = SI_CALL %2, 2, implicit %1:sgpr_64
+    S_CBRANCH_EXECZ %bb.1, implicit $exec
+    S_BRANCH %bb.2
+
+  bb.1:
+    %5:sgpr_64 = SI_CALL %2, 3, implicit %0:sgpr_64
+
+  bb.2:
+    ; CHECK: Convergence region is not well-nested.
+    ; CHECK: SI_CALL %{{[0-9]}}:sgpr_64, 4
+    %6:sgpr_64 = SI_CALL %2, 4, implicit %1:sgpr_64
+
+...

>From 06010d4add8a88dec82626f97c4268b7400ee88e Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Tue, 9 Jan 2024 12:06:41 +0530
Subject: [PATCH 8/8] make option ReallyHidden; move tests to
 test/MachineVerifier

---
 llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp                  | 2 +-
 .../convergencectrl/AMDGPU}/basic.mir                           | 0
 .../convergencectrl/AMDGPU}/cycles.mir                          | 0
 llvm/test/MachineVerifier/convergencectrl/AMDGPU/lit.local.cfg  | 2 ++
 .../convergencectrl/AMDGPU}/mixed2.mir                          | 0
 .../convergencectrl/AMDGPU}/not-ssa.mir                         | 0
 .../convergencectrl/AMDGPU}/region-nesting.mir                  | 0
 7 files changed, 3 insertions(+), 1 deletion(-)
 rename llvm/test/{CodeGen/AMDGPU/verify-convergencectrl => MachineVerifier/convergencectrl/AMDGPU}/basic.mir (100%)
 rename llvm/test/{CodeGen/AMDGPU/verify-convergencectrl => MachineVerifier/convergencectrl/AMDGPU}/cycles.mir (100%)
 create mode 100644 llvm/test/MachineVerifier/convergencectrl/AMDGPU/lit.local.cfg
 rename llvm/test/{CodeGen/AMDGPU/verify-convergencectrl => MachineVerifier/convergencectrl/AMDGPU}/mixed2.mir (100%)
 rename llvm/test/{CodeGen/AMDGPU/verify-convergencectrl => MachineVerifier/convergencectrl/AMDGPU}/not-ssa.mir (100%)
 rename llvm/test/{CodeGen/AMDGPU/verify-convergencectrl => MachineVerifier/convergencectrl/AMDGPU}/region-nesting.mir (100%)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 6130f75e034461..df4a6f74b5baaa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -250,7 +250,7 @@ static cl::opt<bool, true> LateCFGStructurize(
 static cl::opt<bool, true> DisableStructurizer(
     "amdgpu-disable-structurizer",
     cl::desc("Disable structurizer for experiments; produces unusable code"),
-    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::Hidden);
+    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);
 
 // Enable lib calls simplifications
 static cl::opt<bool> EnableLibCallSimplify(
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/basic.mir
similarity index 100%
rename from llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
rename to llvm/test/MachineVerifier/convergencectrl/AMDGPU/basic.mir
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/cycles.mir
similarity index 100%
rename from llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
rename to llvm/test/MachineVerifier/convergencectrl/AMDGPU/cycles.mir
diff --git a/llvm/test/MachineVerifier/convergencectrl/AMDGPU/lit.local.cfg b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/lit.local.cfg
new file mode 100644
index 00000000000000..7c492428aec761
--- /dev/null
+++ b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if "AMDGPU" not in config.root.targets:
+    config.unsupported = True
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/mixed2.mir
similarity index 100%
rename from llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
rename to llvm/test/MachineVerifier/convergencectrl/AMDGPU/mixed2.mir
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/not-ssa.mir b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/not-ssa.mir
similarity index 100%
rename from llvm/test/CodeGen/AMDGPU/verify-convergencectrl/not-ssa.mir
rename to llvm/test/MachineVerifier/convergencectrl/AMDGPU/not-ssa.mir
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir b/llvm/test/MachineVerifier/convergencectrl/AMDGPU/region-nesting.mir
similarity index 100%
rename from llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir
rename to llvm/test/MachineVerifier/convergencectrl/AMDGPU/region-nesting.mir



More information about the llvm-commits mailing list