[llvm] [GlobalISel] convergence control tokens and intrinsics (PR #67006)

Sameer Sahasrabuddhe via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 21 05:26:43 PDT 2023


https://github.com/ssahasra created https://github.com/llvm/llvm-project/pull/67006

Support for lowering convergence control to GMIR:
- Introduce new G_CONVERGENCECTRL_* opcodes.
- In the IR translator, convert the LLVM token type to LLT::token(), which is an alias for the s0 type. Token values show up as implicit uses on convergent operations.
- In the machine verifier, enforce the static rules of convergence control.

Note that the lowering of the new GMIR opcodes is entirely target-specific. It is generally expected that the backend will use convergence control to change the CFG of each function, and then discard the tokens. This is currently a work in progress for AMDGPU.
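
For reference, here is a minimal LLVM IR example of a controlled convergent operation, adapted from the irtranslator-convergence-tokens.ll test added by this patch. The anchor intrinsic produces a token, and the convergent call consumes it through the "convergencectrl" operand bundle:

  define i32 @basic(i32 %src) {
    ; Capture an implementation-defined subset of converged threads.
    %t = call token @llvm.experimental.convergence.anchor()
    ; The convergent call is controlled by the token %t.
    %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
    ret i32 %r
  }

  declare token @llvm.experimental.convergence.anchor()
  declare i32 @llvm.amdgcn.readfirstlane(i32) convergent

The IR translator turns %t into a token-typed (s0) virtual register defined by G_CONVERGENCECTRL_ANCHOR, which then appears as an implicit use on the G_INTRINSIC_CONVERGENT instruction for the readfirstlane call.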

Differential Revision: https://reviews.llvm.org/D158147

>From 1b156e012b411f95b3fbb317055b38ec3b296cd3 Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Tue, 19 Sep 2023 12:22:51 +0530
Subject: [PATCH] [GlobalISel] convergence control tokens and intrinsics

Support for lowering convergence control to GMIR:
- Introduce new G_CONVERGENCECTRL_* opcodes.
- In the IR translator, convert the LLVM token type to LLT::token(), which is
  an alias for the s0 type. Token values show up as implicit uses on convergent
  operations.
- In the machine verifier, enforce the static rules of convergence control.

Note that the lowering of the new GMIR opcodes is entirely target-specific. It
is generally expected that the backend will use convergence control to change
the CFG of each function, and then discard the tokens. This is currently a work
in progress for AMDGPU.
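
For illustration, the GMIR emitted for such a controlled convergent call looks
like this (taken from the check lines of the irtranslator-convergence-tokens.ll
test below; register names are illustrative):

  %anchor:_(s0) = G_CONVERGENCECTRL_ANCHOR
  %result:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), %src(s32), implicit %anchor(s0)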

Differential Revision: https://reviews.llvm.org/D158147
---
 .../llvm/ADT/GenericConvergenceVerifier.h     |  5 +
 .../llvm/CodeGen/GlobalISel/CallLowering.h    |  4 +
 .../llvm/CodeGen/GlobalISel/IRTranslator.h    | 21 ++++
 llvm/include/llvm/CodeGen/LowLevelType.h      | 29 ++++++
 .../llvm/CodeGen/MachineConvergenceVerifier.h | 28 ++++++
 .../llvm/IR/GenericConvergenceVerifierImpl.h  | 30 ++----
 llvm/include/llvm/Support/TargetOpcodes.def   |  4 +
 llvm/include/llvm/Target/GenericOpcodes.td    | 28 ++++++
 llvm/lib/CodeGen/CMakeLists.txt               |  1 +
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp  | 14 +++
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  | 61 ++++++++++--
 .../CodeGen/GlobalISel/InlineAsmLowering.cpp  |  8 ++
 llvm/lib/CodeGen/LowLevelTypeUtils.cpp        |  4 +
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       | 13 ++-
 .../CodeGen/MachineConvergenceVerifier.cpp    | 97 +++++++++++++++++++
 llvm/lib/CodeGen/MachineVerifier.cpp          | 34 +++++++
 llvm/lib/IR/ConvergenceVerifier.cpp           | 31 ++++--
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp |  6 ++
 .../GlobalISel/legalizer-info-validation.mir  |  9 ++
 .../irtranslator-convergence-tokens.ll        | 83 ++++++++++++++++
 .../AMDGPU/verify-convergencectrl/basic.mir   | 38 ++++++++
 .../AMDGPU/verify-convergencectrl/cycles.mir  | 53 ++++++++++
 .../AMDGPU/verify-convergencectrl/mixed2.mir  | 15 +++
 .../verify-convergencectrl/region-nesting.mir | 25 +++++
 .../AArch64/parse-low-level-type-invalid4.mir | 10 --
 .../AArch64/parse-low-level-type-invalid6.mir |  2 +-
 26 files changed, 602 insertions(+), 51 deletions(-)
 create mode 100644 llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h
 create mode 100644 llvm/lib/CodeGen/MachineConvergenceVerifier.cpp
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir
 delete mode 100644 llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir

diff --git a/llvm/include/llvm/ADT/GenericConvergenceVerifier.h b/llvm/include/llvm/ADT/GenericConvergenceVerifier.h
index 0810a0701322907..24cd0cda3708517 100644
--- a/llvm/include/llvm/ADT/GenericConvergenceVerifier.h
+++ b/llvm/include/llvm/ADT/GenericConvergenceVerifier.h
@@ -60,6 +60,10 @@ template <typename ContextT> class GenericConvergenceVerifier {
     NoConvergence
   } ConvergenceKind = NoConvergence;
 
+  /// The control token operation performed by a convergence control
+  /// intrinsic in LLVM IR, or by a G_CONVERGENCECTRL_* instruction in GMIR.
+  enum ConvOpKind { CONV_ANCHOR, CONV_ENTRY, CONV_LOOP, CONV_NONE };
+
   // Cache token uses found so far. Note that we track the unique definitions
   // and not the token values.
   DenseMap<const InstructionT *, const InstructionT *> Tokens;
@@ -68,6 +72,7 @@ template <typename ContextT> class GenericConvergenceVerifier {
 
   static bool isInsideConvergentFunction(const InstructionT &I);
   static bool isConvergent(const InstructionT &I);
+  static ConvOpKind getConvOp(const InstructionT &I);
   const InstructionT *findAndCheckConvergenceTokenUsed(const InstructionT &I);
 
   void reportFailure(const Twine &Message, ArrayRef<Printable> Values);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 4a799eec8899a51..ff2721495dc3b97 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -117,6 +117,9 @@ class CallLowering {
     /// vreg that the swifterror should be copied into after the call.
     Register SwiftErrorVReg;
 
+    /// Valid if the call is a controlled convergent operation.
+    Register ConvergenceCtrlToken;
+
     /// Original IR callsite corresponding to this call, if available.
     const CallBase *CB = nullptr;
 
@@ -583,6 +586,7 @@ class CallLowering {
   bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
                  ArrayRef<Register> ResRegs,
                  ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
+                 Register ConvergenceCtrlToken,
                  std::function<unsigned()> GetCalleeReg) const;
 
   /// For targets which want to use big-endian can enable it with
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index bffc03ed0187e63..d51daa926c9575d 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -554,6 +554,10 @@ class IRTranslator : public MachineFunctionPass {
     return false;
   }
 
+  bool translateConvergenceControlIntrinsic(const CallInst &CI,
+                                            Intrinsic::ID ID,
+                                            MachineIRBuilder &MIRBuilder);
+
   /// @}
 
   // Builder for machine instruction a la IRBuilder.
@@ -671,6 +675,23 @@ class IRTranslator : public MachineFunctionPass {
     return Regs[0];
   }
 
+  Register getOrCreateConvergenceTokenVReg(const Value &Token) {
+    assert(Token.getType()->isTokenTy());
+    auto &Regs = *VMap.getVRegs(Token);
+    if (!Regs.empty()) {
+      assert(Regs.size() == 1 &&
+             "Expected a single register for convergence tokens.");
+      return Regs[0];
+    }
+
+    auto Reg = MRI->createGenericVirtualRegister(LLT::token());
+    Regs.push_back(Reg);
+    auto &Offsets = *VMap.getOffsets(Token);
+    if (Offsets.empty())
+      Offsets.push_back(0);
+    return Reg;
+  }
+
   /// Allocate some vregs and offsets in the VMap. Then populate just the
   /// offsets while leaving the vregs empty.
   ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
diff --git a/llvm/include/llvm/CodeGen/LowLevelType.h b/llvm/include/llvm/CodeGen/LowLevelType.h
index b7477834a1ff5e1..e0cd56849909422 100644
--- a/llvm/include/llvm/CodeGen/LowLevelType.h
+++ b/llvm/include/llvm/CodeGen/LowLevelType.h
@@ -45,6 +45,13 @@ class LLT {
                /*AddressSpace=*/0};
   }
 
+  /// Get a low-level token; just a scalar with zero bits (or no size).
+  static constexpr LLT token() {
+    return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
+               ElementCount::getFixed(0), /*SizeInBits=*/0,
+               /*AddressSpace=*/0};
+  }
+
   /// Get a low-level pointer in the given address space.
   static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid pointer size");
@@ -304,6 +311,28 @@ class LLT {
   /// described in static const *Field variables. Each of these variables
   /// is a 2-element array, with the first element describing the bitfield size
   /// and the second element describing the bitfield offset.
+  ///
+  /// +--------+---------+--------+----------+----------------------+
+  /// |isScalar|isPointer|isVector| RawData  |Notes                 |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    0    |   0    |    0     |Invalid               |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    0    |   1    |    0     |Tombstone Key         |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    1    |   0    |    0     |Empty Key             |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   1    |    0    |   0    |    0     |Token                 |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   1    |    0    |   0    | non-zero |Scalar                |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    1    |   0    | non-zero |Pointer               |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    0    |   1    | non-zero |Vector of non-pointer |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    1    |   1    | non-zero |Vector of pointer     |
+  /// +--------+---------+--------+----------+----------------------+
+  ///
+  /// Everything else is reserved.
   typedef int BitFieldInfo[2];
   ///
   /// This is how the bitfields are packed per Kind:
diff --git a/llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h b/llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h
new file mode 100644
index 000000000000000..fd13546f2ae32f0
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/MachineConvergenceVerifier.h
@@ -0,0 +1,28 @@
+//===- MachineConvergenceVerifier.h - Verify convergencectrl ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares the GMIR specialization of the
+/// GenericConvergenceVerifier template.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECONVERGENCEVERIFIER_H
+#define LLVM_CODEGEN_MACHINECONVERGENCEVERIFIER_H
+
+#include "llvm/ADT/GenericConvergenceVerifier.h"
+#include "llvm/CodeGen/MachineSSAContext.h"
+
+namespace llvm {
+
+using MachineConvergenceVerifier =
+    GenericConvergenceVerifier<MachineSSAContext>;
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINECONVERGENCEVERIFIER_H
diff --git a/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h b/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
index 2ba81015cb7b641..037e406fc57d38b 100644
--- a/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
+++ b/llvm/include/llvm/IR/GenericConvergenceVerifierImpl.h
@@ -49,17 +49,6 @@ using namespace llvm;
     }                                                                          \
   } while (false)
 
-static bool isConvergenceControlIntrinsic(unsigned IntrinsicID) {
-  switch (IntrinsicID) {
-  default:
-    return false;
-  case Intrinsic::experimental_convergence_anchor:
-  case Intrinsic::experimental_convergence_entry:
-  case Intrinsic::experimental_convergence_loop:
-    return true;
-  }
-}
-
 namespace llvm {
 template <class ContextT> void GenericConvergenceVerifier<ContextT>::clear() {
   Tokens.clear();
@@ -74,12 +63,11 @@ void GenericConvergenceVerifier<ContextT>::visit(const BlockT &BB) {
 
 template <class ContextT>
 void GenericConvergenceVerifier<ContextT>::visit(const InstructionT &I) {
-  auto ID = ContextT::getIntrinsicID(I);
+  auto ConvOp = getConvOp(I);
   auto *TokenDef = findAndCheckConvergenceTokenUsed(I);
-  bool IsCtrlIntrinsic = true;
 
-  switch (ID) {
-  case Intrinsic::experimental_convergence_entry:
+  switch (ConvOp) {
+  case CONV_ENTRY:
     Check(isInsideConvergentFunction(I),
           "Entry intrinsic can occur only in a convergent function.",
           {Context.print(&I)});
@@ -91,13 +79,13 @@ void GenericConvergenceVerifier<ContextT>::visit(const InstructionT &I) {
           "same basic block.",
           {Context.print(&I)});
     LLVM_FALLTHROUGH;
-  case Intrinsic::experimental_convergence_anchor:
+  case CONV_ANCHOR:
     Check(!TokenDef,
           "Entry or anchor intrinsic cannot have a convergencectrl token "
           "operand.",
           {Context.print(&I)});
     break;
-  case Intrinsic::experimental_convergence_loop:
+  case CONV_LOOP:
     Check(TokenDef, "Loop intrinsic must have a convergencectrl token operand.",
           {Context.print(&I)});
     Check(!SeenFirstConvOp,
@@ -106,14 +94,13 @@ void GenericConvergenceVerifier<ContextT>::visit(const InstructionT &I) {
           {Context.print(&I)});
     break;
   default:
-    IsCtrlIntrinsic = false;
     break;
   }
 
   if (isConvergent(I))
     SeenFirstConvOp = true;
 
-  if (TokenDef || IsCtrlIntrinsic) {
+  if (TokenDef || ConvOp != CONV_NONE) {
     Check(isConvergent(I),
           "Convergence control token can only be used in a convergent call.",
           {Context.print(&I)});
@@ -174,8 +161,7 @@ void GenericConvergenceVerifier<ContextT>::verify(const DominatorTreeT &DT) {
       return;
     }
 
-    Check(ContextT::getIntrinsicID(*User) ==
-              Intrinsic::experimental_convergence_loop,
+    Check(getConvOp(*User) == CONV_LOOP,
           "Convergence token used by an instruction other than "
           "llvm.experimental.convergence.loop in a cycle that does "
           "not contain the token's definition.",
@@ -212,7 +198,7 @@ void GenericConvergenceVerifier<ContextT>::verify(const DominatorTreeT &DT) {
     for (auto &I : *BB) {
       if (auto *Token = Tokens.lookup(&I))
         checkToken(Token, &I, LiveTokens);
-      if (isConvergenceControlIntrinsic(ContextT::getIntrinsicID(I)))
+      if (getConvOp(I) != CONV_NONE)
         LiveTokens.push_back(&I);
     }
 
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index e02b1a1d01cfec0..04bdc395b302235 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -436,6 +436,10 @@ HANDLE_TARGET_OPCODE(G_INTRINSIC_CONVERGENT)
 /// Generic intrinsic use (with side effects).
 HANDLE_TARGET_OPCODE(G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS)
 
+HANDLE_TARGET_OPCODE(G_CONVERGENCECTRL_ENTRY)
+HANDLE_TARGET_OPCODE(G_CONVERGENCECTRL_ANCHOR)
+HANDLE_TARGET_OPCODE(G_CONVERGENCECTRL_LOOP)
+
 /// Generic extension allowing rubbish in high bits.
 HANDLE_TARGET_OPCODE(G_ANYEXT)
 
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 0a4fbaa12f96cec..27d4b8a4759ff92 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1276,6 +1276,34 @@ def G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS : GenericInstruction {
   let isConvergent = true;
 }
 
+//------------------------------------------------------------------------------
+// Convergence control operations.
+//------------------------------------------------------------------------------
+
+// Capture the set of threads that are converged on entry to a function.
+def G_CONVERGENCECTRL_ENTRY : GenericInstruction {
+  let InOperandList = (ins);
+  let OutOperandList = (outs type0:$dst);
+  let isConvergent = true;
+  let hasSideEffects = false;
+}
+
+// Capture an implementation-defined subset of converged threads.
+def G_CONVERGENCECTRL_ANCHOR : GenericInstruction {
+  let InOperandList = (ins);
+  let OutOperandList = (outs type0:$dst);
+  let isConvergent = true;
+  let hasSideEffects = false;
+}
+
+// Capture the convergence of threads in a cycle.
+def G_CONVERGENCECTRL_LOOP : GenericInstruction {
+  let InOperandList = (ins type0:$src);
+  let OutOperandList = (outs type0:$dst);
+  let isConvergent = true;
+  let hasSideEffects = false;
+}
+
 //------------------------------------------------------------------------------
 // Branches.
 //------------------------------------------------------------------------------
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index 389c70d04f17ba3..41f6612b68a1f12 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -121,6 +121,7 @@ add_llvm_component_library(LLVMCodeGen
   MachineBranchProbabilityInfo.cpp
   MachineCFGPrinter.cpp
   MachineCombiner.cpp
+  MachineConvergenceVerifier.cpp
   MachineCopyPropagation.cpp
   MachineCSE.cpp
   MachineCheckDebugify.cpp
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 0b1f151135be9fc..45738cc636cb7af 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -21,6 +21,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Target/TargetMachine.h"
@@ -87,10 +88,20 @@ void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
   });
 }
 
+static bool hasConvergenceEntryToken(const CallBase &CB) {
+  auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl);
+  if (!Bundle)
+    return true;
+  auto *Token = Bundle->Inputs[0].get();
+  auto *Def = cast<IntrinsicInst>(Token);
+  return Def->getIntrinsicID() == Intrinsic::experimental_convergence_entry;
+}
+
 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                              ArrayRef<Register> ResRegs,
                              ArrayRef<ArrayRef<Register>> ArgRegs,
                              Register SwiftErrorVReg,
+                             Register ConvergenceCtrlToken,
                              std::function<unsigned()> GetCalleeReg) const {
   CallLoweringInfo Info;
   const DataLayout &DL = MIRBuilder.getDataLayout();
@@ -121,6 +132,8 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
     CanBeTailCalled = false;
   }
 
+  if (!hasConvergenceEntryToken(CB))
+    CanBeTailCalled = false;
 
   // First step is to marshall all the function's parameters into the correct
   // physregs and memory locations. Gather the sequence of argument types that
@@ -176,6 +189,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
   Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
   Info.CallConv = CallConv;
   Info.SwiftErrorVReg = SwiftErrorVReg;
+  Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
   Info.IsMustTailCall = CB.isMustTailCall();
   Info.IsTailCall = CanBeTailCalled;
   Info.IsVarArg = IsVarArg;
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 9dac3d083994f9f..692e2fbdb313add 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -210,8 +210,9 @@ ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
   auto *VRegs = VMap.getVRegs(Val);
   auto *Offsets = VMap.getOffsets(Val);
 
-  assert(Val.getType()->isSized() &&
-         "Don't know how to create an empty vreg");
+  if (!Val.getType()->isTokenTy())
+    assert(Val.getType()->isSized() &&
+           "Don't know how to create an empty vreg");
 
   SmallVector<LLT, 4> SplitTys;
   computeValueLLTs(*DL, *Val.getType(), SplitTys,
@@ -1953,6 +1954,37 @@ bool IRTranslator::translateIfEntryValueArgument(
   return true;
 }
 
+static unsigned getConvOpcode(Intrinsic::ID ID) {
+  switch (ID) {
+  default:
+    llvm_unreachable("Unexpected intrinsic");
+    return 0;
+  case Intrinsic::experimental_convergence_anchor:
+    return TargetOpcode::G_CONVERGENCECTRL_ANCHOR;
+  case Intrinsic::experimental_convergence_entry:
+    return TargetOpcode::G_CONVERGENCECTRL_ENTRY;
+  case Intrinsic::experimental_convergence_loop:
+    return TargetOpcode::G_CONVERGENCECTRL_LOOP;
+  }
+}
+
+bool IRTranslator::translateConvergenceControlIntrinsic(
+    const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
+  MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
+  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
+  MIB.addDef(OutputReg);
+
+  if (ID == Intrinsic::experimental_convergence_loop) {
+    auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
+    assert(Bundle && "Expected a convergence control token.");
+    Register InputReg =
+        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
+    MIB.addUse(InputReg);
+  }
+
+  return true;
+}
+
 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {
   if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
@@ -2410,7 +2442,10 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
 #include "llvm/IR/ConstrainedOps.def"
     return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                            MIRBuilder);
-
+  case Intrinsic::experimental_convergence_anchor:
+  case Intrinsic::experimental_convergence_entry:
+  case Intrinsic::experimental_convergence_loop:
+    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
   }
   return false;
 }
@@ -2461,12 +2496,18 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
     }
   }
 
+  Register ConvergenceCtrlToken = 0;
+  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+    const auto &Token = *Bundle->Inputs[0].get();
+    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
+  }
+
   // We don't set HasCalls on MFI here yet because call lowering may decide to
   // optimize into tail calls. Instead, we defer that to selection where a final
   // scan is done to check if any instructions are calls.
-  bool Success =
-      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
-                     [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
+  bool Success = CLI->lowerCall(
+      MIRBuilder, CB, Res, Args, SwiftErrorVReg, ConvergenceCtrlToken,
+      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
 
   // Check if we just inserted a tail call.
   if (Success) {
@@ -2581,6 +2622,14 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
         MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
   }
 
+  if (CI.isConvergent()) {
+    if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+      auto *Token = Bundle->Inputs[0].get();
+      Register TokenReg = getOrCreateVReg(*Token);
+      MIB.addUse(TokenReg, RegState::Implicit);
+    }
+  }
+
   return true;
 }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 00dba57fcb80227..2dfaa7381ae3355 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -592,6 +592,14 @@ bool InlineAsmLowering::lowerInlineAsm(
     }
   }
 
+  if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+    auto *Token = Bundle->Inputs[0].get();
+    ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
+    assert(SourceRegs.size() == 1 &&
+           "Expected the control token to fit into a single virtual register");
+    Inst.addUse(SourceRegs[0], RegState::Implicit);
+  }
+
   if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
     Inst.addMetadata(SrcLoc);
 
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index bc2ea3f05b6da6c..3499fe1ee757764 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -39,6 +39,10 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
     return LLT::scalar(SizeInBits);
   }
 
+  if (Ty.isTokenTy()) {
+    return LLT::token();
+  }
+
   return LLT();
 }
 
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 65280c65b68781e..e52b0ce05b1648a 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1921,10 +1921,13 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
 
   if (Token.range().front() == 's') {
     auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
-    if (!verifyScalarSize(ScalarSize))
-      return error("invalid size for scalar type");
-
-    Ty = LLT::scalar(ScalarSize);
+    if (ScalarSize) {
+      if (!verifyScalarSize(ScalarSize))
+        return error("invalid size for scalar type");
+      Ty = LLT::scalar(ScalarSize);
+    } else {
+      Ty = LLT::token();
+    }
     lex();
     return false;
   } else if (Token.range().front() == 'p') {
@@ -1965,7 +1968,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
   if (Token.range().front() == 's') {
     auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
     if (!verifyScalarSize(ScalarSize))
-      return error("invalid size for scalar type");
+      return error("invalid size for scalar element in vector");
     Ty = LLT::scalar(ScalarSize);
   } else if (Token.range().front() == 'p') {
     const DataLayout &DL = MF.getDataLayout();
diff --git a/llvm/lib/CodeGen/MachineConvergenceVerifier.cpp b/llvm/lib/CodeGen/MachineConvergenceVerifier.cpp
new file mode 100644
index 000000000000000..dc133d7a2279e4e
--- /dev/null
+++ b/llvm/lib/CodeGen/MachineConvergenceVerifier.cpp
@@ -0,0 +1,97 @@
+//===- MachineConvergenceVerifier.cpp - Verify convergence control ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file implements the GMIR specialization of GenericConvergenceVerifier.
+
+#include "llvm/CodeGen/MachineConvergenceVerifier.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineSSAContext.h"
+#include "llvm/IR/GenericConvergenceVerifierImpl.h"
+
+using namespace llvm;
+
+template <>
+auto GenericConvergenceVerifier<MachineSSAContext>::getConvOp(
+    const MachineInstr &MI) -> ConvOpKind {
+  switch (MI.getOpcode()) {
+  default:
+    return CONV_NONE;
+  case TargetOpcode::G_CONVERGENCECTRL_ENTRY:
+    return CONV_ENTRY;
+  case TargetOpcode::G_CONVERGENCECTRL_ANCHOR:
+    return CONV_ANCHOR;
+  case TargetOpcode::G_CONVERGENCECTRL_LOOP:
+    return CONV_LOOP;
+  }
+}
+
+template <>
+const MachineInstr *
+GenericConvergenceVerifier<MachineSSAContext>::findAndCheckConvergenceTokenUsed(
+    const MachineInstr &MI) {
+  auto &MRI = Context.getFunction()->getRegInfo();
+  const MachineInstr *TokenDef = nullptr;
+
+  for (auto &MO : MI.uses()) {
+    if (!MO.isReg())
+      continue;
+
+    const auto RegTy = MRI.getType(MO.getReg());
+    if (RegTy != LLT::token())
+      continue;
+
+    // A token type operand is a convergence control token iff its unique
+    // definition is a convergence control instruction. We can't really verify
+    // that, since the token type may have other implicit uses. Instead we use
+    // it as a way to identify convergence control token operands.
+    const auto *Def = MRI.getUniqueVRegDef(MO.getReg());
+    if (!Def)
+      continue;
+
+    if (getConvOp(*Def) == CONV_NONE)
+      continue;
+
+    CheckOrNull(MI.isCall() || getConvOp(MI) != CONV_NONE ||
+                    isa<GIntrinsic>(MI),
+                "Convergence control tokens can only be used by call "
+                "instructions, intrinsics or convergence control instructions.",
+                {Context.print(MO.getReg()), Context.print(&MI)});
+
+    CheckOrNull(getConvOp(MI) != CONV_NONE || MO.isImplicit(),
+                "Convergence control tokens can only be used implicitly.",
+                {Context.print(MO.getReg()), Context.print(&MI)});
+
+    CheckOrNull(!TokenDef,
+                "A call can use at most one convergence control token.",
+                {Context.print(MO.getReg()), Context.print(&MI)});
+
+    TokenDef = Def;
+  }
+
+  if (TokenDef)
+    Tokens[&MI] = TokenDef;
+
+  return TokenDef;
+}
+
+template <>
+bool GenericConvergenceVerifier<MachineSSAContext>::isInsideConvergentFunction(
+    const MachineInstr &MI) {
+  // The class MachineFunction does not have any property to indicate whether it
+  // is convergent. Trivially return true so that the check always passes.
+  return true;
+}
+
+template <>
+bool GenericConvergenceVerifier<MachineSSAContext>::isConvergent(
+    const MachineInstr &MI) {
+  return MI.isConvergent();
+}
+
+template class llvm::GenericConvergenceVerifier<MachineSSAContext>;
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index de6d78027d098bc..8abf2d3cffbe83c 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -40,6 +40,8 @@
 #include "llvm/CodeGen/LiveVariables.h"
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineConvergenceVerifier.h"
+#include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -220,6 +222,11 @@ namespace {
     LiveStacks *LiveStks = nullptr;
     SlotIndexes *Indexes = nullptr;
 
+    // This is calculated only when trying to verify convergence control tokens.
+    // Similar to the LLVM IR verifier, we calculate this locally instead of
+    // relying on the pass manager.
+    MachineDomTree DT;
+
     void visitMachineFunctionBefore();
     void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
     void visitMachineBundleBefore(const MachineInstr *MI);
@@ -2920,7 +2927,34 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
   }
 }
 
+static void
+verifyConvergenceControl(const MachineFunction &MF, MachineDomTree &DT,
+                         std::function<void(const Twine &Message)> FailureCB) {
+  MachineConvergenceVerifier CV;
+  CV.initialize(&errs(), FailureCB, MF);
+
+  if (MF.getProperties().hasProperty(
+          MachineFunctionProperties::Property::Selected))
+    return;
+
+  for (const auto &MBB : MF) {
+    CV.visit(MBB);
+    for (const auto &MI : MBB.instrs())
+      CV.visit(MI);
+  }
+
+  if (CV.sawTokens()) {
+    DT.recalculate(const_cast<MachineFunction &>(MF));
+    CV.verify(DT);
+  }
+}
+
 void MachineVerifier::visitMachineFunctionAfter() {
+  auto FailureCB = [this](const Twine &Message) {
+    report(Message.str().c_str(), MF);
+  };
+  verifyConvergenceControl(*MF, DT, FailureCB);
+
   calcRegsPassed();
 
   for (const MachineBasicBlock &MBB : *MF)
diff --git a/llvm/lib/IR/ConvergenceVerifier.cpp b/llvm/lib/IR/ConvergenceVerifier.cpp
index 336c202b6f94cee..03dfe674fd699df 100644
--- a/llvm/lib/IR/ConvergenceVerifier.cpp
+++ b/llvm/lib/IR/ConvergenceVerifier.cpp
@@ -14,6 +14,24 @@
 
 using namespace llvm;
 
+template <>
+auto GenericConvergenceVerifier<SSAContext>::getConvOp(const Instruction &I)
+    -> ConvOpKind {
+  const auto *CB = dyn_cast<CallBase>(&I);
+  if (!CB)
+    return CONV_NONE;
+  switch (CB->getIntrinsicID()) {
+  default:
+    return CONV_NONE;
+  case Intrinsic::experimental_convergence_anchor:
+    return CONV_ANCHOR;
+  case Intrinsic::experimental_convergence_entry:
+    return CONV_ENTRY;
+  case Intrinsic::experimental_convergence_loop:
+    return CONV_LOOP;
+  }
+}
+
 template <>
 const Instruction *
 GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
@@ -38,11 +56,10 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
   auto *Token = Bundle->Inputs[0].get();
   auto *Def = dyn_cast<Instruction>(Token);
 
-  CheckOrNull(
-      Def && isConvergenceControlIntrinsic(SSAContext::getIntrinsicID(*Def)),
-      "Convergence control tokens can only be produced by calls to the "
-      "convergence control intrinsics.",
-      {Context.print(Token), Context.print(&I)});
+  CheckOrNull(Def && getConvOp(*Def) != CONV_NONE,
+              "Convergence control tokens can only be produced by calls to the "
+              "convergence control intrinsics.",
+              {Context.print(Token), Context.print(&I)});
 
   if (Def)
     Tokens[&I] = Def;
@@ -52,14 +69,14 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
 
 template <>
 bool GenericConvergenceVerifier<SSAContext>::isInsideConvergentFunction(
-    const InstructionT &I) {
+    const Instruction &I) {
   auto *F = I.getFunction();
   return F->isConvergent();
 }
 
 template <>
 bool GenericConvergenceVerifier<SSAContext>::isConvergent(
-    const InstructionT &I) {
+    const Instruction &I) {
   if (auto *CB = dyn_cast<CallBase>(&I)) {
     return CB->isConvergent();
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index db0f56416051f01..e1c55e7bc3bb89a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1269,6 +1269,9 @@ bool AMDGPUCallLowering::lowerTailCall(
   if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
     return false;
 
+  if (Info.ConvergenceCtrlToken) {
+    MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
+  }
   handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs);
 
   // If we have -tailcallopt, we need to adjust the stack. We'll do the call
@@ -1395,6 +1398,9 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
 
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 
+  if (Info.ConvergenceCtrlToken) {
+    MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
+  }
   handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);
 
   // Get a count of how many bytes are to be pushed on the stack.
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index b38868a530264e9..6f96cac1cb93dff 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -279,6 +279,15 @@
 # DEBUG-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_CONVERGENCECTRL_ENTRY (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_CONVERGENCECTRL_ANCHOR (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_CONVERGENCECTRL_LOOP (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: G_ANYEXT (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll
new file mode 100644
index 000000000000000..b75b04ed0d02a8e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s
+
+define i32 @basic(i32 %src) #1 {
+  ; CHECK-LABEL: name: basic
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $vgpr0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK-NEXT:   [[CONVERGENCECTRL_ANCHOR:%[0-9]+]]:_(s0) = G_CONVERGENCECTRL_ANCHOR
+  ; CHECK-NEXT:   [[INTRINSIC_CONVERGENT:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32), implicit [[CONVERGENCECTRL_ANCHOR]](s0)
+  ; CHECK-NEXT:   $vgpr0 = COPY [[INTRINSIC_CONVERGENT]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
+  %t = call token @llvm.experimental.convergence.anchor()
+  %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+  ret i32 %r
+}
+
+define i32 @nested(i32 %src) #0 {
+  ; CHECK-LABEL: name: nested
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $vgpr0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK-NEXT:   [[CONVERGENCECTRL_ENTRY:%[0-9]+]]:_(s0) = G_CONVERGENCECTRL_ENTRY
+  ; CHECK-NEXT:   [[CONVERGENCECTRL_ANCHOR:%[0-9]+]]:_(s0) = G_CONVERGENCECTRL_ANCHOR
+  ; CHECK-NEXT:   [[INTRINSIC_CONVERGENT:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32), implicit [[CONVERGENCECTRL_ANCHOR]](s0)
+  ; CHECK-NEXT:   [[INTRINSIC_CONVERGENT1:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32), implicit [[CONVERGENCECTRL_ENTRY]](s0)
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[INTRINSIC_CONVERGENT1]], [[INTRINSIC_CONVERGENT]]
+  ; CHECK-NEXT:   $vgpr0 = COPY [[ADD]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
+  %t1 = call token @llvm.experimental.convergence.entry()
+  %t2 = call token @llvm.experimental.convergence.anchor()
+  %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+  %r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
+  %sum = add i32 %r1, %r2
+  ret i32 %sum
+}
+
+define i32 @trivial_heart(i32 %src) #1 {
+bb1:
+  %t1 = call token @llvm.experimental.convergence.anchor()
+  br label %bb2
+
+bb2:
+  %t2 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %t1) ]
+  %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+  ret i32 %r
+}
+
+;; FIXME: Cannot test lowering of convergence control in the presence of
+;; branches because the backend emits control flow pseudos such as SI_IF that
+;; are marked convergent. These must also be assigned convergence control tokens,
+;; to satisfy the requirement that controlled and uncontrolled operations cannot
+;; be mixed.
+;; define i32 @branch(i32 %src, i1 %cond) #1 {
+;; bb1:
+;;   %t = call token @llvm.experimental.convergence.anchor()
+;;   br i1 %cond, label %bb2, label %bb3
+;;
+;; bb2:
+;;   %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+;;   br label %bb4
+;;
+;; bb3:
+;;   %r3 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+;;   br label %bb4
+;;
+;; bb4:
+;;   %p = phi i32 [%r2, %bb2], [%r3, %bb3]
+;;   ret i32 %p
+;; }
+
+;; TODO: Write tests using natural loops.
+
+declare i32 @llvm.amdgcn.readfirstlane(i32) #0
+
+declare token @llvm.experimental.convergence.entry()
+declare token @llvm.experimental.convergence.anchor()
+declare token @llvm.experimental.convergence.loop()
+
+attributes #0 = { nounwind readnone convergent }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
new file mode 100644
index 000000000000000..f060d89325d8495
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/basic.mir
@@ -0,0 +1,38 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            basic
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    successors: %bb.1, %bb.2;
+    %0:_(s0) = G_CONVERGENCECTRL_ANCHOR
+    ; CHECK: Entry intrinsic cannot be preceded by a convergent operation in the same basic block.
+    ; CHECK: G_CONVERGENCECTRL_ENTRY
+    %1:_(s0) = G_CONVERGENCECTRL_ENTRY
+    ; CHECK: Loop intrinsic cannot be preceded by a convergent operation in the same basic block.
+    ; CHECK: G_CONVERGENCECTRL_LOOP
+    %2:_(s0) = G_CONVERGENCECTRL_LOOP %0:_(s0)
+    %3:_(s1) = IMPLICIT_DEF
+    G_BRCOND %4:sgpr_64(s1), %bb.1
+    G_BR %bb.2
+
+  bb.1:
+    successors: %bb.2;
+    ; CHECK: Entry intrinsic can occur only in the entry block.
+    ; CHECK: G_CONVERGENCECTRL_ENTRY
+    %5:_(s0) = G_CONVERGENCECTRL_ENTRY
+
+  bb.2:
+    ; CHECK: Convergence control tokens can only be used by call instructions, intrinsics or convergence control instructions.
+    ; CHECK: G_PHI
+    %6:_(s0) = G_PHI %0:_(s0), %bb.0, %0:_(s0), %bb.1
+    %7:_(s0) = G_CONVERGENCECTRL_ANCHOR
+    %8:sgpr_64 = IMPLICIT_DEF
+    %4:sgpr_64(s1) = G_SI_CALL %8:sgpr_64, 1, implicit %7:_(s0)
+    ; CHECK: A call can use at most one convergence control token.
+    ; CHECK: G_SI_CALL %{{[0-9]}}:sgpr_64, 2
+    %9:sgpr_64 = G_SI_CALL %8:sgpr_64, 2, implicit %7:_(s0), implicit %7:_(s0)
+    ; CHECK: Cannot mix controlled and uncontrolled convergence in the same function.
+    ; CHECK: G_SI_CALL %{{[0-9]}}:sgpr_64, 3
+    %10:sgpr_64 = G_SI_CALL %8:sgpr_64, 3
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
new file mode 100644
index 000000000000000..3fedf49d96503fa
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/cycles.mir
@@ -0,0 +1,53 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            cycles
+body:             |
+  bb.0:
+    %0:_(s0) = G_CONVERGENCECTRL_ANCHOR
+    %1:_(s1) = IMPLICIT_DEF
+    %2:_(s1) = IMPLICIT_DEF
+    G_BRCOND %2:_(s1), %bb.9
+    G_BR %bb.1
+
+  bb.1:
+    G_BRCOND %2:_(s1), %bb.8
+    G_BR %bb.5
+
+  bb.2:
+    G_BRCOND %2:_(s1), %bb.3
+    G_BR %bb.4
+
+  bb.3:
+    ; CHECK: Cycle heart must dominate all blocks in the cycle.
+    ; Irreducible cycle: entries(bb.4 bb.3)
+    %3:_(s0) = G_CONVERGENCECTRL_LOOP %0:_(s0)
+    G_BR %bb.4
+
+  bb.4:
+    G_BR %bb.3
+
+  bb.5:
+    G_BRCOND %2:_(s1), %bb.6
+    G_BR %bb.2
+
+  bb.6:
+    G_BR %bb.7
+
+  bb.7:
+    ; CHECK: Cycle heart must dominate all blocks in the cycle.
+    ; Reducible cycle: entries(bb.6) bb.7
+    %4:_(s0) = G_CONVERGENCECTRL_LOOP %0:_(s0)
+    G_BR %bb.6
+
+  bb.8:
+    ; CHECK: Two static convergence token uses in a cycle that does not contain either token's definition.
+    %5:_(s0) = G_CONVERGENCECTRL_LOOP %0:_(s0)
+    %6:_(s0) = G_CONVERGENCECTRL_LOOP %0:_(s0)
+    G_BR %bb.8
+
+  bb.9:
+    ; CHECK: Convergence token used by an instruction other than llvm.experimental.convergence.loop in a cycle that does not contain the token's definition.
+    %7:sgpr_64 = G_SI_CALL %1:_(s1), 3, implicit %0:_(s0)
+    G_BR %bb.9
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
new file mode 100644
index 000000000000000..6180060ab0a4327
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/mixed2.mir
@@ -0,0 +1,15 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            mixed2
+body:             |
+  bb.0:
+    %0:sgpr_64 = IMPLICIT_DEF
+    %1:sgpr_64 = G_SI_CALL %0, 1
+    ; CHECK: Cannot mix controlled and uncontrolled convergence in the same function.
+    ; CHECK: G_CONVERGENCECTRL_ANCHOR
+    %2:_(s0) = G_CONVERGENCECTRL_ANCHOR
+    ; CHECK: Cannot mix controlled and uncontrolled convergence in the same function.
+    ; CHECK: G_SI_CALL %{{[0-9]}}:sgpr_64, 2
+    %3:sgpr_64 = G_SI_CALL %0, 2, implicit %2(s0)
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir
new file mode 100644
index 000000000000000..8993d8761cc6edb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/verify-convergencectrl/region-nesting.mir
@@ -0,0 +1,25 @@
+# RUN: not --crash llc -mtriple=amdgcn-- -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
+---
+name:            region_nesting
+body:             |
+  bb.0:
+    %0:_(s0) = G_CONVERGENCECTRL_ANCHOR
+    %1:_(s0) = G_CONVERGENCECTRL_ANCHOR
+    %2:sgpr_64 = IMPLICIT_DEF
+    %3:sgpr_64 = G_SI_CALL %2, 1, implicit %0(s0)
+    ; CHECK: Convergence region is not well-nested.
+    ; CHECK: G_SI_CALL %{{[0-9]}}:sgpr_64, 2
+    %7:sgpr_64 = G_SI_CALL %2, 2, implicit %1(s0)
+    %4:_(s1) = IMPLICIT_DEF
+    G_BRCOND %4(s1), %bb.1
+    G_BR %bb.2
+
+  bb.1:
+    %5:sgpr_64 = G_SI_CALL %2, 3, implicit %0(s0)
+
+  bb.2:
+    ; CHECK: Convergence region is not well-nested.
+    ; CHECK: G_SI_CALL %{{[0-9]}}:sgpr_64, 4
+    %6:sgpr_64 = G_SI_CALL %2, 4, implicit %1(s0)
+
+...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
deleted file mode 100644
index d66dd1044869225..000000000000000
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
+++ /dev/null
@@ -1,10 +0,0 @@
-# RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is 0 bits
----
-name: test_scalar_size_0
-body: |
-  bb.0:
-    liveins: $x0
-    ; CHECK: [[@LINE+1]]:10: invalid size for scalar type
-    %0:_(s0) = G_IMPLICIT_DEF
-...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
index 698568701fb1a47..632e5fa81db111b 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
@@ -5,6 +5,6 @@ name: test_vector_element_size_0
 body: |
   bb.0:
     liveins: $x0
-    ; CHECK: [[@LINE+1]]:15: invalid size for scalar type
+    ; CHECK: [[@LINE+1]]:15: invalid size for scalar element in vector
     %0:_(<2 x s0>) = G_IMPLICIT_DEF
 ...


