[llvm] [GlobalISel] convergence control tokens and intrinsics (PR #67006)

Sameer Sahasrabuddhe via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 14 02:29:11 PDT 2024


https://github.com/ssahasra updated https://github.com/llvm/llvm-project/pull/67006

From 788944cd47533580cc3df6535725a0982027922d Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Thu, 14 Mar 2024 12:12:50 +0530
Subject: [PATCH 1/2] [GlobalISel] Introduce LLT::token() as a special scalar
 type

The new token type is used in #67006 for implementing convergence control tokens
in GMIR.
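
As a quick illustrative sketch (the helper function here is hypothetical and
not part of the patch), the token type behaves as a zero-sized scalar and
compares equal to s0, matching the predicates and unit tests added below:

  #include "llvm/CodeGenTypes/LowLevelType.h"
  #include <cassert>
  using namespace llvm;

  void tokenTypeSketch() {
    const LLT Tok = LLT::token();    // zero-sized scalar used for token values
    const LLT S32 = LLT::scalar(32); // an ordinary 32-bit scalar

    assert(Tok.isValid() && Tok.isScalar() && Tok.isToken());
    assert(S32.isScalar() && !S32.isToken());
    assert(Tok == LLT::scalar(0));   // the token type is spelled s0 in MIR
  }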
---
 llvm/include/llvm/CodeGenTypes/LowLevelType.h | 50 +++++++++++++++----
 llvm/lib/CodeGen/LowLevelTypeUtils.cpp        |  4 ++
 llvm/unittests/CodeGen/LowLevelTypeTest.cpp   | 26 ++++++++++
 3 files changed, 70 insertions(+), 10 deletions(-)

diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 5a16cffb80b32b..62ee28cfac99c5 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -45,6 +45,14 @@ class LLT {
                /*AddressSpace=*/0};
   }
 
+  /// Get a low-level token; just a scalar with zero bits (or no size).
+  static constexpr LLT token() {
+    return LLT{/*isPointer=*/false, /*isVector=*/false,
+               /*isScalar=*/true,   ElementCount::getFixed(0),
+               /*SizeInBits=*/0,
+               /*AddressSpace=*/0};
+  }
+
   /// Get a low-level pointer in the given address space.
   static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid pointer size");
@@ -134,17 +142,17 @@ class LLT {
 
   explicit LLT(MVT VT);
 
-  constexpr bool isValid() const { return IsScalar || IsPointer || IsVector; }
-
+  constexpr bool isValid() const { return IsScalar || RawData != 0; }
   constexpr bool isScalar() const { return IsScalar; }
-
-  constexpr bool isPointer() const { return IsPointer && !IsVector; }
-
-  constexpr bool isPointerVector() const { return IsPointer && IsVector; }
-
-  constexpr bool isPointerOrPointerVector() const { return IsPointer; }
-
-  constexpr bool isVector() const { return IsVector; }
+  constexpr bool isToken() const { return IsScalar && RawData == 0; }
+  constexpr bool isVector() const { return isValid() && IsVector; }
+  constexpr bool isPointer() const {
+    return isValid() && IsPointer && !IsVector;
+  }
+  constexpr bool isPointerVector() const { return IsPointer && isVector(); }
+  constexpr bool isPointerOrPointerVector() const {
+    return IsPointer && isValid();
+  }
 
   /// Returns the number of elements in a vector LLT. Must only be called on
   /// vector types.
@@ -314,6 +322,28 @@ class LLT {
   /// described in static const *Field variables. Each of these variables
   /// is a 2-element array, with the first element describing the bitfield size
   /// and the second element describing the bitfield offset.
+  ///
+  /// +--------+---------+--------+----------+----------------------+
+  /// |isScalar|isPointer|isVector| RawData  |Notes                 |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    0    |   0    |    0     |Invalid               |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    0    |   1    |    0     |Tombstone Key         |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    1    |   0    |    0     |Empty Key             |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   1    |    0    |   0    |    0     |Token                 |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   1    |    0    |   0    | non-zero |Scalar                |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    1    |   0    | non-zero |Pointer               |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    0    |   1    | non-zero |Vector of non-pointer |
+  /// +--------+---------+--------+----------+----------------------+
+  /// |   0    |    1    |   1    | non-zero |Vector of pointer     |
+  /// +--------+---------+--------+----------+----------------------+
+  ///
+  /// Everything else is reserved.
   typedef int BitFieldInfo[2];
   ///
   /// This is how the bitfields are packed per Kind:
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index bc2ea3f05b6da6..3499fe1ee75776 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -39,6 +39,10 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
     return LLT::scalar(SizeInBits);
   }
 
+  if (Ty.isTokenTy()) {
+    return LLT::token();
+  }
+
   return LLT();
 }
 
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index cb34802a5de271..3a6ee6120c4d22 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -18,6 +18,28 @@ using namespace llvm;
 
 namespace {
 
+TEST(LowLevelTypeTest, Token) {
+  LLVMContext C;
+  DataLayout DL("");
+
+  const LLT TTy = LLT::token();
+
+  // Test kind.
+  ASSERT_TRUE(TTy.isValid());
+  ASSERT_TRUE(TTy.isScalar());
+  ASSERT_TRUE(TTy.isToken());
+
+  ASSERT_FALSE(TTy.isPointer());
+  ASSERT_FALSE(TTy.isVector());
+
+  const LLT STy = LLT::scalar(0);
+  ASSERT_TRUE(STy == TTy);
+
+  // Test equality operators.
+  EXPECT_TRUE(TTy == TTy);
+  EXPECT_FALSE(TTy != TTy);
+}
+
 TEST(LowLevelTypeTest, Scalar) {
   LLVMContext C;
   DataLayout DL("");
@@ -32,6 +54,8 @@ TEST(LowLevelTypeTest, Scalar) {
     ASSERT_FALSE(Ty.isPointer());
     ASSERT_FALSE(Ty.isVector());
 
+    EXPECT_TRUE(S != 0 || Ty.isToken());
+
     // Test sizes.
     EXPECT_EQ(S, Ty.getSizeInBits());
     EXPECT_EQ(S, Ty.getScalarSizeInBits());
@@ -77,6 +101,7 @@ TEST(LowLevelTypeTest, Vector) {
 
       ASSERT_FALSE(VTy.isScalar());
       ASSERT_FALSE(VTy.isPointer());
+      ASSERT_FALSE(VTy.isToken());
 
       // Test sizes.
       EXPECT_EQ(S, VTy.getScalarSizeInBits());
@@ -300,6 +325,7 @@ TEST(LowLevelTypeTest, Invalid) {
   ASSERT_FALSE(Ty.isScalar());
   ASSERT_FALSE(Ty.isPointer());
   ASSERT_FALSE(Ty.isVector());
+  ASSERT_FALSE(Ty.isToken());
 }
 
 TEST(LowLevelTypeTest, Divide) {

From ff7f4ef71b6ecc6fefdacd5b729d3134620ea79c Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Mon, 4 Mar 2024 15:15:06 +0530
Subject: [PATCH 2/2] [GlobalISel] convergence control tokens and intrinsics

In the IR translator, convert the LLVM token type to LLT::token(), which is an
alias for the s0 type. The resulting token values show up as implicit uses on
convergent operations.
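
Sketch only (condensed from the IRTranslator change below): when translating a
convergent call, the token named in the convergencectrl operand bundle is
looked up as a vreg and attached to the call instruction (MIB) as an implicit
use:

  if (CI.isConvergent()) {
    if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
      // The token value was translated to a vreg of type LLT::token() (s0);
      // attach it as an implicit use so the call stays tied to its token.
      Register TokenReg = getOrCreateVReg(*Bundle->Inputs[0].get());
      MIB.addUse(TokenReg, RegState::Implicit);
    }
  }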

Differential Revision: https://reviews.llvm.org/D158147
---
 .../llvm/CodeGen/GlobalISel/CallLowering.h    |  4 ++
 .../llvm/CodeGen/GlobalISel/IRTranslator.h    | 21 +++++++
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp  |  4 +-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  | 60 +++++++++++++++++--
 .../CodeGen/GlobalISel/InlineAsmLowering.cpp  |  8 +++
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       | 13 ++--
 llvm/lib/IR/ConvergenceVerifier.cpp           |  4 +-
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp |  6 ++
 .../test/CodeGen/AMDGPU/convergence-tokens.ll | 59 ++++++++++++++----
 .../AArch64/parse-low-level-type-invalid4.mir | 10 ----
 .../AArch64/parse-low-level-type-invalid6.mir |  2 +-
 11 files changed, 155 insertions(+), 36 deletions(-)
 delete mode 100644 llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 3c3b91661d5788..4c187a3068d823 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -117,6 +117,9 @@ class CallLowering {
     /// vreg that the swifterror should be copied into after the call.
     Register SwiftErrorVReg;
 
+    /// Valid if the call is a controlled convergent operation.
+    Register ConvergenceCtrlToken;
+
     /// Original IR callsite corresponding to this call, if available.
     const CallBase *CB = nullptr;
 
@@ -584,6 +587,7 @@ class CallLowering {
   bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
                  ArrayRef<Register> ResRegs,
                  ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
+                 Register ConvergenceCtrlToken,
                  std::function<unsigned()> GetCalleeReg) const;
 
   /// For targets which want to use big-endian can enable it with
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 29f675b2203b6b..f22326d38f4f88 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -579,6 +579,10 @@ class IRTranslator : public MachineFunctionPass {
     return false;
   }
 
+  bool translateConvergenceControlIntrinsic(const CallInst &CI,
+                                            Intrinsic::ID ID,
+                                            MachineIRBuilder &MIRBuilder);
+
   /// @}
 
   // Builder for machine instruction a la IRBuilder.
@@ -697,6 +701,23 @@ class IRTranslator : public MachineFunctionPass {
     return Regs[0];
   }
 
+  Register getOrCreateConvergenceTokenVReg(const Value &Token) {
+    assert(Token.getType()->isTokenTy());
+    auto &Regs = *VMap.getVRegs(Token);
+    if (!Regs.empty()) {
+      assert(Regs.size() == 1 &&
+             "Expected a single register for convergence tokens.");
+      return Regs[0];
+    }
+
+    auto Reg = MRI->createGenericVirtualRegister(LLT::token());
+    Regs.push_back(Reg);
+    auto &Offsets = *VMap.getOffsets(Token);
+    if (Offsets.empty())
+      Offsets.push_back(0);
+    return Reg;
+  }
+
   /// Allocate some vregs and offsets in the VMap. Then populate just the
   /// offsets while leaving the vregs empty.
   ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 3a37bf3cd7a8a3..363fad53b76c35 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -21,6 +21,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Target/TargetMachine.h"
@@ -91,6 +92,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                              ArrayRef<Register> ResRegs,
                              ArrayRef<ArrayRef<Register>> ArgRegs,
                              Register SwiftErrorVReg,
+                             Register ConvergenceCtrlToken,
                              std::function<unsigned()> GetCalleeReg) const {
   CallLoweringInfo Info;
   const DataLayout &DL = MIRBuilder.getDataLayout();
@@ -121,7 +123,6 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
     CanBeTailCalled = false;
   }
 
-
   // First step is to marshall all the function's parameters into the correct
   // physregs and memory locations. Gather the sequence of argument types that
   // we'll pass to the assigner function.
@@ -187,6 +188,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
   Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
   Info.CallConv = CallConv;
   Info.SwiftErrorVReg = SwiftErrorVReg;
+  Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
   Info.IsMustTailCall = CB.isMustTailCall();
   Info.IsTailCall = CanBeTailCalled;
   Info.IsVarArg = IsVarArg;
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 94fdb37e283bb4..83442ab98752fa 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -213,8 +213,9 @@ ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
   auto *VRegs = VMap.getVRegs(Val);
   auto *Offsets = VMap.getOffsets(Val);
 
-  assert(Val.getType()->isSized() &&
-         "Don't know how to create an empty vreg");
+  if (!Val.getType()->isTokenTy())
+    assert(Val.getType()->isSized() &&
+           "Don't know how to create an empty vreg");
 
   SmallVector<LLT, 4> SplitTys;
   computeValueLLTs(*DL, *Val.getType(), SplitTys,
@@ -2038,6 +2039,36 @@ bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
   return true;
 }
 
+static unsigned getConvOpcode(Intrinsic::ID ID) {
+  switch (ID) {
+  default:
+    llvm_unreachable("Unexpected intrinsic");
+  case Intrinsic::experimental_convergence_anchor:
+    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
+  case Intrinsic::experimental_convergence_entry:
+    return TargetOpcode::CONVERGENCECTRL_ENTRY;
+  case Intrinsic::experimental_convergence_loop:
+    return TargetOpcode::CONVERGENCECTRL_LOOP;
+  }
+}
+
+bool IRTranslator::translateConvergenceControlIntrinsic(
+    const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
+  MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
+  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
+  MIB.addDef(OutputReg);
+
+  if (ID == Intrinsic::experimental_convergence_loop) {
+    auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
+    assert(Bundle && "Expected a convergence control token.");
+    Register InputReg =
+        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
+    MIB.addUse(InputReg);
+  }
+
+  return true;
+}
+
 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {
   if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
@@ -2479,7 +2510,10 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
 #include "llvm/IR/ConstrainedOps.def"
     return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                            MIRBuilder);
-
+  case Intrinsic::experimental_convergence_anchor:
+  case Intrinsic::experimental_convergence_entry:
+  case Intrinsic::experimental_convergence_loop:
+    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
   }
   return false;
 }
@@ -2530,12 +2564,18 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
     }
   }
 
+  Register ConvergenceCtrlToken = 0;
+  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+    const auto &Token = *Bundle->Inputs[0].get();
+    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
+  }
+
   // We don't set HasCalls on MFI here yet because call lowering may decide to
   // optimize into tail calls. Instead, we defer that to selection where a final
   // scan is done to check if any instructions are calls.
-  bool Success =
-      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
-                     [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
+  bool Success = CLI->lowerCall(
+      MIRBuilder, CB, Res, Args, SwiftErrorVReg, ConvergenceCtrlToken,
+      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
 
   // Check if we just inserted a tail call.
   if (Success) {
@@ -2649,6 +2689,14 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
         MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
   }
 
+  if (CI.isConvergent()) {
+    if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+      auto *Token = Bundle->Inputs[0].get();
+      Register TokenReg = getOrCreateVReg(*Token);
+      MIB.addUse(TokenReg, RegState::Implicit);
+    }
+  }
+
   return true;
 }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 4089a5e941b051..14e1e1fdf01dea 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -538,6 +538,14 @@ bool InlineAsmLowering::lowerInlineAsm(
     }
   }
 
+  if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+    auto *Token = Bundle->Inputs[0].get();
+    ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
+    assert(SourceRegs.size() == 1 &&
+           "Expected the control token to fit into a single virtual register");
+    Inst.addUse(SourceRegs[0], RegState::Implicit);
+  }
+
   if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
     Inst.addMetadata(SrcLoc);
 
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index d41fc97cb80603..691c60d22724f3 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1919,10 +1919,13 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
 
   if (Token.range().front() == 's') {
     auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
-    if (!verifyScalarSize(ScalarSize))
-      return error("invalid size for scalar type");
-
-    Ty = LLT::scalar(ScalarSize);
+    if (ScalarSize) {
+      if (!verifyScalarSize(ScalarSize))
+        return error("invalid size for scalar type");
+      Ty = LLT::scalar(ScalarSize);
+    } else {
+      Ty = LLT::token();
+    }
     lex();
     return false;
   } else if (Token.range().front() == 'p') {
@@ -1980,7 +1983,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
   if (Token.range().front() == 's') {
     auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
     if (!verifyScalarSize(ScalarSize))
-      return error("invalid size for scalar type");
+      return error("invalid size for scalar element in vector");
     Ty = LLT::scalar(ScalarSize);
   } else if (Token.range().front() == 'p') {
     const DataLayout &DL = MF.getDataLayout();
diff --git a/llvm/lib/IR/ConvergenceVerifier.cpp b/llvm/lib/IR/ConvergenceVerifier.cpp
index e73aeaade5f714..6708f85819df7f 100644
--- a/llvm/lib/IR/ConvergenceVerifier.cpp
+++ b/llvm/lib/IR/ConvergenceVerifier.cpp
@@ -75,14 +75,14 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
 
 template <>
 bool GenericConvergenceVerifier<SSAContext>::isInsideConvergentFunction(
-    const InstructionT &I) {
+    const Instruction &I) {
   auto *F = I.getFunction();
   return F->isConvergent();
 }
 
 template <>
 bool GenericConvergenceVerifier<SSAContext>::isConvergent(
-    const InstructionT &I) {
+    const Instruction &I) {
   if (auto *CB = dyn_cast<CallBase>(&I)) {
     return CB->isConvergent();
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 6d05c3678bf09e..7e1f041fa10933 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1301,6 +1301,9 @@ bool AMDGPUCallLowering::lowerTailCall(
   if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
     return false;
 
+  if (Info.ConvergenceCtrlToken) {
+    MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
+  }
   handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, CalleeCC,
                               ImplicitArgRegs);
 
@@ -1483,6 +1486,9 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
 
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 
+  if (Info.ConvergenceCtrlToken) {
+    MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
+  }
   handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, Info.CallConv,
                               ImplicitArgRegs);
 
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
index 2ed6d7fd0f598d..6beccce9400e58 100644
--- a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -1,10 +1,12 @@
 ; RUN: llc --amdgpu-disable-structurizer -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
 ; RUN: llc --amdgpu-disable-structurizer -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
+; RUN: llc --amdgpu-disable-structurizer -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GISEL
 
 ; CHECK-LABEL: name:            basic_call
-;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
+;       CHECK:    [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
 ;        ISEL:    {{.*}} SI_CALL_ISEL {{.*}}, @foo, [[TOKEN]], csr_amdgpu, {{.*}}
 ;      DEADMI:    {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
+;       GISEL:    {{.*}} G_SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
 define i32 @basic_call(i32 %src) #0 {
   %t = call token @llvm.experimental.convergence.entry()
   %r = call i32 @foo(i32 %src) [ "convergencectrl"(token %t) ]
@@ -12,10 +14,11 @@ define i32 @basic_call(i32 %src) #0 {
 }
 
 ; CHECK-LABEL: name:            basic_intrinsic
-;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+;       CHECK:    [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
 ;        ISEL:    CONVERGENCECTRL_GLUE [[TOKEN]]
 ;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
-;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+;        ISEL:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+;       GISEL:    {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
 define i32 @basic_intrinsic(i32 %src) #0 {
   %t = call token @llvm.experimental.convergence.anchor()
   %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
@@ -30,12 +33,13 @@ define i32 @uncontrolled_call(i32 %src) #0 {
 }
 
 ; CHECK-LABEL: name:            basic_branch
-;       CHECK:  bb.0.entry:
-;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-;       CHECK:  bb.1.then:
+;       CHECK:  bb.[[#]].entry:
+;       CHECK:    [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+;       CHECK:  bb.[[#]].then:
 ;        ISEL:    CONVERGENCECTRL_GLUE [[TOKEN]]
 ;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
-;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+;        ISEL:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+;       GISEL:    {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
 define i32 @basic_branch(i32 %src, i1 %cond) #0 {
 entry:
   %t = call token @llvm.experimental.convergence.anchor()
@@ -52,12 +56,13 @@ else:
 }
 
 ; CHECK-LABEL: name:            basic_loop
-;       CHECK:    [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-;       CHECK:  bb.1.loop:
-;       CHECK:    [[LOOP:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_LOOP [[TOKEN]]
+;       CHECK:    [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+;       CHECK:  bb.[[#]].loop:
+;       CHECK:    [[LOOP:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_LOOP [[TOKEN]]
 ;        ISEL:    CONVERGENCECTRL_GLUE [[LOOP]]
 ;  DEADMI-NOT:    CONVERGENCECTRL_GLUE
-;       CHECK:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+;        ISEL:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+;       GISEL:    {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[LOOP]]
 define i32 @basic_loop(i32 %src, i1 %cond) #0 {
   %t1 = call token @llvm.experimental.convergence.anchor()
   br label %loop
@@ -71,6 +76,38 @@ end:
   ret i32 %r
 }
 
+; CHECK-LABEL: name: nested
+;       CHECK:    [[ENTRY:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+;       CHECK:    [[ANCHOR:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+;        ISEL:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ANCHOR]]
+;       GISEL:    {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ANCHOR]]
+;        ISEL:    {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ENTRY]]
+;       GISEL:    {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ENTRY]]
+define i32 @nested(i32 %src) #0 {
+  %t1 = call token @llvm.experimental.convergence.entry()
+  %t2 = call token @llvm.experimental.convergence.anchor()
+  %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+  %r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
+  %sum = add i32 %r1, %r2
+  ret i32 %sum
+}
+
+; COM: FIXME: Tokens on tail-call have not been implemented for SelectionDAG
+; COM:        yet; the corresponding checks have been commented out.
+;
+; CHECK-LABEL: name:            tail_call_void_func_void
+;       GISEL:    [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; COM:  CHECK:    [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; COM:   ISEL:    {{.*}} SI_CALL_ISEL {{.*}}, @external_void_func_void, [[TOKEN]], csr_amdgpu, {{.*}}
+; COM: DEADMI:    {{.*}} SI_CALL {{.*}}, @external_void_func_void, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
+;       GISEL:    {{.*}} SI_TCRETURN {{.*}}, @external_void_func_void, 0, csr_amdgpu, implicit [[TOKEN]]
+define void @tail_call_void_func_void() #0 {
+  %t1 = call token @llvm.experimental.convergence.entry()
+  tail call void @external_void_func_void() [ "convergencectrl"(token %t1) ]
+  ret void
+}
+
+declare hidden void @external_void_func_void() #0
 declare i32 @foo(i32 %x) #0
 
 declare i32 @llvm.amdgcn.readfirstlane(i32) #0
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
deleted file mode 100644
index d66dd104486922..00000000000000
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
+++ /dev/null
@@ -1,10 +0,0 @@
-# RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is 0 bits
----
-name: test_scalar_size_0
-body: |
-  bb.0:
-    liveins: $x0
-    ; CHECK: [[@LINE+1]]:10: invalid size for scalar type
-    %0:_(s0) = G_IMPLICIT_DEF
-...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
index 698568701fb1a4..632e5fa81db111 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
@@ -5,6 +5,6 @@ name: test_vector_element_size_0
 body: |
   bb.0:
     liveins: $x0
-    ; CHECK: [[@LINE+1]]:15: invalid size for scalar type
+    ; CHECK: [[@LINE+1]]:15: invalid size for scalar element in vector
     %0:_(<2 x s0>) = G_IMPLICIT_DEF
 ...


