[llvm] [GlobalISel] convergence control tokens and intrinsics (PR #67006)
Sameer Sahasrabuddhe via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 7 00:04:44 PST 2024
https://github.com/ssahasra updated https://github.com/llvm/llvm-project/pull/67006
From 6ac42c8e06bd27f5c53370fe057fc63db2003b93 Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Mon, 4 Mar 2024 15:15:06 +0530
Subject: [PATCH 1/2] [GlobalISel] convergence control tokens and intrinsics
In the IR translator, convert the LLVM token type to LLT::token(), which is an
alias for the s0 type. These tokens show up as implicit uses on convergent operations.
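
For context, a minimal illustrative IR snippet (adapted from the tests added in
this patch, not new functionality): the anchor intrinsic defines a token, and the
controlled convergent call consumes it through the "convergencectrl" bundle. After
the IR translator runs, the token becomes an s0 (token) vreg defined by
CONVERGENCECTRL_ANCHOR and appears as an implicit use on the
G_INTRINSIC_CONVERGENT instruction.

  define i32 @basic(i32 %src) {
    ; Token-typed value produced by the convergence anchor.
    %t = call token @llvm.experimental.convergence.anchor()
    ; Controlled convergent call; the token is passed via the operand bundle.
    %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
    ret i32 %r
  }

  declare token @llvm.experimental.convergence.anchor()
  declare i32 @llvm.amdgcn.readfirstlane(i32) convergent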
Differential Revision: https://reviews.llvm.org/D158147
---
.../llvm/CodeGen/GlobalISel/CallLowering.h | 4 +
.../llvm/CodeGen/GlobalISel/IRTranslator.h | 21 +++++
llvm/include/llvm/CodeGenTypes/LowLevelType.h | 46 +++++++---
llvm/lib/CodeGen/GlobalISel/CallLowering.cpp | 14 ++++
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 61 ++++++++++++--
.../CodeGen/GlobalISel/InlineAsmLowering.cpp | 8 ++
llvm/lib/CodeGen/LowLevelTypeUtils.cpp | 4 +
llvm/lib/CodeGen/MIRParser/MIParser.cpp | 13 +--
llvm/lib/IR/ConvergenceVerifier.cpp | 4 +-
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 3 +
.../irtranslator-convergence-tokens.ll | 83 +++++++++++++++++++
.../test/CodeGen/AMDGPU/convergence-tokens.ll | 42 +++++++---
.../AArch64/parse-low-level-type-invalid4.mir | 10 ---
.../AArch64/parse-low-level-type-invalid6.mir | 2 +-
14 files changed, 270 insertions(+), 45 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll
delete mode 100644 llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 3c3b91661d57889..4c187a3068d8233 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -117,6 +117,9 @@ class CallLowering {
/// vreg that the swifterror should be copied into after the call.
Register SwiftErrorVReg;
+ /// Valid if the call is a controlled convergent operation.
+ Register ConvergenceCtrlToken;
+
/// Original IR callsite corresponding to this call, if available.
const CallBase *CB = nullptr;
@@ -584,6 +587,7 @@ class CallLowering {
bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
ArrayRef<Register> ResRegs,
ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
+ Register ConvergenceCtrlToken,
std::function<unsigned()> GetCalleeReg) const;
/// For targets which want to use big-endian can enable it with
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index bfac54a65c5b4ea..01f5572631b383c 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -579,6 +579,10 @@ class IRTranslator : public MachineFunctionPass {
return false;
}
+ bool translateConvergenceControlIntrinsic(const CallInst &CI,
+ Intrinsic::ID ID,
+ MachineIRBuilder &MIRBuilder);
+
/// @}
// Builder for machine instruction a la IRBuilder.
@@ -697,6 +701,23 @@ class IRTranslator : public MachineFunctionPass {
return Regs[0];
}
+ Register getOrCreateConvergenceTokenVReg(const Value &Token) {
+ assert(Token.getType()->isTokenTy());
+ auto &Regs = *VMap.getVRegs(Token);
+ if (!Regs.empty()) {
+ assert(Regs.size() == 1 &&
+ "Expected a single register for convergence tokens.");
+ return Regs[0];
+ }
+
+ auto Reg = MRI->createGenericVirtualRegister(LLT::token());
+ Regs.push_back(Reg);
+ auto &Offsets = *VMap.getOffsets(Token);
+ if (Offsets.empty())
+ Offsets.push_back(0);
+ return Reg;
+ }
+
/// Allocate some vregs and offsets in the VMap. Then populate just the
/// offsets while leaving the vregs empty.
ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 5a16cffb80b32b0..31782c28b8d383c 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -45,6 +45,13 @@ class LLT {
/*AddressSpace=*/0};
}
+ /// Get a low-level token; just a scalar with zero bits (or no size).
+ static constexpr LLT token() {
+ return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
+ ElementCount::getFixed(0), /*SizeInBits=*/0,
+ /*AddressSpace=*/0};
+ }
+
/// Get a low-level pointer in the given address space.
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
assert(SizeInBits > 0 && "invalid pointer size");
@@ -134,17 +141,14 @@ class LLT {
explicit LLT(MVT VT);
- constexpr bool isValid() const { return IsScalar || IsPointer || IsVector; }
-
+ constexpr bool isValid() const { return IsScalar || RawData != 0; }
constexpr bool isScalar() const { return IsScalar; }
-
- constexpr bool isPointer() const { return IsPointer && !IsVector; }
-
- constexpr bool isPointerVector() const { return IsPointer && IsVector; }
-
- constexpr bool isPointerOrPointerVector() const { return IsPointer; }
-
- constexpr bool isVector() const { return IsVector; }
+ constexpr bool isVector() const { return isValid() && IsVector; }
+ constexpr bool isPointer() const {
+ return isValid() && IsPointer && !IsVector;
+ }
+ constexpr bool isPointerVector() const { return IsPointer && isVector(); }
+ constexpr bool isPointerOrPointerVector() const { return IsPointer && isValid(); }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
@@ -314,6 +318,28 @@ class LLT {
/// described in static const *Field variables. Each of these variables
/// is a 2-element array, with the first element describing the bitfield size
/// and the second element describing the bitfield offset.
+ ///
+ /// +--------+---------+--------+----------+----------------------+
+ /// |isScalar|isPointer|isVector| RawData |Notes |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 0 | 0 | 0 | 0 |Invalid |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 0 | 0 | 1 | 0 |Tombstone Key |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 0 | 1 | 0 | 0 |Empty Key |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 1 | 0 | 0 | 0 |Token |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 1 | 0 | 0 | non-zero |Scalar |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 0 | 1 | 0 | non-zero |Pointer |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 0 | 0 | 1 | non-zero |Vector of non-pointer |
+ /// +--------+---------+--------+----------+----------------------+
+ /// | 0 | 1 | 1 | non-zero |Vector of pointer |
+ /// +--------+---------+--------+----------+----------------------+
+ ///
+ /// Everything else is reserved.
typedef int BitFieldInfo[2];
///
/// This is how the bitfields are packed per Kind:
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 3a37bf3cd7a8a33..2ada80bd63ee084 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"
@@ -87,10 +88,20 @@ void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
});
}
+static bool hasConvergenceEntryToken(const CallBase &CB) {
+ auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl);
+ if (!Bundle)
+ return true;
+ auto *Token = Bundle->Inputs[0].get();
+ auto *Def = cast<IntrinsicInst>(Token);
+ return Def->getIntrinsicID() == Intrinsic::experimental_convergence_entry;
+}
+
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
ArrayRef<Register> ResRegs,
ArrayRef<ArrayRef<Register>> ArgRegs,
Register SwiftErrorVReg,
+ Register ConvergenceCtrlToken,
std::function<unsigned()> GetCalleeReg) const {
CallLoweringInfo Info;
const DataLayout &DL = MIRBuilder.getDataLayout();
@@ -121,6 +132,8 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
CanBeTailCalled = false;
}
+ if (!hasConvergenceEntryToken(CB))
+ CanBeTailCalled = false;
// First step is to marshall all the function's parameters into the correct
// physregs and memory locations. Gather the sequence of argument types that
@@ -187,6 +200,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
Info.CallConv = CallConv;
Info.SwiftErrorVReg = SwiftErrorVReg;
+ Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
Info.IsMustTailCall = CB.isMustTailCall();
Info.IsTailCall = CanBeTailCalled;
Info.IsVarArg = IsVarArg;
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 7c986dbbc2c7c88..e3877fdc1e1961f 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -213,8 +213,9 @@ ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
auto *VRegs = VMap.getVRegs(Val);
auto *Offsets = VMap.getOffsets(Val);
- assert(Val.getType()->isSized() &&
- "Don't know how to create an empty vreg");
+ if (!Val.getType()->isTokenTy())
+ assert(Val.getType()->isSized() &&
+ "Don't know how to create an empty vreg");
SmallVector<LLT, 4> SplitTys;
computeValueLLTs(*DL, *Val.getType(), SplitTys,
@@ -2036,6 +2037,37 @@ bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
return true;
}
+static unsigned getConvOpcode(Intrinsic::ID ID) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Unexpected intrinsic");
+ return 0;
+ case Intrinsic::experimental_convergence_anchor:
+ return TargetOpcode::CONVERGENCECTRL_ANCHOR;
+ case Intrinsic::experimental_convergence_entry:
+ return TargetOpcode::CONVERGENCECTRL_ENTRY;
+ case Intrinsic::experimental_convergence_loop:
+ return TargetOpcode::CONVERGENCECTRL_LOOP;
+ }
+}
+
+bool IRTranslator::translateConvergenceControlIntrinsic(
+ const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
+ MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
+ Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
+ MIB.addDef(OutputReg);
+
+ if (ID == Intrinsic::experimental_convergence_loop) {
+ auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
+ assert(Bundle && "Expected a convergence control token.");
+ Register InputReg =
+ getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
+ MIB.addUse(InputReg);
+ }
+
+ return true;
+}
+
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MachineIRBuilder &MIRBuilder) {
if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
@@ -2477,7 +2509,10 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
#include "llvm/IR/ConstrainedOps.def"
return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
MIRBuilder);
-
+ case Intrinsic::experimental_convergence_anchor:
+ case Intrinsic::experimental_convergence_entry:
+ case Intrinsic::experimental_convergence_loop:
+ return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
}
return false;
}
@@ -2528,12 +2563,18 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
}
}
+ Register ConvergenceCtrlToken = 0;
+ if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+ const auto &Token = *Bundle->Inputs[0].get();
+ ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
+ }
+
// We don't set HasCalls on MFI here yet because call lowering may decide to
// optimize into tail calls. Instead, we defer that to selection where a final
// scan is done to check if any instructions are calls.
- bool Success =
- CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
- [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
+ bool Success = CLI->lowerCall(
+ MIRBuilder, CB, Res, Args, SwiftErrorVReg, ConvergenceCtrlToken,
+ [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
// Check if we just inserted a tail call.
if (Success) {
@@ -2647,6 +2688,14 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
}
+ if (CI.isConvergent()) {
+ if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+ auto *Token = Bundle->Inputs[0].get();
+ Register TokenReg = getOrCreateVReg(*Token);
+ MIB.addUse(TokenReg, RegState::Implicit);
+ }
+ }
+
return true;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 4089a5e941b0512..14e1e1fdf01dea7 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -538,6 +538,14 @@ bool InlineAsmLowering::lowerInlineAsm(
}
}
+ if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
+ auto *Token = Bundle->Inputs[0].get();
+ ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
+ assert(SourceRegs.size() == 1 &&
+ "Expected the control token to fit into a single virtual register");
+ Inst.addUse(SourceRegs[0], RegState::Implicit);
+ }
+
if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
Inst.addMetadata(SrcLoc);
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index bc2ea3f05b6da6c..3499fe1ee757764 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -39,6 +39,10 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
return LLT::scalar(SizeInBits);
}
+ if (Ty.isTokenTy()) {
+ return LLT::token();
+ }
+
return LLT();
}
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index d41fc97cb806037..691c60d22724f36 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1919,10 +1919,13 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
if (Token.range().front() == 's') {
auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
- if (!verifyScalarSize(ScalarSize))
- return error("invalid size for scalar type");
-
- Ty = LLT::scalar(ScalarSize);
+ if (ScalarSize) {
+ if (!verifyScalarSize(ScalarSize))
+ return error("invalid size for scalar type");
+ Ty = LLT::scalar(ScalarSize);
+ } else {
+ Ty = LLT::token();
+ }
lex();
return false;
} else if (Token.range().front() == 'p') {
@@ -1980,7 +1983,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
if (Token.range().front() == 's') {
auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
if (!verifyScalarSize(ScalarSize))
- return error("invalid size for scalar type");
+ return error("invalid size for scalar element in vector");
Ty = LLT::scalar(ScalarSize);
} else if (Token.range().front() == 'p') {
const DataLayout &DL = MF.getDataLayout();
diff --git a/llvm/lib/IR/ConvergenceVerifier.cpp b/llvm/lib/IR/ConvergenceVerifier.cpp
index e73aeaade5f7149..6708f85819df7f6 100644
--- a/llvm/lib/IR/ConvergenceVerifier.cpp
+++ b/llvm/lib/IR/ConvergenceVerifier.cpp
@@ -75,14 +75,14 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
template <>
bool GenericConvergenceVerifier<SSAContext>::isInsideConvergentFunction(
- const InstructionT &I) {
+ const Instruction &I) {
auto *F = I.getFunction();
return F->isConvergent();
}
template <>
bool GenericConvergenceVerifier<SSAContext>::isConvergent(
- const InstructionT &I) {
+ const Instruction &I) {
if (auto *CB = dyn_cast<CallBase>(&I)) {
return CB->isConvergent();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 6d05c3678bf09e5..42695d05ca990d5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1483,6 +1483,9 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ if (Info.ConvergenceCtrlToken) {
+ MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
+ }
handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, Info.CallConv,
ImplicitArgRegs);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll
new file mode 100644
index 000000000000000..f66ee766738a9d9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-convergence-tokens.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s
+
+define i32 @basic(i32 %src) #1 {
+ ; CHECK-LABEL: name: basic
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[CONVERGENCECTRL_ANCHOR:%[0-9]+]]:_(s0) = CONVERGENCECTRL_ANCHOR
+ ; CHECK-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32), implicit [[CONVERGENCECTRL_ANCHOR]](s0)
+ ; CHECK-NEXT: $vgpr0 = COPY [[INTRINSIC_CONVERGENT]](s32)
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+ %t = call token @llvm.experimental.convergence.anchor()
+ %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+ ret i32 %r
+}
+
+define i32 @nested(i32 %src) #0 {
+ ; CHECK-LABEL: name: nested
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[CONVERGENCECTRL_ENTRY:%[0-9]+]]:_(s0) = CONVERGENCECTRL_ENTRY
+ ; CHECK-NEXT: [[CONVERGENCECTRL_ANCHOR:%[0-9]+]]:_(s0) = CONVERGENCECTRL_ANCHOR
+ ; CHECK-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32), implicit [[CONVERGENCECTRL_ANCHOR]](s0)
+ ; CHECK-NEXT: [[INTRINSIC_CONVERGENT1:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32), implicit [[CONVERGENCECTRL_ENTRY]](s0)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[INTRINSIC_CONVERGENT1]], [[INTRINSIC_CONVERGENT]]
+ ; CHECK-NEXT: $vgpr0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+ %t1 = call token @llvm.experimental.convergence.entry()
+ %t2 = call token @llvm.experimental.convergence.anchor()
+ %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+ %r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
+ %sum = add i32 %r1, %r2
+ ret i32 %sum
+}
+
+define i32 @trivial_heart(i32 %src) #1 {
+bb1:
+ %t1 = call token @llvm.experimental.convergence.anchor()
+ br label %bb2
+
+bb2:
+ %t2 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %t1) ]
+ %r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+ ret i32 %r
+}
+
+;; FIXME: Cannot test lowering of convergence control in the presence of
+;; branches because the backend emits control flow pseudos such as SI_IF that
+;; are marked convergent. These must also be assigned convergence control tokens,
+;; to satisfy the requirement that controlled and uncontrolled operations cannot
+;; be mixed.
+;; define i32 @branch(i32 %src, i1 %cond) #1 {
+;; bb1:
+;; %t = call token @llvm.experimental.convergence.anchor()
+;; br i1 %cond, label %bb2, label %bb3
+;;
+;; bb2:
+;; %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+;; br label %bb4
+;;
+;; bb3:
+;; %r3 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
+;; br label %bb4
+;;
+;; bb4:
+;; %p = phi i32 [%r2, %bb2], [%r3, %bb3]
+;; ret i32 %p
+;; }
+
+;; TODO: Write tests using natural loops.
+
+declare i32 @llvm.amdgcn.readfirstlane(i32) #0
+
+declare token @llvm.experimental.convergence.entry()
+declare token @llvm.experimental.convergence.anchor()
+declare token @llvm.experimental.convergence.loop()
+
+attributes #0 = { nounwind readnone convergent }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
index 2ed6d7fd0f598d2..dc3e05a7c3f116a 100644
--- a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -1,8 +1,9 @@
; RUN: llc --amdgpu-disable-structurizer -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
; RUN: llc --amdgpu-disable-structurizer -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
+; RUN: llc --amdgpu-disable-structurizer -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GISEL
; CHECK-LABEL: name: basic_call
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, [[TOKEN]], csr_amdgpu, {{.*}}
; DEADMI: {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
define i32 @basic_call(i32 %src) #0 {
@@ -12,10 +13,11 @@ define i32 @basic_call(i32 %src) #0 {
}
; CHECK-LABEL: name: basic_intrinsic
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
define i32 @basic_intrinsic(i32 %src) #0 {
%t = call token @llvm.experimental.convergence.anchor()
%r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
@@ -30,12 +32,13 @@ define i32 @uncontrolled_call(i32 %src) #0 {
}
; CHECK-LABEL: name: basic_branch
-; CHECK: bb.0.entry:
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-; CHECK: bb.1.then:
+; CHECK: bb.[[#]].entry:
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; CHECK: bb.[[#]].then:
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
define i32 @basic_branch(i32 %src, i1 %cond) #0 {
entry:
%t = call token @llvm.experimental.convergence.anchor()
@@ -52,12 +55,13 @@ else:
}
; CHECK-LABEL: name: basic_loop
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-; CHECK: bb.1.loop:
-; CHECK: [[LOOP:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_LOOP [[TOKEN]]
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; CHECK: bb.[[#]].loop:
+; CHECK: [[LOOP:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_LOOP [[TOKEN]]
; ISEL: CONVERGENCECTRL_GLUE [[LOOP]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[LOOP]]
define i32 @basic_loop(i32 %src, i1 %cond) #0 {
%t1 = call token @llvm.experimental.convergence.anchor()
br label %loop
@@ -71,6 +75,22 @@ end:
ret i32 %r
}
+define i32 @nested(i32 %src) #0 {
+ ; CHECK-LABEL: name: nested
+ ; CHECK: [[ENTRY:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+ ; CHECK: [[ANCHOR:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+ ; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ANCHOR]]
+ ; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ANCHOR]]
+ ; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ENTRY]]
+ ; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ENTRY]]
+ %t1 = call token @llvm.experimental.convergence.entry()
+ %t2 = call token @llvm.experimental.convergence.anchor()
+ %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+ %r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
+ %sum = add i32 %r1, %r2
+ ret i32 %sum
+}
+
declare i32 @foo(i32 %x) #0
declare i32 @llvm.amdgcn.readfirstlane(i32) #0
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
deleted file mode 100644
index d66dd1044869225..000000000000000
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
+++ /dev/null
@@ -1,10 +0,0 @@
-# RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is 0 bits
----
-name: test_scalar_size_0
-body: |
- bb.0:
- liveins: $x0
- ; CHECK: [[@LINE+1]]:10: invalid size for scalar type
- %0:_(s0) = G_IMPLICIT_DEF
-...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
index 698568701fb1a47..632e5fa81db111b 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
@@ -5,6 +5,6 @@ name: test_vector_element_size_0
body: |
bb.0:
liveins: $x0
- ; CHECK: [[@LINE+1]]:15: invalid size for scalar type
+ ; CHECK: [[@LINE+1]]:15: invalid size for scalar element in vector
%0:_(<2 x s0>) = G_IMPLICIT_DEF
...
From 951b5431b2454d53c73a5429d61edfc2f39cdb05 Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Thu, 7 Mar 2024 13:34:59 +0530
Subject: [PATCH 2/2] clang format
---
llvm/include/llvm/CodeGenTypes/LowLevelType.h | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 31782c28b8d383c..74e4a7bda4401bc 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -47,8 +47,9 @@ class LLT {
/// Get a low-level token; just a scalar with zero bits (or no size).
static constexpr LLT token() {
- return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
- ElementCount::getFixed(0), /*SizeInBits=*/0,
+ return LLT{/*isPointer=*/false, /*isVector=*/false,
+ /*isScalar=*/true, ElementCount::getFixed(0),
+ /*SizeInBits=*/0,
/*AddressSpace=*/0};
}
@@ -148,7 +149,9 @@ class LLT {
return isValid() && IsPointer && !IsVector;
}
constexpr bool isPointerVector() const { return IsPointer && isVector(); }
- constexpr bool isPointerOrPointerVector() const { return IsPointer && isValid(); }
+ constexpr bool isPointerOrPointerVector() const {
+ return IsPointer && isValid();
+ }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.