[llvm] [SPIR-V] Rework usage of virtual registers' types and classes (PR #101732)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 6 09:54:42 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-spir-v
Author: Vyacheslav Levytskyy (VyacheslavLevytskyy)
Changes:
This PR reworks how virtual registers' types and classes are handled, with the goal of improving the correctness of the MIR emitted between passes from the perspective of MachineVerifier. This potentially helps detect previously missed flaws in code emission and hardens the test suite. As a measure of the correctness and usefulness of this PR, we may use a build with expensive checks enabled, where MachineVerifier reports problems in the test suite.
Satisfying MachineVerifier's requirements for MIR correctness demands not only a rework of how virtual registers' types and classes are used, but also corrections to the pre-legalizer and instruction selection logic. Namely, the following changes are introduced:
* scalar virtual registers get a proper bit width,
* the register class is deduced from the SPIR-V type (a rough sketch of this idea follows the list),
* a superclass is added for id virtual register classes,
* TableGen rules used for instruction selection are fixed,
* minor existing issues are fixed (a missing flag needed to correctly represent a null constant for OpenCL vs. HLSL, and incorrect use of integer virtual registers as a synonym for any non-type virtual register).
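To illustrate the second point, here is a small self-contained C++ sketch of deducing a register class from a value's SPIR-V type. This is a toy model rather than code from the PR: the enum values stand in for SPIR-V type opcodes, and the class kinds stand in for register classes such as SPIRV::iIDRegClass (which appears in the diff below); the float/vector/pointer class names are assumptions made for illustration.

```cpp
#include <iostream>

// Toy stand-ins for the SPIR-V type opcodes that matter for class selection.
enum class TypeOpcode { OpTypeBool, OpTypeInt, OpTypeFloat, OpTypeVector, OpTypePointer };

// Toy stand-ins for register classes; the real backend uses
// TargetRegisterClass objects (e.g. SPIRV::iIDRegClass), and the float/vector/
// pointer variants named here are assumptions, not the PR's exact classes.
enum class RegClassKind { iID, fID, vID, pID };

// Pick the register class from the opcode of the value's SPIR-V type,
// instead of defaulting every non-type value to a single generic ID class.
static RegClassKind regClassForType(TypeOpcode Op) {
  switch (Op) {
  case TypeOpcode::OpTypeFloat:
    return RegClassKind::fID;
  case TypeOpcode::OpTypeVector:
    return RegClassKind::vID;
  case TypeOpcode::OpTypePointer:
    return RegClassKind::pID;
  default:
    return RegClassKind::iID; // bool/int and other scalar ids
  }
}

int main() {
  // In this model, a float-typed value lands in the float id class.
  std::cout << (regClassForType(TypeOpcode::OpTypeFloat) == RegClassKind::fID) << "\n";
  return 0;
}
```

A superclass over these id classes (the third bullet) can then let instruction selection patterns match any of them wherever a generic id operand is expected.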
---
Patch is 118.41 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/101732.diff
55 Files Affected:
- (modified) llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp (+1-2)
- (modified) llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp (+77-77)
- (modified) llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp (+4-4)
- (modified) llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp (+16-17)
- (modified) llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h (+2-2)
- (modified) llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp (+4-4)
- (modified) llvm/lib/Target/SPIRV/SPIRVInstrInfo.td (+32-31)
- (modified) llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp (+54-14)
- (modified) llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp (+3-3)
- (modified) llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp (+72-65)
- (modified) llvm/lib/Target/SPIRV/SPIRVRegisterBanks.td (+1-1)
- (modified) llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td (+8-2)
- (modified) llvm/test/CodeGen/SPIRV/SampledImageRetType.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/atomicrmw.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/basic_int_types.ll (+7-3)
- (modified) llvm/test/CodeGen/SPIRV/empty.ll (+4-1)
- (modified) llvm/test/CodeGen/SPIRV/event-zero-const.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/expect.ll (+4-4)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_cache_controls/basic-load-store.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll (+3-2)
- (modified) llvm/test/CodeGen/SPIRV/hlsl-intrinsics/all.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/hlsl-intrinsics/any.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/hlsl-intrinsics/imad.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/instructions/atomic.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/linkage/link-attribute.ll (+2-1)
- (modified) llvm/test/CodeGen/SPIRV/literals.ll (+8-3)
- (modified) llvm/test/CodeGen/SPIRV/lshr-constexpr.ll (+13-9)
- (modified) llvm/test/CodeGen/SPIRV/opencl/image.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/pointers/irtrans-added-int-const-32-64.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/pointers/type-deduce-global-dup.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/OpImageSampleExplicitLod.ll (+2-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/OpImageWrite.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/OpVectorInsertDynamic_i16.ll (+18-18)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll (+4-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/fadd.ll (+30-3)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/image_with_access_qualifiers.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/non32.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll (+2-2)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll (+5-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/sub_group_extended_types.ll (+2-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll (+2-1)
- (modified) llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll (+2-1)
- (modified) llvm/test/CodeGen/SPIRV/types/or-i1.ll (+1-1)
- (modified) llvm/test/CodeGen/SPIRV/unnamed-global.ll (+5-2)
- (modified) llvm/test/CodeGen/SPIRV/var-uniform-const.ll (+2-2)
``````````diff
diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
index 8aea26d9963ce..6dd0df2a104c0 100644
--- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
@@ -67,8 +67,7 @@ static bool hasType(const MCInst &MI, const MCInstrInfo &MII) {
// Check if we define an ID, and take a type as operand 1.
auto &DefOpInfo = MCDesc.operands()[0];
auto &FirstArgOpInfo = MCDesc.operands()[1];
- return (DefOpInfo.RegClass == SPIRV::IDRegClassID ||
- DefOpInfo.RegClass == SPIRV::ANYIDRegClassID) &&
+ return DefOpInfo.RegClass != SPIRV::TYPERegClassID &&
FirstArgOpInfo.RegClass == SPIRV::TYPERegClassID;
}
return false;
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 1609576c038d0..c759fdcbd0a1e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -408,7 +408,7 @@ buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
Register ResultRegister =
MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
- MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
return std::make_tuple(ResultRegister, BoolType);
}
@@ -442,7 +442,7 @@ static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
Register DestinationReg = Register(0)) {
MachineRegisterInfo *MRI = MIRBuilder.getMRI();
if (!DestinationReg.isValid()) {
- DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
+ DestinationReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
MRI->setType(DestinationReg, LLT::scalar(32));
GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
}
@@ -460,7 +460,7 @@ static Register buildBuiltinVariableLoad(
SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
Register NewRegister =
- MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
MIRBuilder.getMRI()->setType(NewRegister,
LLT::pointer(0, GR->getPointerSize()));
SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
@@ -544,7 +544,7 @@ static Register buildScopeReg(Register CLScopeRegister,
Scope = getSPIRVScope(CLScope);
if (CLScope == static_cast<unsigned>(Scope)) {
- MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
+ MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
return CLScopeRegister;
}
}
@@ -564,7 +564,7 @@ static Register buildMemSemanticsReg(Register SemanticsRegister,
getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
if (Order == Semantics) {
- MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
+ MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
return SemanticsRegister;
}
}
@@ -583,7 +583,7 @@ static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
for (unsigned i = 0; i < Sz; ++i) {
Register ArgReg = Call->Arguments[i];
if (!MRI->getRegClassOrNull(ArgReg))
- MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ArgReg, &SPIRV::iIDRegClass);
MIB.addUse(ArgReg);
}
for (uint32_t ImmArg : ImmArgs)
@@ -599,8 +599,8 @@ static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
assert(Call->Arguments.size() == 2 &&
"Need 2 arguments for atomic init translation");
- MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
- MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
+ MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
MIRBuilder.buildInstr(SPIRV::OpStore)
.addUse(Call->Arguments[0])
.addUse(Call->Arguments[1]);
@@ -616,14 +616,14 @@ static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
Register PtrRegister = Call->Arguments[0];
- MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::iIDRegClass);
// TODO: if true insert call to __translate_ocl_memory_sccope before
// OpAtomicLoad and the function implementation. We can use Translator's
// output for transcoding/atomic_explicit_arguments.cl as an example.
Register ScopeRegister;
if (Call->Arguments.size() > 1) {
ScopeRegister = Call->Arguments[1];
- MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::iIDRegClass);
} else
ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
@@ -631,7 +631,7 @@ static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
if (Call->Arguments.size() > 2) {
// TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
MemSemanticsReg = Call->Arguments[2];
- MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::iIDRegClass);
} else {
int Semantics =
SPIRV::MemorySemantics::SequentiallyConsistent |
@@ -658,12 +658,12 @@ static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
Register ScopeRegister =
buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
Register PtrRegister = Call->Arguments[0];
- MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::iIDRegClass);
int Semantics =
SPIRV::MemorySemantics::SequentiallyConsistent |
getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
- MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
.addUse(PtrRegister)
.addUse(ScopeRegister)
@@ -686,9 +686,9 @@ static bool buildAtomicCompareExchangeInst(
Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
Register Desired = Call->Arguments[2]; // Value (C Desired).
- MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
- MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
- MRI->setRegClass(Desired, &SPIRV::IDRegClass);
+ MRI->setRegClass(ObjectPtr, &SPIRV::iIDRegClass);
+ MRI->setRegClass(ExpectedArg, &SPIRV::iIDRegClass);
+ MRI->setRegClass(Desired, &SPIRV::iIDRegClass);
SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
LLT DesiredLLT = MRI->getType(Desired);
@@ -729,8 +729,8 @@ static bool buildAtomicCompareExchangeInst(
MemSemEqualReg = Call->Arguments[3];
if (MemOrdNeq == MemSemEqual)
MemSemUnequalReg = Call->Arguments[4];
- MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
- MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[3], &SPIRV::iIDRegClass);
+ MRI->setRegClass(Call->Arguments[4], &SPIRV::iIDRegClass);
}
if (!MemSemEqualReg.isValid())
MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
@@ -747,7 +747,7 @@ static bool buildAtomicCompareExchangeInst(
Scope = getSPIRVScope(ClScope);
if (ClScope == static_cast<unsigned>(Scope))
ScopeReg = Call->Arguments[5];
- MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[5], &SPIRV::iIDRegClass);
}
if (!ScopeReg.isValid())
ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
@@ -760,7 +760,7 @@ static bool buildAtomicCompareExchangeInst(
Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
: Call->ReturnRegister;
if (!MRI->getRegClassOrNull(Tmp))
- MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
+ MRI->setRegClass(Tmp, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
@@ -799,12 +799,12 @@ static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
Register PtrRegister = Call->Arguments[0];
unsigned Semantics = SPIRV::MemorySemantics::None;
- MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
+ MRI->setRegClass(PtrRegister, &SPIRV::iIDRegClass);
Register MemSemanticsReg =
Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
Semantics, MIRBuilder, GR);
- MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
Register ValueReg = Call->Arguments[1];
Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
// support cl_ext_float_atomics
@@ -817,7 +817,7 @@ static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
Opcode = SPIRV::OpAtomicFAddEXT;
Register NegValueReg =
MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
- MRI->setRegClass(NegValueReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(NegValueReg, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
MIRBuilder.getMF());
MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
@@ -849,16 +849,16 @@ static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
MachineRegisterInfo *MRI = MIRBuilder.getMRI();
Register PtrReg = Call->Arguments[0];
- MRI->setRegClass(PtrReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(PtrReg, &SPIRV::iIDRegClass);
Register ScopeReg = Call->Arguments[1];
- MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
Register MemSemanticsReg = Call->Arguments[2];
- MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(MemSemanticsReg, &SPIRV::iIDRegClass);
Register ValueReg = Call->Arguments[3];
- MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ValueReg, &SPIRV::iIDRegClass);
MIRBuilder.buildInstr(Opcode)
.addDef(Call->ReturnRegister)
@@ -939,7 +939,7 @@ static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
Register MemSemanticsReg;
if (MemFlags == MemSemantics) {
MemSemanticsReg = Call->Arguments[0];
- MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(MemSemanticsReg, &SPIRV::iIDRegClass);
} else
MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);
@@ -962,7 +962,7 @@ static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
if (CLScope == static_cast<unsigned>(Scope)) {
ScopeReg = Call->Arguments[1];
- MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
}
}
@@ -1074,7 +1074,7 @@ static bool generateGroupInst(const SPIRV::IncomingCall *Call,
uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
Register ScopeReg = Call->Arguments[0];
if (!MRI->getRegClassOrNull(ScopeReg))
- MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
.addDef(Call->ReturnRegister)
.addUse(GR->getSPIRVTypeID(Call->ReturnType))
@@ -1083,7 +1083,7 @@ static bool generateGroupInst(const SPIRV::IncomingCall *Call,
for (unsigned i = 2; i < Call->Arguments.size(); ++i) {
Register ArgReg = Call->Arguments[i];
if (!MRI->getRegClassOrNull(ArgReg))
- MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ArgReg, &SPIRV::iIDRegClass);
MIB.addUse(ArgReg);
}
return true;
@@ -1131,10 +1131,10 @@ static bool generateGroupInst(const SPIRV::IncomingCall *Call,
MIB.addImm(GroupBuiltin->GroupOperation);
if (Call->Arguments.size() > 0) {
MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
- MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
for (unsigned i = 1; i < Call->Arguments.size(); i++) {
MIB.addUse(Call->Arguments[i]);
- MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[i], &SPIRV::iIDRegClass);
}
}
@@ -1208,7 +1208,7 @@ static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
.addUse(GR->getSPIRVTypeID(Call->ReturnType));
for (size_t i = 0; i < Call->Arguments.size(); ++i) {
MIB.addUse(Call->Arguments[i]);
- MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[i], &SPIRV::iIDRegClass);
}
return true;
@@ -1232,11 +1232,11 @@ static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
MachineRegisterInfo *MRI = MIRBuilder.getMRI();
Register GroupResultReg = Call->ReturnRegister;
- MRI->setRegClass(GroupResultReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(GroupResultReg, &SPIRV::iIDRegClass);
// Scope
Register ScopeReg = Call->Arguments[0];
- MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
// Group Operation
Register ConstGroupOpReg = Call->Arguments[1];
@@ -1253,7 +1253,7 @@ static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
// Value
Register ValueReg = Call->Arguments[2];
- MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ValueReg, &SPIRV::iIDRegClass);
auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
.addDef(GroupResultReg)
@@ -1280,7 +1280,7 @@ static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
MachineRegisterInfo *MRI = MIRBuilder.getMRI();
Register ResultReg = Call->ReturnRegister;
- MRI->setRegClass(ResultReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(ResultReg, &SPIRV::iIDRegClass);
// Deduce the `Scope` operand from the builtin function name.
SPIRV::Scope::Scope ScopeArg =
@@ -1350,7 +1350,7 @@ static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
Register DefaultReg = Call->ReturnRegister;
if (PointerSize != ResultWidth) {
DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
- MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
+ MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
MIRBuilder.getMF());
ToTruncate = DefaultReg;
@@ -1368,7 +1368,7 @@ static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
Register Extracted = Call->ReturnRegister;
if (!IsConstantIndex || PointerSize != ResultWidth) {
Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
- MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
+ MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
}
// Use Intrinsic::spv_extractelt so dynamic vs static extraction is
@@ -1387,7 +1387,7 @@ static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
Register CompareRegister =
MRI->createGenericVirtualRegister(LLT::scalar(1));
- MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
+ MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
// Use G_ICMP to check if idxVReg < 3.
@@ -1404,7 +1404,7 @@ static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
if (PointerSize != ResultWidth) {
SelectionResult =
MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
- MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
+ MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
MIRBuilder.getMF());
}
@@ -1588,7 +1588,7 @@ static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
if (NumExpectedRetComponents != NumActualRetComponents) {
QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
LLT::fixed_vector(NumActualRetComponents, 32));
- MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::iIDRegClass);
SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
QueryResultType = GR->getOrCreateSPIRVVectorType(
IntTy, NumActualRetComponents, MIRBuilder);
@@ -1597,7 +1597,7 @@ static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
unsigned Opcode =
IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
- MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
auto MIB = MIRBuilder.buildInstr(Opcode)
.addDef(QueryResult)
.addUse(GR->getSPIRVTypeID(QueryResultType))
@@ -1653,7 +1653,7 @@ static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
Register Image = Call->Arguments[0];
- MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
+ MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::iIDRegClass);
SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
(void)ImageDimensionality;
@@ -1717,12 +1717,12 @@ static bool generateReadImageInst(const StringRef DemangledCall,
SPIRVGlobalRegistry *GR) {
Register Image = Call->Arguments[0];
MachineRegisterInfo *MRI = MIRBuilder.getMRI();
- MRI->setRegClass(Image, &SPIRV::IDRegClass);
- MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
+ MRI->setRegClass(Image, &SPIRV::iIDRegClass);
+ MRI->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
bool HasMsaa = DemangledCall.contains_insensitive("msaa");
if (HasOclSampler || HasMsaa)
- MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
+ MRI->setRegClass(Call->Arguments[2], &SPIRV::iIDRegClass);
if (HasOclSampler) {
Register Sampler = Call->Arguments[1];
@@ -1738,7 +1738,7 @@ static bool generateReadImageInst(const StringRef DemangledCall,
SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
SPIRVType *SampledImageType =
GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
- Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);
+ Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
MIRBuilder.buildInstr(SPIRV::OpSampledImage)
.addDef(SampledImage)
@@ -1757,7 +1757,7 @@ static bool generateReadImageInst(const StringRef DemangledCall,
}
LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
Register TempRegister = MRI->createGenericVirtualRegister(LLType);
- MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
+ MRI->setRegClass(TempRegister, &SPIRV::iIDRegClass);
GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
@@ -1795,9 +1795,9 @@ static bool generateReadImageInst(const StringRef DemangledCall,
static bool generateWriteImageInst(const SPIRV::I...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/101732