[llvm] a93cbd4 - [SPIRV] Audit `select` Result in SPIRVInstructionSelector (#115193)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 12 10:31:04 PST 2024
Author: Finn Plummer
Date: 2024-11-12T10:31:01-08:00
New Revision: a93cbd4e762799206ae6e6c45f4a7d0da7e56513
URL: https://github.com/llvm/llvm-project/commit/a93cbd4e762799206ae6e6c45f4a7d0da7e56513
DIFF: https://github.com/llvm/llvm-project/commit/a93cbd4e762799206ae6e6c45f4a7d0da7e56513.diff
LOG: [SPIRV] Audit `select` Result in SPIRVInstructionSelector (#115193)
- as per the definition of `select` in GlobalISel/InstructionSelector.h,
the return value is a boolean denoting whether the select was successful
- accumulating with `Result |=` is incorrect, since every inserted
instruction must be successful; we therefore switch to `Result &=`
(see the sketch below)
- ensure that the return value of every BuildMI instruction is
propagated correctly
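As a minimal sketch of the pattern (the helper name and the `FirstOp`/`SecondOp`
opcodes below are illustrative placeholders, not taken from the patch):
`Result |=` lets one successful insertion mask a later `constrainAllUses`
failure, whereas initializing `Result` to true and using `Result &=` only
reports success when every inserted instruction constrains successfully.

    // Illustrative selector helper, not code from this patch.
    bool SPIRVInstructionSelector::selectExample(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
      bool Result = true; // start at true so `&=` can only record failures
      Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
      Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(FirstOp))
                    .addDef(TmpReg)
                    .addUse(GR.getSPIRVTypeID(ResType))
                    .constrainAllUses(TII, TRI, RBI);
      // With the old `Result |=`, a failure in the next instruction would be
      // hidden by the success above; with `&&`/`&=` any single failure makes
      // select() return false, as InstructionSelector expects.
      return Result &&
             BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SecondOp))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(TmpReg)
                 .constrainAllUses(TII, TRI, RBI);
    }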
Added:
Modified:
llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index a968b65c173067..50ba9cc061cedc 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -264,12 +264,13 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool selectUnmergeValues(MachineInstr &I) const;
- void selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
+ bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
// Utilities
- Register buildI32Constant(uint32_t Val, MachineInstr &I,
- const SPIRVType *ResType = nullptr) const;
+ std::pair<Register, bool>
+ buildI32Constant(uint32_t Val, MachineInstr &I,
+ const SPIRVType *ResType = nullptr) const;
Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
@@ -1021,6 +1022,7 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
Register SrcReg = I.getOperand(1).getReg();
+ bool Result = true;
if (I.getOpcode() == TargetOpcode::G_MEMSET) {
assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
@@ -1041,12 +1043,13 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
GR.add(GV, GR.CurMF, VarReg);
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
- .addDef(VarReg)
- .addUse(GR.getSPIRVTypeID(VarTy))
- .addImm(SPIRV::StorageClass::UniformConstant)
- .addUse(Const)
- .constrainAllUses(TII, TRI, RBI);
+ Result &=
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
+ .addDef(VarReg)
+ .addUse(GR.getSPIRVTypeID(VarTy))
+ .addImm(SPIRV::StorageClass::UniformConstant)
+ .addUse(Const)
+ .constrainAllUses(TII, TRI, RBI);
buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
@@ -1059,10 +1062,12 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
.addUse(I.getOperand(2).getReg());
if (I.getNumMemOperands())
addMemoryOperands(*I.memoperands_begin(), MIB);
- bool Result = MIB.constrainAllUses(TII, TRI, RBI);
+ Result &= MIB.constrainAllUses(TII, TRI, RBI);
if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
- BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
- .addUse(MIB->getOperand(0).getReg());
+ Result &=
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
+ .addUse(MIB->getOperand(0).getReg())
+ .constrainAllUses(TII, TRI, RBI);
return Result;
}
@@ -1071,11 +1076,14 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
MachineInstr &I,
unsigned NewOpcode,
unsigned NegateOpcode) const {
+ bool Result = true;
assert(I.hasOneMemOperand());
const MachineMemOperand *MemOp = *I.memoperands_begin();
uint32_t Scope = static_cast<uint32_t>(getMemScope(
GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
- Register ScopeReg = buildI32Constant(Scope, I);
+ auto ScopeConstant = buildI32Constant(Scope, I);
+ Register ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
Register Ptr = I.getOperand(1).getReg();
// TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
@@ -1083,26 +1091,27 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
// getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
AtomicOrdering AO = MemOp->getSuccessOrdering();
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
- Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);
+ auto MemSemConstant = buildI32Constant(MemSem /*| ScSem*/, I);
+ Register MemSemReg = MemSemConstant.first;
+ Result &= MemSemConstant.second;
- bool Result = false;
Register ValueReg = I.getOperand(2).getReg();
if (NegateOpcode != 0) {
// Translation with negative value operand is requested
Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
- Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
+ Result &= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
ValueReg = TmpReg;
}
- Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Ptr)
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .addUse(ValueReg)
- .constrainAllUses(TII, TRI, RBI);
- return Result;
+ return Result &&
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Ptr)
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .addUse(ValueReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
@@ -1143,16 +1152,21 @@ bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
- Register MemSemReg = buildI32Constant(MemSem, I);
+ auto MemSemConstant = buildI32Constant(MemSem, I);
+ Register MemSemReg = MemSemConstant.first;
+ bool Result = MemSemConstant.second;
SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
uint32_t Scope = static_cast<uint32_t>(
getMemScope(GR.CurMF->getFunction().getContext(), Ord));
- Register ScopeReg = buildI32Constant(Scope, I);
+ auto ScopeConstant = buildI32Constant(Scope, I);
+ Register ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .constrainAllUses(TII, TRI, RBI);
+ return Result &&
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
@@ -1196,7 +1210,7 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
.addUse(GR.getSPIRVTypeID(StructType));
for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
MIB.addUse(I.getOperand(i).getReg());
- bool Status = MIB.constrainAllUses(TII, TRI, RBI);
+ bool Result = MIB.constrainAllUses(TII, TRI, RBI);
// Build instructions to extract fields of the instruction's result.
// A new virtual register to store the higher part of the result struct.
Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
@@ -1208,21 +1222,21 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(StructVReg)
.addImm(i);
- Status &= MIB.constrainAllUses(TII, TRI, RBI);
+ Result &= MIB.constrainAllUses(TII, TRI, RBI);
}
// Build boolean value from the higher part.
- Status &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
- .addDef(I.getOperand(1).getReg())
- .addUse(BoolTypeReg)
- .addUse(HigherVReg)
- .addUse(ZeroReg)
- .constrainAllUses(TII, TRI, RBI);
- return Status;
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
+ .addDef(I.getOperand(1).getReg())
+ .addUse(BoolTypeReg)
+ .addUse(HigherVReg)
+ .addUse(ZeroReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
+ bool Result = true;
Register ScopeReg;
Register MemSemEqReg;
Register MemSemNeqReg;
@@ -1232,17 +1246,26 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const MachineMemOperand *MemOp = *I.memoperands_begin();
unsigned Scope = static_cast<uint32_t>(getMemScope(
GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
- ScopeReg = buildI32Constant(Scope, I);
+ auto ScopeConstant = buildI32Constant(Scope, I);
+ ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
unsigned ScSem = static_cast<uint32_t>(
getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
AtomicOrdering AO = MemOp->getSuccessOrdering();
unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
- MemSemEqReg = buildI32Constant(MemSemEq, I);
+ auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
+ MemSemEqReg = MemSemEqConstant.first;
+ Result &= MemSemEqConstant.second;
AtomicOrdering FO = MemOp->getFailureOrdering();
unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
- MemSemNeqReg =
- MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
+ if (MemSemEq == MemSemNeq)
+ MemSemNeqReg = MemSemEqReg;
+ else {
+ auto MemSemNeqConstant = buildI32Constant(MemSemEq, I);
+ MemSemNeqReg = MemSemNeqConstant.first;
+ Result &= MemSemNeqConstant.second;
+ }
} else {
ScopeReg = I.getOperand(5).getReg();
MemSemEqReg = I.getOperand(6).getReg();
@@ -1254,7 +1277,7 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
const DebugLoc &DL = I.getDebugLoc();
- bool Result =
+ Result &=
BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
.addDef(ACmpRes)
.addUse(GR.getSPIRVTypeID(SpvValTy))
@@ -1267,28 +1290,28 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
.constrainAllUses(TII, TRI, RBI);
Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
.addDef(CmpSuccReg)
.addUse(GR.getSPIRVTypeID(BoolTy))
.addUse(ACmpRes)
.addUse(Cmp)
.constrainAllUses(TII, TRI, RBI);
Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
.addDef(TmpReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(ACmpRes)
.addUse(GR.getOrCreateUndef(I, ResType, TII))
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(CmpSuccReg)
- .addUse(TmpReg)
- .addImm(1)
- .constrainAllUses(TII, TRI, RBI);
- return Result;
+ return Result &&
+ BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(CmpSuccReg)
+ .addUse(TmpReg)
+ .addImm(1)
+ .constrainAllUses(TII, TRI, RBI);
}
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
@@ -1438,16 +1461,16 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
- bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
- .addDef(Tmp)
- .addUse(GR.getSPIRVTypeID(GenericPtrTy))
- .addUse(SrcPtr)
- .constrainAllUses(TII, TRI, RBI);
- return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Tmp)
- .constrainAllUses(TII, TRI, RBI);
+ bool Result = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
+ .addDef(Tmp)
+ .addUse(GR.getSPIRVTypeID(GenericPtrTy))
+ .addUse(SrcPtr)
+ .constrainAllUses(TII, TRI, RBI);
+ return Result && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Tmp)
+ .constrainAllUses(TII, TRI, RBI);
}
// Check if instructions from the SPV_INTEL_usm_storage_classes extension may
@@ -1621,26 +1644,27 @@ bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
}
+ bool Result = true;
if (!IsBoolTy) {
Register ConstZeroReg =
IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
- .addDef(NotEqualReg)
- .addUse(GR.getSPIRVTypeID(SpvBoolTy))
- .addUse(InputRegister)
- .addUse(ConstZeroReg)
- .constrainAllUses(TII, TRI, RBI);
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
+ .addDef(NotEqualReg)
+ .addUse(GR.getSPIRVTypeID(SpvBoolTy))
+ .addUse(InputRegister)
+ .addUse(ConstZeroReg)
+ .constrainAllUses(TII, TRI, RBI);
}
if (!IsVectorTy)
- return true;
+ return Result;
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
- .addUse(NotEqualReg)
- .constrainAllUses(TII, TRI, RBI);
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
+ .addUse(NotEqualReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAll(Register ResVReg,
@@ -1765,14 +1789,12 @@ bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
.addUse(I.getOperand(3).getReg())
.constrainAllUses(TII, TRI, RBI);
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Dot)
- .addUse(I.getOperand(4).getReg())
- .constrainAllUses(TII, TRI, RBI);
-
- return Result;
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Dot)
+ .addUse(I.getOperand(4).getReg())
+ .constrainAllUses(TII, TRI, RBI);
}
// Since pre-1.6 SPIRV has no DotProductInput4x8BitPacked implementation,
@@ -1787,7 +1809,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
assert(I.getOperand(4).isReg());
MachineBasicBlock &BB = *I.getParent();
- bool Result = false;
+ bool Result = true;
// Acc = C
Register Acc = I.getOperand(4).getReg();
@@ -1799,7 +1821,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
for (unsigned i = 0; i < 4; i++) {
// A[i]
Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
.addDef(AElt)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
@@ -1809,7 +1831,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// B[i]
Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
.addDef(BElt)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(3).getReg())
@@ -1819,7 +1841,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// A[i] * B[i]
Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
.addDef(Mul)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(AElt)
@@ -1828,7 +1850,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// Discard 24 highest-bits so that stored i32 register is i8 equivalent
Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
.addDef(MaskMul)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Mul)
@@ -1839,7 +1861,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// Acc = Acc + A[i] * B[i]
Register Sum =
i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
.addDef(Sum)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Acc)
@@ -1910,7 +1932,7 @@ bool SPIRVInstructionSelector::selectSign(Register ResVReg,
if (NeedsConversion) {
auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(SignReg)
@@ -2204,7 +2226,7 @@ void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
-Register
+std::pair<Register, bool>
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
const SPIRVType *ResType) const {
Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
@@ -2213,6 +2235,7 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
// Find a constant in DT or build a new one.
auto ConstInt = ConstantInt::get(LLVMTy, Val);
Register NewReg = GR.find(ConstInt, GR.CurMF);
+ bool Result = true;
if (!NewReg.isValid()) {
NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
GR.add(ConstInt, GR.CurMF, NewReg);
@@ -2228,9 +2251,9 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
.addUse(GR.getSPIRVTypeID(SpvI32Ty))
.addImm(APInt(32, Val).getZExtValue());
}
- constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
+ Result &= constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}
- return NewReg;
+ return {NewReg, Result};
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
@@ -2356,18 +2379,18 @@ bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
Register Zero = buildZerosVal(IntTy, I);
Register One = buildOnesVal(false, IntTy, I);
MachineBasicBlock &BB = *I.getParent();
- BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
- .addDef(BitIntReg)
- .addUse(GR.getSPIRVTypeID(IntTy))
- .addUse(IntReg)
- .addUse(One)
- .constrainAllUses(TII, TRI, RBI);
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(BoolTy))
- .addUse(BitIntReg)
- .addUse(Zero)
- .constrainAllUses(TII, TRI, RBI);
+ bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ .addDef(BitIntReg)
+ .addUse(GR.getSPIRVTypeID(IntTy))
+ .addUse(IntReg)
+ .addUse(One)
+ .constrainAllUses(TII, TRI, RBI);
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(BoolTy))
+ .addUse(BitIntReg)
+ .addUse(Zero)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
@@ -2680,34 +2703,40 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_cmpxchg:
return selectAtomicCmpXchg(ResVReg, ResType, I);
case Intrinsic::spv_unreachable:
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
- break;
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
+ .constrainAllUses(TII, TRI, RBI);
case Intrinsic::spv_alloca:
return selectFrameIndex(ResVReg, ResType, I);
case Intrinsic::spv_alloca_array:
return selectAllocaArray(ResVReg, ResType, I);
case Intrinsic::spv_assume:
if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
- .addUse(I.getOperand(1).getReg());
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
+ .addUse(I.getOperand(1).getReg())
+ .constrainAllUses(TII, TRI, RBI);
break;
case Intrinsic::spv_expect:
if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
- .addUse(I.getOperand(3).getReg());
+ .addUse(I.getOperand(3).getReg())
+ .constrainAllUses(TII, TRI, RBI);
break;
case Intrinsic::arithmetic_fence:
if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT))
+ return BuildMI(BB, I, I.getDebugLoc(),
+ TII.get(SPIRV::OpArithmeticFenceEXT))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
- .addUse(I.getOperand(2).getReg());
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
else
- BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
- .addUse(I.getOperand(2).getReg());
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
+ ResVReg)
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
break;
case Intrinsic::spv_thread_id:
return selectSpvThreadId(ResVReg, ResType, I);
@@ -2751,16 +2780,22 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
case Intrinsic::spv_group_memory_barrier_with_group_sync: {
- Register MemSemReg =
+ bool Result = true;
+ auto MemSemConstant =
buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
- Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup, I);
+ Register MemSemReg = MemSemConstant.first;
+ Result &= MemSemConstant.second;
+ auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
+ Register ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
- .addUse(ScopeReg)
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .constrainAllUses(TII, TRI, RBI);
- } break;
+ return Result &&
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
+ .addUse(ScopeReg)
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .constrainAllUses(TII, TRI, RBI);
+ }
case Intrinsic::spv_lifetime_start:
case Intrinsic::spv_lifetime_end: {
unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
@@ -2769,8 +2804,11 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
if (Size == -1)
Size = 0;
- BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
- } break;
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
+ .addUse(PtrReg)
+ .addImm(Size)
+ .constrainAllUses(TII, TRI, RBI);
+ }
case Intrinsic::spv_saturate:
return selectSaturate(ResVReg, ResType, I);
case Intrinsic::spv_nclamp:
@@ -2806,8 +2844,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_value_md:
break;
case Intrinsic::spv_handle_fromBinding: {
- selectHandleFromBinding(ResVReg, ResType, I);
- return true;
+ return selectHandleFromBinding(ResVReg, ResType, I);
}
default: {
std::string DiagMsg;
@@ -2820,7 +2857,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
return true;
}
-void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
+bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
@@ -2839,10 +2876,11 @@ void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
// TODO: For now we assume the resource is an image, which needs to be
// loaded to get the handle. That will not be true for storage buffers.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
- .addUse(VarReg);
+ .addUse(VarReg)
+ .constrainAllUses(TII, TRI, RBI);
}
Register SPIRVInstructionSelector::buildPointerToResource(
@@ -3283,14 +3321,12 @@ bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
? SPIRV::OpVectorTimesScalar
: SPIRV::OpFMulS;
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(VarReg)
- .addUse(ScaleReg)
- .constrainAllUses(TII, TRI, RBI);
-
- return Result;
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(VarReg)
+ .addUse(ScaleReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
@@ -3327,10 +3363,11 @@ bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
// Load v3uint value from the global variable.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
- .addDef(LoadedRegister)
- .addUse(GR.getSPIRVTypeID(Vec3Ty))
- .addUse(Variable);
+ bool Result =
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ .addDef(LoadedRegister)
+ .addUse(GR.getSPIRVTypeID(Vec3Ty))
+ .addUse(Variable);
// Get Thread ID index. Expecting operand is a constant immediate value,
// wrapped in a type assignment.
@@ -3344,7 +3381,7 @@ bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(LoadedRegister)
.addImm(ThreadId);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ return Result && MIB.constrainAllUses(TII, TRI, RBI);
}
namespace llvm {