[llvm] [SPIRV] Audit `select` Result in SPIRVInstructionSelector (PR #115193)
Finn Plummer via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 6 13:55:55 PST 2024
https://github.com/inbelic updated https://github.com/llvm/llvm-project/pull/115193
From db3cc556c10e942d9d3f2b1b85f7fc08bc469291 Mon Sep 17 00:00:00 2001
From: Finn Plummer <canadienfinn at gmail.com>
Date: Wed, 6 Nov 2024 18:55:16 +0000
Subject: [PATCH 1/2] [SPIRV] Audit `select` Result in SPIRVInstructionSelector
- As per the definition of `select` in GlobalISel/InstructionSelector.h,
  the return value is a boolean denoting whether selection succeeded.
- Accumulating with `Result |=` is incorrect, since every inserted
  instruction must be constrained successfully, so we change to `Result &=`.
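For illustration, a minimal standalone sketch of why the accumulator must
start at true and be combined with `&=` (the emit* helpers below are
hypothetical stand-ins for the selector's BuildMI/constrainAllUses calls,
not the real LLVM API):

#include <cstdio>

// Hypothetical stand-ins: each models one emitted instruction reporting
// whether constraining its register operands succeeded.
static bool emitFirstInstr() { return true; }   // succeeds
static bool emitSecondInstr() { return false; } // fails to constrain

int main() {
  // Old pattern: OR-ing hides the failure of the second instruction.
  bool ResultOr = false;
  ResultOr |= emitFirstInstr();
  ResultOr |= emitSecondInstr();

  // Fixed pattern: AND-ing requires every emitted instruction to succeed.
  bool ResultAnd = true;
  ResultAnd &= emitFirstInstr();
  ResultAnd &= emitSecondInstr();

  std::printf("|= reports %d, &= reports %d\n", ResultOr, ResultAnd);
  // Prints "|= reports 1, &= reports 0"; only the &= form propagates the
  // failure up to select(), as InstructionSelector.h expects.
  return 0;
}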
---
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 26 +++++++++----------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 7aa5f4f2b1a8f1..d4fab92dbc24a5 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -1041,16 +1041,16 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);
- bool Result = false;
+ bool Result = true;
Register ValueReg = I.getOperand(2).getReg();
if (NegateOpcode != 0) {
// Translation with negative value operand is requested
Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
- Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
+ Result &= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
ValueReg = TmpReg;
}
- Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
+ Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Ptr)
@@ -1223,21 +1223,21 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
.constrainAllUses(TII, TRI, RBI);
Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
.addDef(CmpSuccReg)
.addUse(GR.getSPIRVTypeID(BoolTy))
.addUse(ACmpRes)
.addUse(Cmp)
.constrainAllUses(TII, TRI, RBI);
Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
.addDef(TmpReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(ACmpRes)
.addUse(GR.getOrCreateUndef(I, ResType, TII))
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(CmpSuccReg)
@@ -1743,7 +1743,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
assert(I.getOperand(4).isReg());
MachineBasicBlock &BB = *I.getParent();
- bool Result = false;
+ bool Result = true;
// Acc = C
Register Acc = I.getOperand(4).getReg();
@@ -1755,7 +1755,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
for (unsigned i = 0; i < 4; i++) {
// A[i]
Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
.addDef(AElt)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
@@ -1765,7 +1765,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// B[i]
Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
.addDef(BElt)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(3).getReg())
@@ -1775,7 +1775,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// A[i] * B[i]
Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
.addDef(Mul)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(AElt)
@@ -1784,7 +1784,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// Discard 24 highest-bits so that stored i32 register is i8 equivalent
Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
.addDef(MaskMul)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Mul)
@@ -1795,7 +1795,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
// Acc = Acc + A[i] * B[i]
Register Sum =
i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
- Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
.addDef(Sum)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Acc)
@@ -1866,7 +1866,7 @@ bool SPIRVInstructionSelector::selectSign(Register ResVReg,
if (NeedsConversion) {
auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
- Result |= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
+ Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(SignReg)
From 7b922d7dacc2177ed97d46f0615257677542f39f Mon Sep 17 00:00:00 2001
From: Finn Plummer <canadienfinn at gmail.com>
Date: Wed, 6 Nov 2024 20:05:53 +0000
Subject: [PATCH 2/2] Additionally check all uses of BuildMI
- propagate all BuildMI returns up to the caller
- propagate the BuildMI result in buildI32Constant
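As a rough sketch of the new shape (simplified, hypothetical types below,
not the real LLVM signatures): buildI32Constant now returns the register
together with a success flag, and callers fold that flag into their running
Result:

#include <utility>

using Register = unsigned; // stand-in for llvm::Register

// Simplified model of the changed helper: the bool reports whether the
// emitted constant instruction was constrained successfully.
static std::pair<Register, bool> buildI32Constant(unsigned Val) {
  Register NewReg = Val;   // placeholder for the registry lookup / new vreg
  bool Constrained = true; // placeholder for constrainSelectedInstRegOperands
  return {NewReg, Constrained};
}

// Caller pattern used throughout the patch: unpack the pair and AND the
// success flag into the running Result.
static bool selectFenceSketch() {
  bool Result = true;
  auto MemSemConstant = buildI32Constant(/*MemSem=*/0x10);
  Register MemSemReg = MemSemConstant.first;
  Result &= MemSemConstant.second;
  (void)MemSemReg; // would feed the OpMemoryBarrier operands in the real code
  return Result;   // false if any emitted constant failed to constrain
}

int main() { return selectFenceSketch() ? 0 : 1; }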
---
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 236 +++++++++++-------
1 file changed, 139 insertions(+), 97 deletions(-)
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index f5b9ca939ba898..aaa9133124c791 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -261,12 +261,13 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool selectUnmergeValues(MachineInstr &I) const;
- void selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
+ bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
// Utilities
- Register buildI32Constant(uint32_t Val, MachineInstr &I,
- const SPIRVType *ResType = nullptr) const;
+ std::pair<Register, bool>
+ buildI32Constant(uint32_t Val, MachineInstr &I,
+ const SPIRVType *ResType = nullptr) const;
Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
@@ -1018,6 +1019,7 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
Register SrcReg = I.getOperand(1).getReg();
+ bool Result = true;
if (I.getOpcode() == TargetOpcode::G_MEMSET) {
assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
@@ -1038,12 +1040,13 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
GR.add(GV, GR.CurMF, VarReg);
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
- .addDef(VarReg)
- .addUse(GR.getSPIRVTypeID(VarTy))
- .addImm(SPIRV::StorageClass::UniformConstant)
- .addUse(Const)
- .constrainAllUses(TII, TRI, RBI);
+ Result &=
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
+ .addDef(VarReg)
+ .addUse(GR.getSPIRVTypeID(VarTy))
+ .addImm(SPIRV::StorageClass::UniformConstant)
+ .addUse(Const)
+ .constrainAllUses(TII, TRI, RBI);
buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
@@ -1056,10 +1059,12 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
.addUse(I.getOperand(2).getReg());
if (I.getNumMemOperands())
addMemoryOperands(*I.memoperands_begin(), MIB);
- bool Result = MIB.constrainAllUses(TII, TRI, RBI);
+ Result &= MIB.constrainAllUses(TII, TRI, RBI);
if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
- BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
- .addUse(MIB->getOperand(0).getReg());
+ Result &=
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
+ .addUse(MIB->getOperand(0).getReg())
+ .constrainAllUses(TII, TRI, RBI);
return Result;
}
@@ -1068,11 +1073,14 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
MachineInstr &I,
unsigned NewOpcode,
unsigned NegateOpcode) const {
+ bool Result = true;
assert(I.hasOneMemOperand());
const MachineMemOperand *MemOp = *I.memoperands_begin();
uint32_t Scope = static_cast<uint32_t>(getMemScope(
GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
- Register ScopeReg = buildI32Constant(Scope, I);
+ auto ScopeConstant = buildI32Constant(Scope, I);
+ Register ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
Register Ptr = I.getOperand(1).getReg();
// TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
@@ -1080,9 +1088,10 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
// getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
AtomicOrdering AO = MemOp->getSuccessOrdering();
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
- Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);
+ auto MemSemConstant = buildI32Constant(MemSem /*| ScSem*/, I);
+ Register MemSemReg = MemSemConstant.first;
+ Result &= MemSemConstant.second;
- bool Result = true;
Register ValueReg = I.getOperand(2).getReg();
if (NegateOpcode != 0) {
// Translation with negative value operand is requested
@@ -1140,16 +1149,21 @@ bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
- Register MemSemReg = buildI32Constant(MemSem, I);
+ auto MemSemConstant = buildI32Constant(MemSem, I);
+ Register MemSemReg = MemSemConstant.first;
+ bool Result = MemSemConstant.second;
SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
uint32_t Scope = static_cast<uint32_t>(
getMemScope(GR.CurMF->getFunction().getContext(), Ord));
- Register ScopeReg = buildI32Constant(Scope, I);
+ auto ScopeConstant = buildI32Constant(Scope, I);
+ Register ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .constrainAllUses(TII, TRI, RBI);
+ return Result &&
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
@@ -1193,7 +1207,7 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
.addUse(GR.getSPIRVTypeID(StructType));
for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
MIB.addUse(I.getOperand(i).getReg());
- bool Status = MIB.constrainAllUses(TII, TRI, RBI);
+ bool Result = MIB.constrainAllUses(TII, TRI, RBI);
// Build instructions to extract fields of the instruction's result.
// A new virtual register to store the higher part of the result struct.
Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
@@ -1205,21 +1219,22 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(StructVReg)
.addImm(i);
- Status &= MIB.constrainAllUses(TII, TRI, RBI);
+ Result &= MIB.constrainAllUses(TII, TRI, RBI);
}
// Build boolean value from the higher part.
- Status &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
.addDef(I.getOperand(1).getReg())
.addUse(BoolTypeReg)
.addUse(HigherVReg)
.addUse(ZeroReg)
.constrainAllUses(TII, TRI, RBI);
- return Status;
+ return Result;
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
+ bool Result = true;
Register ScopeReg;
Register MemSemEqReg;
Register MemSemNeqReg;
@@ -1229,17 +1244,26 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const MachineMemOperand *MemOp = *I.memoperands_begin();
unsigned Scope = static_cast<uint32_t>(getMemScope(
GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
- ScopeReg = buildI32Constant(Scope, I);
+ auto ScopeConstant = buildI32Constant(Scope, I);
+ ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
unsigned ScSem = static_cast<uint32_t>(
getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
AtomicOrdering AO = MemOp->getSuccessOrdering();
unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
- MemSemEqReg = buildI32Constant(MemSemEq, I);
+ auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
+ MemSemEqReg = MemSemEqConstant.first;
+ Result &= MemSemEqConstant.second;
AtomicOrdering FO = MemOp->getFailureOrdering();
unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
- MemSemNeqReg =
- MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
+ if (MemSemEq == MemSemNeq)
+ MemSemNeqReg = MemSemEqReg;
+ else {
+      auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I);
+ MemSemNeqReg = MemSemNeqConstant.first;
+ Result &= MemSemNeqConstant.second;
+ }
} else {
ScopeReg = I.getOperand(5).getReg();
MemSemEqReg = I.getOperand(6).getReg();
@@ -1251,7 +1275,7 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
const DebugLoc &DL = I.getDebugLoc();
- bool Result =
+ Result &=
BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
.addDef(ACmpRes)
.addUse(GR.getSPIRVTypeID(SpvValTy))
@@ -1435,16 +1459,16 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
- bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
- .addDef(Tmp)
- .addUse(GR.getSPIRVTypeID(GenericPtrTy))
- .addUse(SrcPtr)
- .constrainAllUses(TII, TRI, RBI);
- return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Tmp)
- .constrainAllUses(TII, TRI, RBI);
+ bool Result = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
+ .addDef(Tmp)
+ .addUse(GR.getSPIRVTypeID(GenericPtrTy))
+ .addUse(SrcPtr)
+ .constrainAllUses(TII, TRI, RBI);
+ return Result && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Tmp)
+ .constrainAllUses(TII, TRI, RBI);
}
// Check if instructions from the SPV_INTEL_usm_storage_classes extension may
@@ -1618,26 +1642,27 @@ bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
}
+ bool Result = true;
if (!IsBoolTy) {
Register ConstZeroReg =
IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
- .addDef(NotEqualReg)
- .addUse(GR.getSPIRVTypeID(SpvBoolTy))
- .addUse(InputRegister)
- .addUse(ConstZeroReg)
- .constrainAllUses(TII, TRI, RBI);
+ Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
+ .addDef(NotEqualReg)
+ .addUse(GR.getSPIRVTypeID(SpvBoolTy))
+ .addUse(InputRegister)
+ .addUse(ConstZeroReg)
+ .constrainAllUses(TII, TRI, RBI);
}
if (!IsVectorTy)
- return true;
+ return Result;
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
- .addUse(NotEqualReg)
- .constrainAllUses(TII, TRI, RBI);
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
+ .addUse(NotEqualReg)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAll(Register ResVReg,
@@ -2170,7 +2195,7 @@ void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
-Register
+std::pair<Register, bool>
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
const SPIRVType *ResType) const {
Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
@@ -2179,6 +2204,7 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
// Find a constant in DT or build a new one.
auto ConstInt = ConstantInt::get(LLVMTy, Val);
Register NewReg = GR.find(ConstInt, GR.CurMF);
+ bool Result = true;
if (!NewReg.isValid()) {
NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
GR.add(ConstInt, GR.CurMF, NewReg);
@@ -2194,9 +2220,9 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
.addUse(GR.getSPIRVTypeID(SpvI32Ty))
.addImm(APInt(32, Val).getZExtValue());
}
- constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
+ Result &= constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}
- return NewReg;
+ return {NewReg, Result};
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
@@ -2322,18 +2348,18 @@ bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
Register Zero = buildZerosVal(IntTy, I);
Register One = buildOnesVal(false, IntTy, I);
MachineBasicBlock &BB = *I.getParent();
- BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
- .addDef(BitIntReg)
- .addUse(GR.getSPIRVTypeID(IntTy))
- .addUse(IntReg)
- .addUse(One)
- .constrainAllUses(TII, TRI, RBI);
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(BoolTy))
- .addUse(BitIntReg)
- .addUse(Zero)
- .constrainAllUses(TII, TRI, RBI);
+ bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ .addDef(BitIntReg)
+ .addUse(GR.getSPIRVTypeID(IntTy))
+ .addUse(IntReg)
+ .addUse(One)
+ .constrainAllUses(TII, TRI, RBI);
+ return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(BoolTy))
+ .addUse(BitIntReg)
+ .addUse(Zero)
+ .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
@@ -2646,34 +2672,40 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_cmpxchg:
return selectAtomicCmpXchg(ResVReg, ResType, I);
case Intrinsic::spv_unreachable:
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
- break;
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
+ .constrainAllUses(TII, TRI, RBI);
case Intrinsic::spv_alloca:
return selectFrameIndex(ResVReg, ResType, I);
case Intrinsic::spv_alloca_array:
return selectAllocaArray(ResVReg, ResType, I);
case Intrinsic::spv_assume:
if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
- .addUse(I.getOperand(1).getReg());
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
+ .addUse(I.getOperand(1).getReg())
+ .constrainAllUses(TII, TRI, RBI);
break;
case Intrinsic::spv_expect:
if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
- .addUse(I.getOperand(3).getReg());
+ .addUse(I.getOperand(3).getReg())
+ .constrainAllUses(TII, TRI, RBI);
break;
case Intrinsic::arithmetic_fence:
if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT))
+ return BuildMI(BB, I, I.getDebugLoc(),
+ TII.get(SPIRV::OpArithmeticFenceEXT))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
- .addUse(I.getOperand(2).getReg());
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
else
- BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
- .addUse(I.getOperand(2).getReg());
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
+ ResVReg)
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
break;
case Intrinsic::spv_thread_id:
return selectSpvThreadId(ResVReg, ResType, I);
@@ -2712,16 +2744,22 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
case Intrinsic::spv_group_memory_barrier_with_group_sync: {
- Register MemSemReg =
+ bool Result = true;
+ auto MemSemConstant =
buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
- Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup, I);
+ Register MemSemReg = MemSemConstant.first;
+ Result &= MemSemConstant.second;
+ auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
+ Register ScopeReg = ScopeConstant.first;
+ Result &= ScopeConstant.second;
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
- .addUse(ScopeReg)
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .constrainAllUses(TII, TRI, RBI);
- } break;
+ return Result &&
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
+ .addUse(ScopeReg)
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .constrainAllUses(TII, TRI, RBI);
+ }
case Intrinsic::spv_lifetime_start:
case Intrinsic::spv_lifetime_end: {
unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
@@ -2730,8 +2768,11 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
if (Size == -1)
Size = 0;
- BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
- } break;
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
+ .addUse(PtrReg)
+ .addImm(Size)
+ .constrainAllUses(TII, TRI, RBI);
+ }
case Intrinsic::spv_saturate:
return selectSaturate(ResVReg, ResType, I);
case Intrinsic::spv_wave_is_first_lane: {
@@ -2759,8 +2800,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_value_md:
break;
case Intrinsic::spv_handle_fromBinding: {
- selectHandleFromBinding(ResVReg, ResType, I);
- return true;
+ return selectHandleFromBinding(ResVReg, ResType, I);
}
default: {
std::string DiagMsg;
@@ -2773,7 +2813,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
return true;
}
-void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
+bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
@@ -2792,10 +2832,11 @@ void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
// TODO: For now we assume the resource is an image, which needs to be
// loaded to get the handle. That will not be true for storage buffers.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
- .addUse(VarReg);
+ .addUse(VarReg)
+ .constrainAllUses(TII, TRI, RBI);
}
Register SPIRVInstructionSelector::buildPointerToResource(
@@ -3280,10 +3321,11 @@ bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
// Load v3uint value from the global variable.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
- .addDef(LoadedRegister)
- .addUse(GR.getSPIRVTypeID(Vec3Ty))
- .addUse(Variable);
+ bool Result =
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ .addDef(LoadedRegister)
+ .addUse(GR.getSPIRVTypeID(Vec3Ty))
+ .addUse(Variable);
// Get Thread ID index. Expecting operand is a constant immediate value,
// wrapped in a type assignment.
@@ -3297,7 +3339,7 @@ bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(LoadedRegister)
.addImm(ThreadId);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ return Result && MIB.constrainAllUses(TII, TRI, RBI);
}
namespace llvm {