[llvm] [NFC][LLVM] Make `MachineInstrBuilder::constrainAllUses` return `void` (PR #179632)
Juan Manuel Martinez Caamaño via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 4 02:11:50 PST 2026
https://github.com/jmmartinez updated https://github.com/llvm/llvm-project/pull/179632
>From 7b600e60e9609b4ddf6fc042400668274e4ebad5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?=
<jmartinezcaamao at gmail.com>
Date: Wed, 4 Feb 2026 10:31:26 +0100
Subject: [PATCH] [NFC][LLVM] Make `MachineInstrBuilder::constrainAllUses`
return `void`
This function always returns `true`, so we can change it to return
`void` and simplify the calling code.
Follow up of https://github.com/llvm/llvm-project/pull/179501 .
---
.../llvm/CodeGen/MachineInstrBuilder.h | 3 +-
.../Target/Mips/MipsInstructionSelector.cpp | 9 +-
llvm/lib/Target/Mips/MipsLegalizerInfo.cpp | 13 +-
.../PowerPC/GISel/PPCInstructionSelector.cpp | 185 ++--
.../RISCV/GISel/RISCVInstructionSelector.cpp | 33 +-
llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp | 9 +-
llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp | 17 +-
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 973 +++++++++---------
8 files changed, 615 insertions(+), 627 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index 7e1d67265a7c1..1b3216aef8079 100644
--- a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -438,11 +438,10 @@ class MachineInstrBuilder {
return *this;
}
- bool constrainAllUses(const TargetInstrInfo &TII,
+ void constrainAllUses(const TargetInstrInfo &TII,
const TargetRegisterInfo &TRI,
const RegisterBankInfo &RBI) const {
constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
- return true;
}
};
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index 9391e0da7e50f..2099df11f5b53 100644
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -588,8 +588,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
MachineInstrBuilder MTC1 =
B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
- if (!MTC1.constrainAllUses(TII, TRI, RBI))
- return false;
+ MTC1.constrainAllUses(TII, TRI, RBI);
}
if (Size == 64) {
Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
@@ -603,8 +602,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
MachineInstrBuilder PairF64 = B.buildInstr(
STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
{I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
- if (!PairF64.constrainAllUses(TII, TRI, RBI))
- return false;
+ PairF64.constrainAllUses(TII, TRI, RBI);
}
I.eraseFromParent();
@@ -791,8 +789,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
else
MIB.addUse(Instruction.RHS);
- if (!MIB.constrainAllUses(TII, TRI, RBI))
- return false;
+ MIB.constrainAllUses(TII, TRI, RBI);
}
I.eraseFromParent();
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 8468dd6a22119..e487d80e845f5 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -470,13 +470,12 @@ static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
MachineIRBuilder &MIRBuilder,
const MipsSubtarget &ST) {
assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
- if (!MIRBuilder.buildInstr(Opcode)
- .add(MI.getOperand(0))
- .add(MI.getOperand(2))
- .add(MI.getOperand(3))
- .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
- *ST.getRegBankInfo()))
- return false;
+ MIRBuilder.buildInstr(Opcode)
+ .add(MI.getOperand(0))
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3))
+ .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
+ *ST.getRegBankInfo());
MI.eraseFromParent();
return true;
}
diff --git a/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp b/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
index acaf284684d0a..e60ee32520f23 100644
--- a/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
+++ b/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
@@ -304,16 +304,20 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
// 1-1) Patterns : {zeros}{15-bit valve}
// {ones}{15-bit valve}
- if (isInt<16>(Imm))
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), Reg)
+ if (isInt<16>(Imm)) {
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), Reg)
.addImm(Imm)
.constrainAllUses(TII, TRI, RBI);
+ return true;
+ }
// 1-2) Patterns : {zeros}{15-bit valve}{16 zeros}
// {ones}{15-bit valve}{16 zeros}
- if (TZ > 15 && (LZ > 32 || LO > 32))
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LIS8), Reg)
+ if (TZ > 15 && (LZ > 32 || LO > 32)) {
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LIS8), Reg)
.addImm((Imm >> 16) & 0xffff)
.constrainAllUses(TII, TRI, RBI);
+ return true;
+ }
// Following patterns use 2 instructions to materialize the Imm.
@@ -326,14 +330,14 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
uint64_t ImmHi16 = (Imm >> 16) & 0xffff;
unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
- .addImm((Imm >> 16) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
+ .addImm((Imm >> 16) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Reg)
.addReg(TmpReg, RegState::Kill)
.addImm(Imm & 0xffff)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 2-2) Patterns : {zeros}{ones}{15-bit value}{zeros}
// {zeros}{15-bit value}{zeros}
@@ -343,15 +347,15 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
// ones, and then use RLDIC to mask off the ones in both sides after rotation.
if ((LZ + FO + TZ) > 48) {
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
- .addImm((Imm >> TZ) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDIC), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
+ .addImm((Imm >> TZ) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDIC), Reg)
.addReg(TmpReg, RegState::Kill)
.addImm(TZ)
.addImm(LZ)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 2-3) Pattern : {zeros}{15-bit value}{ones}
// Shift right the Imm by (48 - LZ) bits to construct a negtive 16 bits value,
@@ -374,15 +378,15 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
// the Imm by a negative value.
assert(LZ <= 32 && "Unexpected shift value.");
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
- .addImm(Imm >> (48 - LZ) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
+ .addImm(Imm >> (48 - LZ) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
.addReg(TmpReg, RegState::Kill)
.addImm(48 - LZ)
.addImm(LZ)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 2-4) Patterns : {zeros}{ones}{15-bit value}{ones}
// {ones}{15-bit value}{ones}
@@ -402,15 +406,15 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
// LI8: sext many leading zeros RLDICL: rotate left TO, clear left LZ
if ((LZ + FO + TO) > 48) {
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
- .addImm((Imm >> TO) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
+ .addImm((Imm >> TO) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
.addReg(TmpReg, RegState::Kill)
.addImm(TO)
.addImm(LZ)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 2-5) Pattern : {32 zeros}{****}{0}{15-bit value}
// If Hi32 is zero and the Lo16(in Lo32) can be presented as a positive 16 bit
@@ -418,14 +422,14 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
// Hi16(in Lo32).
if (LZ == 32 && ((Lo32 & 0x8000) == 0)) {
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
- .addImm(Lo32 & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORIS8), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
+ .addImm(Lo32 & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORIS8), Reg)
.addReg(TmpReg, RegState::Kill)
.addImm(Lo32 >> 16)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 2-6) Patterns : {******}{49 zeros}{******}
// {******}{49 ones}{******}
@@ -449,15 +453,15 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
(Shift = findContiguousZerosAtLeast(~Imm, 49))) {
uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
- .addImm(RotImm & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LI8), TmpReg)
+ .addImm(RotImm & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
.addReg(TmpReg, RegState::Kill)
.addImm(Shift)
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// Following patterns use 3 instructions to materialize the Imm.
@@ -474,20 +478,19 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
Register Tmp2Reg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
- .addImm(ImmHi16)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
- .addReg(TmpReg, RegState::Kill)
- .addImm((Imm >> TZ) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDIC), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
+ .addImm(ImmHi16)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
+ .addReg(TmpReg, RegState::Kill)
+ .addImm((Imm >> TZ) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDIC), Reg)
.addReg(Tmp2Reg, RegState::Kill)
.addImm(TZ)
.addImm(LZ)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 3-2) Pattern : {zeros}{31-bit value}{ones}
// Shift right the Imm by (32 - LZ) bits to construct a negative 32 bits
@@ -501,20 +504,19 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
assert(LZ <= 32 && "Unexpected shift value.");
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
Register Tmp2Reg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LIS8), TmpReg)
- .addImm((Imm >> (48 - LZ)) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
- .addReg(TmpReg, RegState::Kill)
- .addImm((Imm >> (32 - LZ)) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LIS8), TmpReg)
+ .addImm((Imm >> (48 - LZ)) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
+ .addReg(TmpReg, RegState::Kill)
+ .addImm((Imm >> (32 - LZ)) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
.addReg(Tmp2Reg, RegState::Kill)
.addImm(32 - LZ)
.addImm(LZ)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 3-3) Patterns : {zeros}{ones}{31-bit value}{ones}
// {ones}{31-bit value}{ones}
@@ -525,20 +527,19 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
if ((LZ + FO + TO) > 32) {
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
Register Tmp2Reg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LIS8), TmpReg)
- .addImm((Imm >> (TO + 16)) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
- .addReg(TmpReg, RegState::Kill)
- .addImm((Imm >> TO) & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::LIS8), TmpReg)
+ .addImm((Imm >> (TO + 16)) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
+ .addReg(TmpReg, RegState::Kill)
+ .addImm((Imm >> TO) & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
.addReg(Tmp2Reg, RegState::Kill)
.addImm(TO)
.addImm(LZ)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 3-4) Patterns : High word == Low word
if (Hi32 == Lo32) {
@@ -547,21 +548,20 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
Register Tmp2Reg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
- .addImm(ImmHi16)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
- .addReg(TmpReg, RegState::Kill)
- .addImm(Lo32 & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDIMI), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
+ .addImm(ImmHi16)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
+ .addReg(TmpReg, RegState::Kill)
+ .addImm(Lo32 & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDIMI), Reg)
.addReg(Tmp2Reg)
.addReg(Tmp2Reg, RegState::Kill)
.addImm(32)
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// 3-5) Patterns : {******}{33 zeros}{******}
// {******}{33 ones}{******}
@@ -577,20 +577,19 @@ std::optional<bool> PPCInstructionSelector::selectI64ImmDirect(MachineInstr &I,
unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
Register TmpReg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
Register Tmp2Reg = MRI.createVirtualRegister(&PPC::G8RCRegClass);
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
- .addImm(ImmHi16)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
- .addReg(TmpReg, RegState::Kill)
- .addImm(RotImm & 0xffff)
- .constrainAllUses(TII, TRI, RBI))
- return false;
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode), TmpReg)
+ .addImm(ImmHi16)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), Tmp2Reg)
+ .addReg(TmpReg, RegState::Kill)
+ .addImm(RotImm & 0xffff)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::RLDICL), Reg)
.addReg(Tmp2Reg, RegState::Kill)
.addImm(Shift)
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// If we end up here then no instructions were inserted.
@@ -630,19 +629,17 @@ bool PPCInstructionSelector::selectI64Imm(MachineInstr &I,
if (Hi16) {
Register TmpReg =
Lo16 ? MRI.createVirtualRegister(&PPC::G8RCRegClass) : DstReg;
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORIS8), TmpReg)
- .addReg(Reg, RegState::Kill)
- .addImm(Hi16)
- .constrainAllUses(TII, TRI, RBI))
- return false;
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORIS8), TmpReg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(Hi16)
+ .constrainAllUses(TII, TRI, RBI);
Reg = TmpReg;
}
if (Lo16) {
- if (!BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), DstReg)
- .addReg(Reg, RegState::Kill)
- .addImm(Lo16)
- .constrainAllUses(TII, TRI, RBI))
- return false;
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(PPC::ORI8), DstReg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(Lo16)
+ .constrainAllUses(TII, TRI, RBI);
}
I.eraseFromParent();
return true;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index e4df9e5f9c569..5d584e99ccc81 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -1229,8 +1229,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
: Size == 32 ? RISCV::FMV_W_X
: RISCV::FMV_H_X;
auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
- if (!FMV.constrainAllUses(TII, TRI, RBI))
- return false;
+ FMV.constrainAllUses(TII, TRI, RBI);
} else {
// s64 on rv32
assert(Size == 64 && !Subtarget->is64Bit() &&
@@ -1241,8 +1240,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
MachineInstrBuilder FCVT =
MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
.addImm(RISCVFPRndMode::RNE);
- if (!FCVT.constrainAllUses(TII, TRI, RBI))
- return false;
+ FCVT.constrainAllUses(TII, TRI, RBI);
MI.eraseFromParent();
return true;
@@ -1259,8 +1257,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return false;
MachineInstrBuilder PairF64 = MIB.buildInstr(
RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
- if (!PairF64.constrainAllUses(TII, TRI, RBI))
- return false;
+ PairF64.constrainAllUses(TII, TRI, RBI);
}
MI.eraseFromParent();
@@ -1860,51 +1857,43 @@ bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
if (NeedInvert)
TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
- if (!Cmp.constrainAllUses(TII, TRI, RBI))
- return false;
+ Cmp.constrainAllUses(TII, TRI, RBI);
} else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
// fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
NeedInvert = Pred == CmpInst::FCMP_UEQ;
auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
{&RISCV::GPRRegClass}, {LHS, RHS});
- if (!Cmp1.constrainAllUses(TII, TRI, RBI))
- return false;
+ Cmp1.constrainAllUses(TII, TRI, RBI);
auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
{&RISCV::GPRRegClass}, {RHS, LHS});
- if (!Cmp2.constrainAllUses(TII, TRI, RBI))
- return false;
+ Cmp2.constrainAllUses(TII, TRI, RBI);
if (NeedInvert)
TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
auto Or =
MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
- if (!Or.constrainAllUses(TII, TRI, RBI))
- return false;
+ Or.constrainAllUses(TII, TRI, RBI);
} else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
// fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
// FIXME: If LHS and RHS are the same we can use a single FEQ.
NeedInvert = Pred == CmpInst::FCMP_UNO;
auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
{&RISCV::GPRRegClass}, {LHS, LHS});
- if (!Cmp1.constrainAllUses(TII, TRI, RBI))
- return false;
+ Cmp1.constrainAllUses(TII, TRI, RBI);
auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
{&RISCV::GPRRegClass}, {RHS, RHS});
- if (!Cmp2.constrainAllUses(TII, TRI, RBI))
- return false;
+ Cmp2.constrainAllUses(TII, TRI, RBI);
if (NeedInvert)
TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
auto And =
MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
- if (!And.constrainAllUses(TII, TRI, RBI))
- return false;
+ And.constrainAllUses(TII, TRI, RBI);
} else
llvm_unreachable("Unhandled predicate");
// Emit an XORI to invert the result if needed.
if (NeedInvert) {
auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
- if (!Xor.constrainAllUses(TII, TRI, RBI))
- return false;
+ Xor.constrainAllUses(TII, TRI, RBI);
}
MI.eraseFromParent();
diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
index 23c5798f9d0af..0543ddb2edb3d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
@@ -47,12 +47,14 @@ bool SPIRVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
// TODO: handle the case of multiple registers.
if (VRegs.size() > 1)
return false;
+
if (Val) {
const auto &STI = MIRBuilder.getMF().getSubtarget();
- return MIRBuilder.buildInstr(SPIRV::OpReturnValue)
+ MIRBuilder.buildInstr(SPIRV::OpReturnValue)
.addUse(VRegs[0])
.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
*STI.getRegBankInfo());
+ return true;
}
MIRBuilder.buildInstr(SPIRV::OpReturn);
return true;
@@ -698,6 +700,7 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
}
}
- return MIB.constrainAllUses(MIRBuilder.getTII(), *ST->getRegisterInfo(),
- *ST->getRegBankInfo());
+ MIB.constrainAllUses(MIRBuilder.getTII(), *ST->getRegisterInfo(),
+ *ST->getRegBankInfo());
+ return true;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
index 3e5ce4b90ea4a..5f0c33bb44b4b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
@@ -151,14 +151,12 @@ static void doInsertBitcast(const SPIRVSubtarget &STI, MachineRegisterInfo *MRI,
SPIRVType *NewPtrType) {
MachineIRBuilder MIB(I);
Register NewReg = createVirtualRegister(NewPtrType, &GR, MRI, MIB.getMF());
- bool Res = MIB.buildInstr(SPIRV::OpBitcast)
- .addDef(NewReg)
- .addUse(GR.getSPIRVTypeID(NewPtrType))
- .addUse(OpReg)
- .constrainAllUses(*STI.getInstrInfo(), *STI.getRegisterInfo(),
- *STI.getRegBankInfo());
- if (!Res)
- report_fatal_error("insert validation bitcast: cannot constrain all uses");
+ MIB.buildInstr(SPIRV::OpBitcast)
+ .addDef(NewReg)
+ .addUse(GR.getSPIRVTypeID(NewPtrType))
+ .addUse(OpReg)
+ .constrainAllUses(*STI.getInstrInfo(), *STI.getRegisterInfo(),
+ *STI.getRegBankInfo());
I.getOperand(OpIdx).setReg(NewReg);
}
@@ -619,10 +617,11 @@ bool SPIRVTargetLowering::insertLogicalCopyOnResult(
OldType.setReg(NewTypeReg);
MachineIRBuilder MIB(*I.getNextNode());
- return MIB.buildInstr(SPIRV::OpCopyLogical)
+ MIB.buildInstr(SPIRV::OpCopyLogical)
.addDef(OldResultReg)
.addUse(OldTypeReg)
.addUse(NewResultReg)
.constrainAllUses(*STI.getInstrInfo(), *STI.getRegisterInfo(),
*STI.getRegBankInfo());
+ return true;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 808ce89bf76a3..2f1506780e4b7 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -358,9 +358,8 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool selectDerivativeInst(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I, const unsigned DPdOpCode) const;
// Utilities
- std::pair<Register, bool>
- buildI32Constant(uint32_t Val, MachineInstr &I,
- const SPIRVType *ResType = nullptr) const;
+ Register buildI32Constant(uint32_t Val, MachineInstr &I,
+ const SPIRVType *ResType = nullptr) const;
Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
bool isScalarOrVectorIntConstantZero(Register Reg) const;
@@ -847,11 +846,11 @@ bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
if (DstRC != SrcRC && SrcRC)
MRI->setRegClass(DestReg, SrcRC);
- return BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::COPY))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
.addDef(DestReg)
.addUse(SrcReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
@@ -893,7 +892,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
.addUse(I.getOperand(2).getReg());
for (auto V : I.getOperand(3).getShuffleMask())
MIB.addImm(V);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case TargetOpcode::G_MEMMOVE:
case TargetOpcode::G_MEMCPY:
@@ -966,7 +966,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(regForLround);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case TargetOpcode::G_STRICT_FMA:
case TargetOpcode::G_FMA: {
@@ -979,7 +980,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
.addUse(I.getOperand(2).getReg())
.addUse(I.getOperand(3).getReg())
.setMIFlags(I.getFlags());
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);
}
@@ -1169,18 +1171,17 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
.addDef(NewVReg)
.addUse(ResTypeReg)
.addUse(GV);
- return MIB.constrainAllUses(TII, TRI, RBI) &&
- BuildMI(BB, I, I.getDebugLoc(),
- TII.get(STI.isLogicalSPIRV()
- ? SPIRV::OpInBoundsAccessChain
- : SPIRV::OpInBoundsPtrAccessChain))
- .addDef(ResVReg)
- .addUse(ResTypeReg)
- .addUse(NewVReg)
- .addUse(I.getOperand(2).getReg())
- .constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(),
+ TII.get(STI.isLogicalSPIRV() ? SPIRV::OpInBoundsAccessChain
+ : SPIRV::OpInBoundsPtrAccessChain))
+ .addDef(ResVReg)
+ .addUse(ResTypeReg)
+ .addUse(NewVReg)
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
} else {
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(
@@ -1189,6 +1190,7 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
.addUse(I.getOperand(2).getReg())
.constrainAllUses(TII, TRI, RBI);
}
+ return true;
}
// It's possible to translate G_PTR_ADD to OpSpecConstantOp: either to
// initialize a global variable with a constant expression (e.g., the test
@@ -1202,7 +1204,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
.addUse(GV)
.addUse(Idx)
.addUse(I.getOperand(2).getReg());
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case TargetOpcode::G_ATOMICRMW_OR:
@@ -1272,8 +1275,9 @@ bool SPIRVInstructionSelector::selectDebugTrap(Register ResVReg,
MachineInstr &I) const {
unsigned Opcode = SPIRV::OpNop;
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
@@ -1334,7 +1338,8 @@ bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
Index = 2;
for (; Index < NumOps; ++Index)
MIB.add(I.getOperand(Index));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
}
return false;
@@ -1394,30 +1399,27 @@ bool SPIRVInstructionSelector::selectFrexp(Register ResVReg,
createVirtualRegister(PointerType, &GR, MRI, MRI->getMF());
auto It = getOpVariableMBBIt(I);
- auto MIB = BuildMI(*It->getParent(), It, It->getDebugLoc(),
- TII.get(SPIRV::OpVariable))
- .addDef(PointerVReg)
- .addUse(GR.getSPIRVTypeID(PointerType))
- .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
- .constrainAllUses(TII, TRI, RBI);
-
- MIB = MIB &
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addImm(static_cast<uint32_t>(Ex.first))
- .addImm(Opcode)
- .add(I.getOperand(2))
- .addUse(PointerVReg)
- .constrainAllUses(TII, TRI, RBI);
-
- MIB = MIB &
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
- .addDef(I.getOperand(1).getReg())
- .addUse(GR.getSPIRVTypeID(PointeeTy))
- .addUse(PointerVReg)
- .constrainAllUses(TII, TRI, RBI);
- return MIB;
+ BuildMI(*It->getParent(), It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
+ .addDef(PointerVReg)
+ .addUse(GR.getSPIRVTypeID(PointerType))
+ .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
+ .constrainAllUses(TII, TRI, RBI);
+
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addImm(static_cast<uint32_t>(Ex.first))
+ .addImm(Opcode)
+ .add(I.getOperand(2))
+ .addUse(PointerVReg)
+ .constrainAllUses(TII, TRI, RBI);
+
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ .addDef(I.getOperand(1).getReg())
+ .addUse(GR.getSPIRVTypeID(PointeeTy))
+ .addUse(PointerVReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
return false;
}
@@ -1489,7 +1491,8 @@ bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
for (Register SReg : Srcs) {
MIB.addUse(SReg);
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
@@ -1526,14 +1529,16 @@ bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
break;
}
- if (SpecOpcode)
- return BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(SPIRV::OpSpecConstantOp))
+ if (SpecOpcode) {
+ BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(SPIRV::OpSpecConstantOp))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(SpecOpcode)
.addUse(SrcReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
+ }
}
}
return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
@@ -1642,7 +1647,8 @@ bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
MachineIRBuilder MIRBuilder(I);
addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR);
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
@@ -1675,7 +1681,8 @@ bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
if (sampledTypeIsSignedInteger(LLVMHandleType))
BMI.addImm(0x1000); // SignExtend
- return BMI.constrainAllUses(TII, TRI, RBI);
+ BMI.constrainAllUses(TII, TRI, RBI);
+ return true;
}
}
@@ -1692,7 +1699,8 @@ bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
MachineIRBuilder MIRBuilder(I);
addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR);
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
@@ -1704,10 +1712,11 @@ bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
"SPIR-V extension: SPV_INTEL_variable_length_array",
false);
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
@@ -1719,9 +1728,10 @@ bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
if (!I.getOperand(0).isReg())
return false;
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
.addUse(I.getOperand(0).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
Register
@@ -1756,8 +1766,7 @@ SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &I) const {
.addUse(GR.getSPIRVTypeID(VarTy))
.addImm(SPIRV::StorageClass::UniformConstant)
.addUse(Const);
- if (!MIBVar.constrainAllUses(TII, TRI, RBI))
- return Register();
+ MIBVar.constrainAllUses(TII, TRI, RBI);
GR.add(GV, MIBVar);
GR.addGlobalObject(GV, GR.CurMF, VarReg);
@@ -1791,7 +1800,8 @@ bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &I,
MachineIRBuilder MIRBuilder(I);
addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR);
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &I,
@@ -1805,13 +1815,13 @@ bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &I,
MachineIRBuilder MIRBuilder(I);
addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR);
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
MachineInstr &I) const {
Register SrcReg = I.getOperand(1).getReg();
- bool Result = true;
if (I.getOpcode() == TargetOpcode::G_MEMSET) {
Register VarReg = getOrCreateMemSetGlobal(I);
if (!VarReg.isValid())
@@ -1820,16 +1830,20 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
ValTy, I, SPIRV::StorageClass::UniformConstant);
SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
- Result &= selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
+ if (!selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast))
+ return false;
}
if (STI.isLogicalSPIRV()) {
- Result &= selectCopyMemory(I, SrcReg);
+ if (!selectCopyMemory(I, SrcReg))
+ return false;
} else {
- Result &= selectCopyMemorySized(I, SrcReg);
+ if (!selectCopyMemorySized(I, SrcReg))
+ return false;
}
if (ResVReg.isValid() && ResVReg != I.getOperand(0).getReg())
- Result &= BuildCOPY(ResVReg, I.getOperand(0).getReg(), I);
- return Result;
+ if (!BuildCOPY(ResVReg, I.getOperand(0).getReg(), I))
+ return false;
+ return true;
}
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
@@ -1837,14 +1851,11 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
MachineInstr &I,
unsigned NewOpcode,
unsigned NegateOpcode) const {
- bool Result = true;
assert(I.hasOneMemOperand());
const MachineMemOperand *MemOp = *I.memoperands_begin();
uint32_t Scope = static_cast<uint32_t>(getMemScope(
GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
- auto ScopeConstant = buildI32Constant(Scope, I);
- Register ScopeReg = ScopeConstant.first;
- Result &= ScopeConstant.second;
+ Register ScopeReg = buildI32Constant(Scope, I);
Register Ptr = I.getOperand(1).getReg();
// TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
@@ -1852,19 +1863,17 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
// getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
AtomicOrdering AO = MemOp->getSuccessOrdering();
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
- auto MemSemConstant = buildI32Constant(MemSem /*| ScSem*/, I);
- Register MemSemReg = MemSemConstant.first;
- Result &= MemSemConstant.second;
+ Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);
Register ValueReg = I.getOperand(2).getReg();
if (NegateOpcode != 0) {
// Translation with negative value operand is requested
Register TmpReg = createVirtualRegister(ResType, &GR, MRI, MRI->getMF());
- Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
+ if (!selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode))
+ return false;
ValueReg = TmpReg;
}
- return Result &&
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
@@ -1873,6 +1882,7 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
.addUse(MemSemReg)
.addUse(ValueReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
@@ -1888,7 +1898,6 @@ bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
SPIRVType *ScalarType =
GR.getSPIRVTypeForVReg(SrcType->getOperand(1).getReg());
MachineBasicBlock &BB = *I.getParent();
- bool Res = false;
unsigned CurrentIndex = 0;
for (unsigned i = 0; i < I.getNumDefs(); ++i) {
Register ResVReg = I.getOperand(i).getReg();
@@ -1919,7 +1928,7 @@ bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
MIB.addImm(CurrentIndex + j);
}
CurrentIndex += NumElements;
- Res |= MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
} else {
auto MIB =
BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
@@ -1928,30 +1937,26 @@ bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
.addUse(SrcReg)
.addImm(CurrentIndex);
CurrentIndex++;
- Res |= MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
}
}
- return Res;
+ return true;
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
- auto MemSemConstant = buildI32Constant(MemSem, I);
- Register MemSemReg = MemSemConstant.first;
- bool Result = MemSemConstant.second;
+ Register MemSemReg = buildI32Constant(MemSem, I);
SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
uint32_t Scope = static_cast<uint32_t>(
getMemScope(GR.CurMF->getFunction().getContext(), Ord));
- auto ScopeConstant = buildI32Constant(Scope, I);
- Register ScopeReg = ScopeConstant.first;
- Result &= ScopeConstant.second;
+ Register ScopeReg = buildI32Constant(Scope, I);
MachineBasicBlock &BB = *I.getParent();
- return Result &&
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
@@ -1995,7 +2000,7 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
.addUse(GR.getSPIRVTypeID(StructType));
for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
MIB.addUse(I.getOperand(i).getReg());
- bool Result = MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
// Build instructions to extract fields of the instruction's result.
// A new virtual register to store the higher part of the result struct.
Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
@@ -2007,21 +2012,21 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(StructVReg)
.addImm(i);
- Result &= MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
}
// Build boolean value from the higher part.
- return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
- .addDef(I.getOperand(1).getReg())
- .addUse(BoolTypeReg)
- .addUse(HigherVReg)
- .addUse(ZeroReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
+ .addDef(I.getOperand(1).getReg())
+ .addUse(BoolTypeReg)
+ .addUse(HigherVReg)
+ .addUse(ZeroReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
- bool Result = true;
Register ScopeReg;
Register MemSemEqReg;
Register MemSemNeqReg;
@@ -2031,25 +2036,19 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const MachineMemOperand *MemOp = *I.memoperands_begin();
unsigned Scope = static_cast<uint32_t>(getMemScope(
GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
- auto ScopeConstant = buildI32Constant(Scope, I);
- ScopeReg = ScopeConstant.first;
- Result &= ScopeConstant.second;
+ ScopeReg = buildI32Constant(Scope, I);
unsigned ScSem = static_cast<uint32_t>(
getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
AtomicOrdering AO = MemOp->getSuccessOrdering();
unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
- auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
- MemSemEqReg = MemSemEqConstant.first;
- Result &= MemSemEqConstant.second;
+ MemSemEqReg = buildI32Constant(MemSemEq, I);
AtomicOrdering FO = MemOp->getFailureOrdering();
unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
if (MemSemEq == MemSemNeq)
MemSemNeqReg = MemSemEqReg;
else {
- auto MemSemNeqConstant = buildI32Constant(MemSemEq, I);
- MemSemNeqReg = MemSemNeqConstant.first;
- Result &= MemSemNeqConstant.second;
+ MemSemNeqReg = buildI32Constant(MemSemNeq, I);
}
} else {
ScopeReg = I.getOperand(5).getReg();
@@ -2062,41 +2061,40 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
Register ACmpRes = createVirtualRegister(SpvValTy, &GR, MRI, *I.getMF());
const DebugLoc &DL = I.getDebugLoc();
- Result &=
- BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
- .addDef(ACmpRes)
- .addUse(GR.getSPIRVTypeID(SpvValTy))
- .addUse(Ptr)
- .addUse(ScopeReg)
- .addUse(MemSemEqReg)
- .addUse(MemSemNeqReg)
- .addUse(Val)
- .addUse(Cmp)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
+ .addDef(ACmpRes)
+ .addUse(GR.getSPIRVTypeID(SpvValTy))
+ .addUse(Ptr)
+ .addUse(ScopeReg)
+ .addUse(MemSemEqReg)
+ .addUse(MemSemNeqReg)
+ .addUse(Val)
+ .addUse(Cmp)
+ .constrainAllUses(TII, TRI, RBI);
SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
Register CmpSuccReg = createVirtualRegister(BoolTy, &GR, MRI, *I.getMF());
- Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
- .addDef(CmpSuccReg)
- .addUse(GR.getSPIRVTypeID(BoolTy))
- .addUse(ACmpRes)
- .addUse(Cmp)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
+ .addDef(CmpSuccReg)
+ .addUse(GR.getSPIRVTypeID(BoolTy))
+ .addUse(ACmpRes)
+ .addUse(Cmp)
+ .constrainAllUses(TII, TRI, RBI);
Register TmpReg = createVirtualRegister(ResType, &GR, MRI, *I.getMF());
- Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
- .addDef(TmpReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(ACmpRes)
- .addUse(GR.getOrCreateUndef(I, ResType, TII))
- .addImm(0)
- .constrainAllUses(TII, TRI, RBI);
- return Result &&
- BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(CmpSuccReg)
- .addUse(TmpReg)
- .addImm(1)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
+ .addDef(TmpReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(ACmpRes)
+ .addUse(GR.getOrCreateUndef(I, ResType, TII))
+ .addImm(0)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(CmpSuccReg)
+ .addUse(TmpReg)
+ .addImm(1)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
@@ -2199,18 +2197,19 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
// be addressed in the emit-intrinsic step to infer a correct
// OpConstantComposite type.
if (SpecOpcode) {
- return buildSpecConstantOp(I, ResVReg, SrcPtr,
- getUcharPtrTypeReg(I, DstSC), SpecOpcode)
+ buildSpecConstantOp(I, ResVReg, SrcPtr, getUcharPtrTypeReg(I, DstSC),
+ SpecOpcode)
.constrainAllUses(TII, TRI, RBI);
+ return true;
} else if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
- return MIB.constrainAllUses(TII, TRI, RBI) &&
- buildSpecConstantOp(
- I, ResVReg, MIB->getOperand(0).getReg(),
- getUcharPtrTypeReg(I, DstSC),
- static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
- .constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ buildSpecConstantOp(
+ I, ResVReg, MIB->getOperand(0).getReg(), getUcharPtrTypeReg(I, DstSC),
+ static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
}
// don't generate a cast between identical storage classes
@@ -2234,16 +2232,17 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
SPIRVType *GenericPtrTy =
GR.changePointerStorageClass(SrcPtrTy, SPIRV::StorageClass::Generic, I);
Register Tmp = createVirtualRegister(GenericPtrTy, &GR, MRI, MRI->getMF());
- bool Result = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
- .addDef(Tmp)
- .addUse(GR.getSPIRVTypeID(GenericPtrTy))
- .addUse(SrcPtr)
- .constrainAllUses(TII, TRI, RBI);
- return Result && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Tmp)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
+ .addDef(Tmp)
+ .addUse(GR.getSPIRVTypeID(GenericPtrTy))
+ .addUse(SrcPtr)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Tmp)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
// Check if instructions from the SPV_INTEL_usm_storage_classes extension may
@@ -2414,27 +2413,25 @@ bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
}
- bool Result = true;
if (!IsBoolTy) {
Register ConstZeroReg =
IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
- .addDef(NotEqualReg)
- .addUse(GR.getSPIRVTypeID(SpvBoolTy))
- .addUse(InputRegister)
- .addUse(ConstZeroReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
+ .addDef(NotEqualReg)
+ .addUse(GR.getSPIRVTypeID(SpvBoolTy))
+ .addUse(InputRegister)
+ .addUse(ConstZeroReg)
+ .constrainAllUses(TII, TRI, RBI);
}
- if (!IsVectorTy)
- return Result;
-
- return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
- .addUse(NotEqualReg)
- .constrainAllUses(TII, TRI, RBI);
+ if (IsVectorTy)
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
+ .addUse(NotEqualReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectAll(Register ResVReg,
@@ -2470,12 +2467,13 @@ bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
assert(EltType->getOpcode() == SPIRV::OpTypeFloat);
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.addUse(I.getOperand(3).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
@@ -2488,12 +2486,13 @@ bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
MachineBasicBlock &BB = *I.getParent();
auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.addUse(I.getOperand(3).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// Since pre-1.6 SPIRV has no integer dot implementation,
@@ -2511,29 +2510,28 @@ bool SPIRVInstructionSelector::selectIntegerDotExpansion(
Register TmpVec = MRI->createVirtualRegister(GR.getRegClass(ResType));
SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);
- bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
- .addDef(TmpVec)
- .addUse(GR.getSPIRVTypeID(VecType))
- .addUse(Vec0)
- .addUse(Vec1)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
+ .addDef(TmpVec)
+ .addUse(GR.getSPIRVTypeID(VecType))
+ .addUse(Vec0)
+ .addUse(Vec1)
+ .constrainAllUses(TII, TRI, RBI);
assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
GR.getScalarOrVectorComponentCount(VecType) > 1 &&
"dot product requires a vector of at least 2 components");
Register Res = MRI->createVirtualRegister(GR.getRegClass(ResType));
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
- .addDef(Res)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(TmpVec)
- .addImm(0)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
+ .addDef(Res)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(TmpVec)
+ .addImm(0)
+ .constrainAllUses(TII, TRI, RBI);
for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
Register Elt = MRI->createVirtualRegister(GR.getRegClass(ResType));
- Result &=
BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
.addDef(Elt)
.addUse(GR.getSPIRVTypeID(ResType))
@@ -2545,38 +2543,40 @@ bool SPIRVInstructionSelector::selectIntegerDotExpansion(
? MRI->createVirtualRegister(GR.getRegClass(ResType))
: ResVReg;
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
- .addDef(Sum)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Res)
- .addUse(Elt)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ .addDef(Sum)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Res)
+ .addUse(Elt)
+ .constrainAllUses(TII, TRI, RBI);
Res = Sum;
}
- return Result;
+ return true;
}
bool SPIRVInstructionSelector::selectOpIsInf(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsInf))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsInf))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectOpIsNan(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsNan))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsNan))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
template <bool Signed>
@@ -2624,8 +2624,6 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
assert(I.getOperand(4).isReg());
MachineBasicBlock &BB = *I.getParent();
- bool Result = true;
-
Register Acc = I.getOperand(2).getReg();
Register X = I.getOperand(3).getReg();
Register Y = I.getOperand(4).getReg();
@@ -2639,60 +2637,57 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
for (unsigned i = 0; i < 4; i++) {
// A[i]
Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result &=
- BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
- .addDef(AElt)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(X)
- .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII, ZeroAsNull))
- .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ .addDef(AElt)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(X)
+ .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII, ZeroAsNull))
+ .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
+ .constrainAllUses(TII, TRI, RBI);
// B[i]
Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result &=
- BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
- .addDef(BElt)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Y)
- .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII, ZeroAsNull))
- .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ .addDef(BElt)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Y)
+ .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII, ZeroAsNull))
+ .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
+ .constrainAllUses(TII, TRI, RBI);
// A[i] * B[i]
Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
- .addDef(Mul)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(AElt)
- .addUse(BElt)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
+ .addDef(Mul)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(AElt)
+ .addUse(BElt)
+ .constrainAllUses(TII, TRI, RBI);
// Discard 24 highest-bits so that stored i32 register is i8 equivalent
Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
- Result &=
- BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
- .addDef(MaskMul)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Mul)
- .addUse(GR.getOrCreateConstInt(0, I, EltType, TII, ZeroAsNull))
- .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
+ .addDef(MaskMul)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Mul)
+ .addUse(GR.getOrCreateConstInt(0, I, EltType, TII, ZeroAsNull))
+ .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
+ .constrainAllUses(TII, TRI, RBI);
// Acc = Acc + A[i] * B[i]
Register Sum =
i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
- .addDef(Sum)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(Acc)
- .addUse(MaskMul)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ .addDef(Sum)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(Acc)
+ .addUse(MaskMul)
+ .constrainAllUses(TII, TRI, RBI);
Acc = Sum;
}
- return Result;
+ return true;
}
/// Transform saturate(x) to clamp(x, 0.0f, 1.0f) as SPIRV
@@ -2706,7 +2701,7 @@ bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
Register VZero = buildZerosValF(ResType, I);
Register VOne = buildOnesValF(ResType, I);
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
@@ -2715,6 +2710,7 @@ bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
.addUse(VZero)
.addUse(VOne)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectSign(Register ResVReg,
@@ -2742,7 +2738,6 @@ bool SPIRVInstructionSelector::selectSign(Register ResVReg,
? MRI->createVirtualRegister(&SPIRV::IDRegClass)
: ResVReg;
- bool Result =
BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
.addDef(SignReg)
.addUse(GR.getSPIRVTypeID(InputType))
@@ -2753,14 +2748,14 @@ bool SPIRVInstructionSelector::selectSign(Register ResVReg,
if (NeedsConversion) {
auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
- Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(SignReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(SignReg)
+ .constrainAllUses(TII, TRI, RBI);
}
- return Result;
+ return true;
}
bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
@@ -2780,7 +2775,8 @@ bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
BMI.addUse(I.getOperand(J).getReg());
}
- return BMI.constrainAllUses(TII, TRI, RBI);
+ BMI.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectWaveActiveCountBits(
@@ -2789,21 +2785,22 @@ bool SPIRVInstructionSelector::selectWaveActiveCountBits(
SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4, I, TII);
Register BallotReg = MRI->createVirtualRegister(GR.getRegClass(BallotType));
- bool Result = selectWaveOpInst(BallotReg, BallotType, I,
- SPIRV::OpGroupNonUniformBallot);
+ if (!selectWaveOpInst(BallotReg, BallotType, I,
+ SPIRV::OpGroupNonUniformBallot))
+ return false;
MachineBasicBlock &BB = *I.getParent();
- Result &= BuildMI(BB, I, I.getDebugLoc(),
- TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy,
- TII, !STI.isShader()))
- .addImm(SPIRV::GroupOperation::Reduce)
- .addUse(BallotReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(),
+ TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII,
+ !STI.isShader()))
+ .addImm(SPIRV::GroupOperation::Reduce)
+ .addUse(BallotReg)
+ .constrainAllUses(TII, TRI, RBI);
- return Result;
+ return true;
}
bool SPIRVInstructionSelector::selectWavePrefixBitCount(
@@ -2913,7 +2910,7 @@ bool SPIRVInstructionSelector::selectWaveReduce(
SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
const unsigned Opcode = PickOpcode(InputRegister, IsUnsigned);
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII,
@@ -2921,6 +2918,7 @@ bool SPIRVInstructionSelector::selectWaveReduce(
.addImm(SPIRV::GroupOperation::Reduce)
.addUse(I.getOperand(2).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectWaveExclusiveScanSum(
@@ -2950,7 +2948,7 @@ bool SPIRVInstructionSelector::selectWaveExclusiveScan(
SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
const unsigned Opcode = PickOpcode(InputRegister, IsUnsigned);
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII,
@@ -2958,17 +2956,19 @@ bool SPIRVInstructionSelector::selectWaveExclusiveScan(
.addImm(SPIRV::GroupOperation::ExclusiveScan)
.addUse(I.getOperand(2).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(1).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
@@ -3005,10 +3005,11 @@ bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
DestOpCode = TargetOpcode::COPY;
Reg = OpReg;
}
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
.addDef(I.getOperand(0).getReg())
.addUse(Reg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
return false;
}
@@ -3045,7 +3046,8 @@ bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType));
for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
MIB.addUse(I.getOperand(i).getReg());
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
@@ -3079,7 +3081,8 @@ bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType));
for (unsigned i = 0; i < N; ++i)
MIB.addUse(OpReg);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
@@ -3102,8 +3105,9 @@ bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
}
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
@@ -3115,13 +3119,14 @@ bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
"CMP operands should have the same type");
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Cmp0)
.addUse(Cmp1)
.setMIFlags(I.getFlags())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
@@ -3140,7 +3145,7 @@ bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
return selectCmp(ResVReg, ResType, CmpOpc, I);
}
-std::pair<Register, bool>
+Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
const SPIRVType *ResType) const {
Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
@@ -3149,7 +3154,6 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
// Find a constant in DT or build a new one.
auto ConstInt = ConstantInt::get(LLVMTy, Val);
Register NewReg = GR.find(ConstInt, GR.CurMF);
- bool Result = true;
if (!NewReg.isValid()) {
NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
MachineBasicBlock &BB = *I.getParent();
@@ -3165,7 +3169,7 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
GR.add(ConstInt, MI);
}
- return {NewReg, Result};
+ return NewReg;
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
@@ -3296,13 +3300,14 @@ bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
}
}
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(1).getReg())
.addUse(SelectFirstArg)
.addUse(SelectSecondArg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectSelectDefaultArgs(Register ResVReg,
@@ -3316,13 +3321,14 @@ bool SPIRVInstructionSelector::selectSelectDefaultArgs(Register ResVReg,
GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
unsigned Opcode =
IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(1).getReg())
.addUse(OneReg)
.addUse(ZeroReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
@@ -3379,24 +3385,23 @@ bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
Register IsLessEqReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
MRI->setType(IsLessEqReg, LLT::scalar(64));
GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
- bool Result = BuildMI(BB, I, I.getDebugLoc(),
- TII.get(IsSigned ? SPIRV::OpSLessThanEqual
- : SPIRV::OpULessThanEqual))
- .addDef(IsLessEqReg)
- .addUse(BoolTypeReg)
- .addUse(I.getOperand(1).getReg())
- .addUse(I.getOperand(2).getReg())
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(),
+ TII.get(IsSigned ? SPIRV::OpSLessThanEqual : SPIRV::OpULessThanEqual))
+ .addDef(IsLessEqReg)
+ .addUse(BoolTypeReg)
+ .addUse(I.getOperand(1).getReg())
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
Register IsLessReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
MRI->setType(IsLessReg, LLT::scalar(64));
GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
- Result &= BuildMI(BB, I, I.getDebugLoc(),
- TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
- .addDef(IsLessReg)
- .addUse(BoolTypeReg)
- .addUse(I.getOperand(1).getReg())
- .addUse(I.getOperand(2).getReg())
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(),
+ TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
+ .addDef(IsLessReg)
+ .addUse(BoolTypeReg)
+ .addUse(I.getOperand(1).getReg())
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
// Build selects.
Register ResTypeReg = GR.getSPIRVTypeID(ResType);
Register NegOneOrZeroReg =
@@ -3405,20 +3410,21 @@ bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
unsigned SelectOpcode =
N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
- Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
- .addDef(NegOneOrZeroReg)
- .addUse(ResTypeReg)
- .addUse(IsLessReg)
- .addUse(buildOnesVal(true, ResType, I)) // -1
- .addUse(buildZerosVal(ResType, I))
- .constrainAllUses(TII, TRI, RBI);
- return Result & BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
- .addDef(ResVReg)
- .addUse(ResTypeReg)
- .addUse(IsLessEqReg)
- .addUse(NegOneOrZeroReg) // -1 or 0
- .addUse(buildOnesVal(false, ResType, I))
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
+ .addDef(NegOneOrZeroReg)
+ .addUse(ResTypeReg)
+ .addUse(IsLessReg)
+ .addUse(buildOnesVal(true, ResType, I)) // -1
+ .addUse(buildZerosVal(ResType, I))
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
+ .addDef(ResVReg)
+ .addUse(ResTypeReg)
+ .addUse(IsLessEqReg)
+ .addUse(NegOneOrZeroReg) // -1 or 0
+ .addUse(buildOnesVal(false, ResType, I))
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
@@ -3433,18 +3439,19 @@ bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
Register Zero = buildZerosVal(IntTy, I);
Register One = buildOnesVal(false, IntTy, I);
MachineBasicBlock &BB = *I.getParent();
- bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
- .addDef(BitIntReg)
- .addUse(GR.getSPIRVTypeID(IntTy))
- .addUse(IntReg)
- .addUse(One)
- .constrainAllUses(TII, TRI, RBI);
- return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(BoolTy))
- .addUse(BitIntReg)
- .addUse(Zero)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ .addDef(BitIntReg)
+ .addUse(GR.getSPIRVTypeID(IntTy))
+ .addUse(IntReg)
+ .addUse(One)
+ .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(BoolTy))
+ .addUse(BitIntReg)
+ .addUse(Zero)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
@@ -3486,10 +3493,11 @@ bool SPIRVInstructionSelector::selectConst(Register ResVReg,
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
@@ -3505,7 +3513,8 @@ bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
.addUse(I.getOperand(2).getReg());
for (unsigned i = 4; i < I.getNumOperands(); i++)
MIB.addImm(foldImm(I.getOperand(i), MRI));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
@@ -3529,7 +3538,8 @@ bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
.addUse(I.getOperand(2).getReg());
for (unsigned i = 3; i < I.getNumOperands(); i++)
MIB.addImm(foldImm(I.getOperand(i), MRI));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
@@ -3538,13 +3548,14 @@ bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
if (getImm(I.getOperand(4), MRI))
return selectInsertVal(ResVReg, ResType, I);
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.addUse(I.getOperand(3).getReg())
.addUse(I.getOperand(4).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
@@ -3553,12 +3564,13 @@ bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
if (getImm(I.getOperand(3), MRI))
return selectExtractVal(ResVReg, ResType, I);
MachineBasicBlock &BB = *I.getParent();
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.addUse(I.getOperand(3).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
@@ -3593,13 +3605,13 @@ bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
: 4;
for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
Res.addUse(I.getOperand(i).getReg());
- return Res.constrainAllUses(TII, TRI, RBI);
+ Res.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// Maybe wrap a value into OpSpecConstantOp
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
- bool Result = true;
unsigned Lim = I.getNumExplicitOperands();
for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
Register OpReg = I.getOperand(i).getReg();
@@ -3634,11 +3646,9 @@ bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
.addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
.addUse(OpReg);
GR.add(OpDefine, MIB);
- Result = MIB.constrainAllUses(TII, TRI, RBI);
- if (!Result)
- break;
+ MIB.constrainAllUses(TII, TRI, RBI);
}
- return Result;
+ return true;
}
bool SPIRVInstructionSelector::selectDerivativeInst(
@@ -3672,24 +3682,22 @@ bool SPIRVInstructionSelector::selectDerivativeInst(
Register ConvertToVReg = MRI->createVirtualRegister(RegClass);
Register DpdOpVReg = MRI->createVirtualRegister(RegClass);
- bool Result =
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpFConvert))
.addDef(ConvertToVReg)
.addUse(GR.getSPIRVTypeID(F32ConvertTy))
.addUse(SrcReg)
.constrainAllUses(TII, TRI, RBI);
- Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DPdOpCode))
- .addDef(DpdOpVReg)
- .addUse(GR.getSPIRVTypeID(F32ConvertTy))
- .addUse(ConvertToVReg)
- .constrainAllUses(TII, TRI, RBI);
- Result &=
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DPdOpCode))
+ .addDef(DpdOpVReg)
+ .addUse(GR.getSPIRVTypeID(F32ConvertTy))
+ .addUse(ConvertToVReg)
+ .constrainAllUses(TII, TRI, RBI);
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpFConvert))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(DpdOpVReg)
.constrainAllUses(TII, TRI, RBI);
- return Result;
+ return true;
}
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
@@ -3728,7 +3736,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
: nullptr;
assert(MI);
Register GVarVReg = MI->getOperand(0).getReg();
- bool Res = selectGlobalValue(GVarVReg, *MI, Init);
+ if (!selectGlobalValue(GVarVReg, *MI, Init))
+ return false;
// We violate SSA form by inserting OpVariable and still having a gMIR
// instruction %vreg = G_GLOBAL_VALUE @gvar. We need to fix this by erasing
// the duplicated definition.
@@ -3736,13 +3745,14 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
GR.invalidateMachineInstr(MI);
MI->removeFromParent();
}
- return Res;
+ return true;
}
case Intrinsic::spv_undef: {
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_const_composite: {
// If no values are attached, the composite is null constant.
@@ -3768,7 +3778,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
}
case Intrinsic::spv_assign_name: {
@@ -3778,7 +3789,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
i < I.getNumExplicitOperands(); ++i) {
MIB.addImm(I.getOperand(i).getImm());
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_switch: {
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
@@ -3792,7 +3804,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
else
llvm_unreachable("Unexpected OpSwitch operand");
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_loop_merge: {
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
@@ -3802,14 +3815,16 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
else
MIB.addImm(foldImm(I.getOperand(i), MRI));
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_loop_control_intel: {
auto MIB =
BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopControlINTEL));
for (unsigned J = 1; J < I.getNumExplicitOperands(); ++J)
MIB.addImm(foldImm(I.getOperand(J), MRI));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_selection_merge: {
auto MIB =
@@ -3818,41 +3833,47 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
"operand 1 to spv_selection_merge must be a basic block");
MIB.addMBB(I.getOperand(1).getMBB());
MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_cmpxchg:
return selectAtomicCmpXchg(ResVReg, ResType, I);
case Intrinsic::spv_unreachable:
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
.constrainAllUses(TII, TRI, RBI);
+ return true;
case Intrinsic::spv_alloca:
return selectFrameIndex(ResVReg, ResType, I);
case Intrinsic::spv_alloca_array:
return selectAllocaArray(ResVReg, ResType, I);
case Intrinsic::spv_assume:
- if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
+ if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
.addUse(I.getOperand(1).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
+ }
break;
case Intrinsic::spv_expect:
- if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
+ if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.addUse(I.getOperand(3).getReg())
.constrainAllUses(TII, TRI, RBI);
+ return true;
+ }
break;
case Intrinsic::arithmetic_fence:
- if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
- return BuildMI(BB, I, I.getDebugLoc(),
- TII.get(SPIRV::OpArithmeticFenceEXT))
+ if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence)) {
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
.constrainAllUses(TII, TRI, RBI);
- else
+ return true;
+ } else
return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
break;
case Intrinsic::spv_thread_id:
@@ -3974,21 +3995,16 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_firstbitlow: // There is no CL equivlent of FindILsb
return selectFirstBitLow(ResVReg, ResType, I);
case Intrinsic::spv_group_memory_barrier_with_group_sync: {
- bool Result = true;
- auto MemSemConstant =
+ Register MemSemReg =
buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
- Register MemSemReg = MemSemConstant.first;
- Result &= MemSemConstant.second;
- auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
- Register ScopeReg = ScopeConstant.first;
- Result &= ScopeConstant.second;
+ Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup, I);
MachineBasicBlock &BB = *I.getParent();
- return Result &&
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
- .addUse(ScopeReg)
- .addUse(ScopeReg)
- .addUse(MemSemReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
+ .addUse(ScopeReg)
+ .addUse(ScopeReg)
+ .addUse(MemSemReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_generic_cast_to_ptr_explicit: {
Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 1).getReg();
@@ -3997,13 +4013,13 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
if (!isGenericCastablePtr(ResSC))
report_fatal_error("The target storage class is not castable from the "
"Generic storage class");
- return BuildMI(BB, I, I.getDebugLoc(),
- TII.get(SPIRV::OpGenericCastToPtrExplicit))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpGenericCastToPtrExplicit))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(PtrReg)
.addImm(ResSC)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_lifetime_start:
case Intrinsic::spv_lifetime_end: {
@@ -4013,10 +4029,11 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
if (Size == -1)
Size = 0;
- return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
.addUse(PtrReg)
.addImm(Size)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
case Intrinsic::spv_saturate:
return selectSaturate(ResVReg, ResType, I);
@@ -4206,47 +4223,34 @@ bool SPIRVInstructionSelector::selectUpdateCounter(Register &ResVReg,
SPIRVType *IntPtrType = GR.getOrCreateSPIRVPointerType(
LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
- auto Zero = buildI32Constant(0, I);
- if (!Zero.second)
- return false;
+ Register Zero = buildI32Constant(0, I);
Register PtrToCounter =
MRI->createVirtualRegister(GR.getRegClass(IntPtrType));
- if (!BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(SPIRV::OpAccessChain))
- .addDef(PtrToCounter)
- .addUse(GR.getSPIRVTypeID(IntPtrType))
- .addUse(CounterHandleReg)
- .addUse(Zero.first)
- .constrainAllUses(TII, TRI, RBI)) {
- return false;
- }
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAccessChain))
+ .addDef(PtrToCounter)
+ .addUse(GR.getSPIRVTypeID(IntPtrType))
+ .addUse(CounterHandleReg)
+ .addUse(Zero)
+ .constrainAllUses(TII, TRI, RBI);
// For UAV/SSBO counters, the scope is Device. The counter variable is not
// used as a flag. So the memory semantics can be None.
- auto Scope = buildI32Constant(SPIRV::Scope::Device, I);
- if (!Scope.second)
- return false;
- auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None, I);
- if (!Semantics.second)
- return false;
+ Register Scope = buildI32Constant(SPIRV::Scope::Device, I);
+ Register Semantics = buildI32Constant(SPIRV::MemorySemantics::None, I);
int64_t IncrVal = getIConstValSext(IncrReg, MRI);
- auto Incr = buildI32Constant(static_cast<uint32_t>(IncrVal), I);
- if (!Incr.second)
- return false;
+ Register Incr = buildI32Constant(static_cast<uint32_t>(IncrVal), I);
Register AtomicRes = MRI->createVirtualRegister(GR.getRegClass(ResType));
- if (!BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAtomicIAdd))
- .addDef(AtomicRes)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(PtrToCounter)
- .addUse(Scope.first)
- .addUse(Semantics.first)
- .addUse(Incr.first)
- .constrainAllUses(TII, TRI, RBI)) {
- return false;
- }
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAtomicIAdd))
+ .addDef(AtomicRes)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(PtrToCounter)
+ .addUse(Scope)
+ .addUse(Semantics)
+ .addUse(Incr)
+ .constrainAllUses(TII, TRI, RBI);
if (IncrVal >= 0) {
return BuildCOPY(ResVReg, AtomicRes, I);
}
@@ -4256,12 +4260,13 @@ bool SPIRVInstructionSelector::selectUpdateCounter(Register &ResVReg,
// to the same atomic intrinsic which returns the value *before* the
// operation. So for decrements (negative IncrVal), we must subtract the
// increment value from the result to get the post-decrement value.
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(AtomicRes)
- .addUse(Incr.first)
+ .addUse(Incr)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectReadImageIntrinsic(
Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
@@ -4325,14 +4330,12 @@ bool SPIRVInstructionSelector::selectSampleIntrinsic(Register &ResVReg,
Register SampledImageReg =
MRI->createVirtualRegister(GR.getRegClass(SampledImageType));
- bool Succeed = BuildMI(*I.getParent(), I, Loc, TII.get(SPIRV::OpSampledImage))
- .addDef(SampledImageReg)
- .addUse(GR.getSPIRVTypeID(SampledImageType))
- .addUse(NewImageReg)
- .addUse(NewSamplerReg)
- .constrainAllUses(TII, TRI, RBI);
- if (!Succeed)
- return false;
+ BuildMI(*I.getParent(), I, Loc, TII.get(SPIRV::OpSampledImage))
+ .addDef(SampledImageReg)
+ .addUse(GR.getSPIRVTypeID(SampledImageType))
+ .addUse(NewImageReg)
+ .addUse(NewSamplerReg)
+ .constrainAllUses(TII, TRI, RBI);
auto MIB =
BuildMI(*I.getParent(), I, Loc, TII.get(SPIRV::OpImageSampleImplicitLod))
@@ -4358,7 +4361,8 @@ bool SPIRVInstructionSelector::selectSampleIntrinsic(Register &ResVReg,
MIB.addUse(*ClampReg);
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::generateImageReadOrFetch(
@@ -4387,7 +4391,8 @@ bool SPIRVInstructionSelector::generateImageReadOrFetch(
if (IsSignedInteger)
BMI.addImm(0x1000); // SignExtend
- return BMI.constrainAllUses(TII, TRI, RBI);
+ BMI.constrainAllUses(TII, TRI, RBI);
+ return true;
}
SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
@@ -4401,18 +4406,16 @@ bool SPIRVInstructionSelector::generateImageReadOrFetch(
.addUse(IdxReg);
if (IsSignedInteger)
BMI.addImm(0x1000); // SignExtend
- bool Succeed = BMI.constrainAllUses(TII, TRI, RBI);
- if (!Succeed)
- return false;
+ BMI.constrainAllUses(TII, TRI, RBI);
if (ResultSize == 1) {
- return BuildMI(*Pos.getParent(), Pos, Loc,
- TII.get(SPIRV::OpCompositeExtract))
+ BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpCompositeExtract))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(ReadReg)
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
return extractSubvector(ResVReg, ResType, ReadReg, Pos);
}
@@ -4435,14 +4438,14 @@ bool SPIRVInstructionSelector::selectResourceGetPointer(
Register IndexReg = I.getOperand(3).getReg();
Register ZeroReg =
buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
- return BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(SPIRV::OpAccessChain))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAccessChain))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(ResourcePtr)
.addUse(ZeroReg)
.addUse(IndexReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectPushConstantGetPointer(
@@ -4515,16 +4518,13 @@ bool SPIRVInstructionSelector::extractSubvector(
const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
for (uint64_t I = 0; I < ResultSize; I++) {
Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
- bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
- InsertionPoint.getDebugLoc(),
- TII.get(SPIRV::OpCompositeExtract))
- .addDef(ComponentReg)
- .addUse(ScalarType->getOperand(0).getReg())
- .addUse(ReadReg)
- .addImm(I)
- .constrainAllUses(TII, TRI, RBI);
- if (!Succeed)
- return false;
+ BuildMI(*InsertionPoint.getParent(), InsertionPoint,
+ InsertionPoint.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
+ .addDef(ComponentReg)
+ .addUse(ScalarType->getOperand(0).getReg())
+ .addUse(ReadReg)
+ .addImm(I)
+ .constrainAllUses(TII, TRI, RBI);
ComponentRegisters.emplace_back(ComponentReg);
}
@@ -4536,7 +4536,8 @@ bool SPIRVInstructionSelector::extractSubvector(
for (Register ComponentReg : ComponentRegisters)
MIB.addUse(ComponentReg);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
@@ -4559,12 +4560,12 @@ bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
Register DataReg = I.getOperand(3).getReg();
assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
- return BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(SPIRV::OpImageWrite))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpImageWrite))
.addUse(NewImageReg)
.addUse(CoordinateReg)
.addUse(DataReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
Register SPIRVInstructionSelector::buildPointerToResource(
@@ -4604,23 +4605,24 @@ bool SPIRVInstructionSelector::selectFirstBitSet16(
Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
unsigned ExtendOpcode, unsigned BitSetOpcode) const {
Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
- bool Result = selectOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
- ExtendOpcode);
+ if (!selectOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
+ ExtendOpcode))
+ return false;
- return Result &&
- selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
+ return selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
}
bool SPIRVInstructionSelector::selectFirstBitSet32(
Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
Register SrcReg, unsigned BitSetOpcode) const {
- return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
.addImm(BitSetOpcode)
.addUse(SrcReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
@@ -4661,8 +4663,7 @@ bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
.addImm(CurrentComponent)
.addImm(CurrentComponent + 1);
- if (!MIB.constrainAllUses(TII, TRI, RBI))
- return false;
+ MIB.constrainAllUses(TII, TRI, RBI);
Register SubVecBitSetReg =
MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));
@@ -4765,8 +4766,7 @@ bool SPIRVInstructionSelector::selectFirstBitSet64(
MIB.addImm(J);
}
- if (!MIB.constrainAllUses(TII, TRI, RBI))
- return false;
+ MIB.constrainAllUses(TII, TRI, RBI);
MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
@@ -4780,8 +4780,7 @@ bool SPIRVInstructionSelector::selectFirstBitSet64(
for (unsigned J = 1; J < ComponentCount * 2; J += 2) {
MIB.addImm(J);
}
- if (!MIB.constrainAllUses(TII, TRI, RBI))
- return false;
+ MIB.constrainAllUses(TII, TRI, RBI);
}
// 4. Check the result. When primary bits == -1 use secondary, otherwise use
@@ -4902,17 +4901,16 @@ bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
// there was an allocation size parameter to the allocation instruction
// that is not 1
MachineBasicBlock &BB = *I.getParent();
- bool Res = BuildMI(BB, I, I.getDebugLoc(),
- TII.get(SPIRV::OpVariableLengthArrayINTEL))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(I.getOperand(2).getReg())
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVariableLengthArrayINTEL))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(I.getOperand(2).getReg())
+ .constrainAllUses(TII, TRI, RBI);
if (!STI.isShader()) {
unsigned Alignment = I.getOperand(3).getImm();
buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment, {Alignment});
}
- return Res;
+ return true;
}
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
@@ -4921,18 +4919,17 @@ bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
// Change order of instructions if needed: all OpVariable instructions in a
// function must be the first instructions in the first block
auto It = getOpVariableMBBIt(I);
- bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
- TII.get(SPIRV::OpVariable))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(*It->getParent(), It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
+ .constrainAllUses(TII, TRI, RBI);
if (!STI.isShader()) {
unsigned Alignment = I.getOperand(2).getImm();
buildOpDecorate(ResVReg, *It, TII, SPIRV::Decoration::Alignment,
{Alignment});
}
- return Res;
+ return true;
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
@@ -4943,15 +4940,17 @@ bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
const MachineInstr *PrevI = I.getPrevNode();
MachineBasicBlock &MBB = *I.getParent();
if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
.addUse(PrevI->getOperand(0).getReg())
.addMBB(PrevI->getOperand(1).getMBB())
.addMBB(I.getOperand(0).getMBB())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
.addMBB(I.getOperand(0).getMBB())
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
@@ -4974,11 +4973,12 @@ bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
MachineBasicBlock &MBB = *I.getParent();
unsigned NextMBBNum = MBB.getNextNode()->getNumber();
MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
- return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
.addUse(I.getOperand(0).getReg())
.addMBB(I.getOperand(1).getMBB())
.addMBB(NextMBB)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
@@ -4992,10 +4992,10 @@ bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
MIB.addUse(I.getOperand(i + 0).getReg());
MIB.addMBB(I.getOperand(i + 1).getMBB());
}
- bool Res = MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
MIB->setDesc(TII.get(TargetOpcode::PHI));
MIB->removeOperand(1);
- return Res;
+ return true;
}
bool SPIRVInstructionSelector::selectGlobalValue(
@@ -5061,15 +5061,17 @@ bool SPIRVInstructionSelector::selectGlobalValue(
GR.add(ConstVal, MIB2);
// mapping the function pointer to the used Function
GR.recordFunctionPointer(&MIB2.getInstr()->getOperand(2), GVFun);
- return MIB1.constrainAllUses(TII, TRI, RBI) &&
- MIB2.constrainAllUses(TII, TRI, RBI);
+ MIB1.constrainAllUses(TII, TRI, RBI);
+ MIB2.constrainAllUses(TII, TRI, RBI);
+ return true;
}
MachineInstrBuilder MIB3 =
BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
.addDef(NewReg)
.addUse(GR.getSPIRVTypeID(ResType));
GR.add(ConstVal, MIB3);
- return MIB3.constrainAllUses(TII, TRI, RBI);
+ MIB3.constrainAllUses(TII, TRI, RBI);
+ return true;
}
assert(NewReg != ResVReg);
return BuildCOPY(ResVReg, NewReg, I);
@@ -5124,14 +5126,13 @@ bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
// Build log2(x).
Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
- bool Result =
- BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
- .addDef(VarReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
- .addImm(GL::Log2)
- .add(I.getOperand(1))
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
+ .addDef(VarReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
+ .addImm(GL::Log2)
+ .add(I.getOperand(1))
+ .constrainAllUses(TII, TRI, RBI);
// Build 0.30103.
assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
@@ -5148,12 +5149,13 @@ bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
? SPIRV::OpVectorTimesScalar
: SPIRV::OpFMulS;
- return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
- .addDef(ResVReg)
- .addUse(GR.getSPIRVTypeID(ResType))
- .addUse(VarReg)
- .addUse(ScaleReg)
- .constrainAllUses(TII, TRI, RBI);
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(VarReg)
+ .addUse(ScaleReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return true;
}
bool SPIRVInstructionSelector::selectModf(Register ResVReg,
@@ -5219,10 +5221,12 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
.addDef(IntegralPartReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Variable);
- return LoadMIB.constrainAllUses(TII, TRI, RBI);
+ LoadMIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
} else if (STI.canUseExtInstSet(SPIRV::InstructionSet::GLSL_std_450)) {
assert(false && "GLSL::Modf is deprecated.");
// FIXME: GL::Modf is deprecated, use Modfstruct instead.
@@ -5264,11 +5268,10 @@ bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
// Load v3uint value from the global variable.
- bool Result =
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
- .addDef(LoadedRegister)
- .addUse(GR.getSPIRVTypeID(Vec3Ty))
- .addUse(Variable);
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ .addDef(LoadedRegister)
+ .addUse(GR.getSPIRVTypeID(Vec3Ty))
+ .addUse(Variable);
// Get the input ID index. Expecting operand is a constant immediate value,
// wrapped in a type assignment.
@@ -5282,7 +5285,8 @@ bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(LoadedRegister)
.addImm(ThreadId);
- return Result && MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
// Generate the instructions to load 32-bit integer builtin input IDs/Indices.
@@ -5316,7 +5320,8 @@ bool SPIRVInstructionSelector::loadBuiltinInputID(
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Variable);
- return MIB.constrainAllUses(TII, TRI, RBI);
+ MIB.constrainAllUses(TII, TRI, RBI);
+ return true;
}
SPIRVType *SPIRVInstructionSelector::widenTypeToVec4(const SPIRVType *Type,
@@ -5365,12 +5370,12 @@ bool SPIRVInstructionSelector::loadHandleBeforePosition(
uint32_t LoadOpcode =
IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
GR.assignSPIRVTypeToVReg(ResType, HandleReg, *Pos.getMF());
- return BuildMI(*Pos.getParent(), Pos, HandleDef.getDebugLoc(),
- TII.get(LoadOpcode))
+ BuildMI(*Pos.getParent(), Pos, HandleDef.getDebugLoc(), TII.get(LoadOpcode))
.addDef(HandleReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(VarReg)
.constrainAllUses(TII, TRI, RBI);
+ return true;
}
void SPIRVInstructionSelector::errorIfInstrOutsideShader(
More information about the llvm-commits
mailing list