[llvm] 191d70f - [AMDGPU] Use Register in more places in SIInstrInfo. NFC.
Jay Foad via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 25 07:07:31 PDT 2022
Author: Jay Foad
Date: 2022-10-25T15:04:58+01:00
New Revision: 191d70f2f52d9cd38e0c57f8319b466eff1ac023
URL: https://github.com/llvm/llvm-project/commit/191d70f2f52d9cd38e0c57f8319b466eff1ac023
DIFF: https://github.com/llvm/llvm-project/commit/191d70f2f52d9cd38e0c57f8319b466eff1ac023.diff
LOG: [AMDGPU] Use Register in more places in SIInstrInfo. NFC.
Also avoid using AMDGPU::NoRegister when it's not needed.
Added:
Modified:
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.h
Removed:
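For context before the diff: the change relies on llvm::Register default-constructing
to the invalid register, which shares the encoding 0 with AMDGPU::NoRegister, so a
plain truth test can stand in for the explicit comparison. The sketch below is
illustrative only and is not part of this commit; findSGPRMaybe and example are
made-up names, and it assumes Register's isValid() and its implicit conversion to
the underlying unsigned id behave as they do in the tree at this revision.

  #include "llvm/CodeGen/Register.h"

  using llvm::Register;

  // Hypothetical helper, not from SIInstrInfo: returns the given SGPR if one
  // was found, otherwise a default-constructed (invalid) Register, which has
  // the same encoding (0) as AMDGPU::NoRegister.
  static Register findSGPRMaybe(bool Found, Register Candidate) {
    return Found ? Candidate : Register();
  }

  void example(Register R) {
    Register SGPRUsed = findSGPRMaybe(R.isValid(), R);
    if (SGPRUsed) {
      // Behaves the same as the old "SGPRUsed != AMDGPU::NoRegister" check:
      // Register converts to its underlying unsigned id, and 0 means
      // "no register".
    }
  }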
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 3cc81f1947ff0..640e401677e45 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1059,7 +1059,7 @@ int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const DebugLoc &DL, unsigned DestReg,
+ const DebugLoc &DL, Register DestReg,
int64_t Value) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
@@ -3974,7 +3974,7 @@ static Register findImplicitSGPRRead(const MachineInstr &MI) {
}
}
- return AMDGPU::NoRegister;
+ return Register();
}
static bool shouldReadExec(const MachineInstr &MI) {
@@ -4326,7 +4326,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
}
SGPRUsed = findImplicitSGPRRead(MI);
- if (SGPRUsed != AMDGPU::NoRegister) {
+ if (SGPRUsed) {
// Implicit uses may safely overlap true operands
if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
return !RI.regsOverlap(SGPRUsed, SGPR);
@@ -4354,7 +4354,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
// but still can't use more than one SGPR register
if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
unsigned SGPRCount = 0;
- Register SGPRUsed = AMDGPU::NoRegister;
+ Register SGPRUsed;
for (int OpIdx : {Src0Idx, Src1Idx}) {
if (OpIdx == -1)
@@ -5105,7 +5105,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
// we need to only have one constant bus use before GFX10.
- bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
+ bool HasImplicitSGPR = findImplicitSGPRRead(MI);
if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
@@ -5239,7 +5239,7 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
SmallDenseSet<unsigned> SGPRsUsed;
Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
- if (SGPRReg != AMDGPU::NoRegister) {
+ if (SGPRReg) {
SGPRsUsed.insert(SGPRReg);
--ConstantBusLimit;
}
@@ -5313,7 +5313,7 @@ Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
return DstReg;
}
- SmallVector<unsigned, 8> SRegs;
+ SmallVector<Register, 8> SRegs;
for (unsigned i = 0; i < SubRegs; ++i) {
Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
@@ -5515,7 +5515,7 @@ emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
MachineBasicBlock::iterator I = LoopBB.begin();
SmallVector<Register, 8> ReadlanePieces;
- Register CondReg = AMDGPU::NoRegister;
+ Register CondReg;
Register VRsrc = Rsrc.getReg();
unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
@@ -5558,7 +5558,7 @@ emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2));
// Combine the comparison results with AND.
- if (CondReg == AMDGPU::NoRegister) // First.
+ if (!CondReg) // First.
CondReg = NewCondReg;
else { // If not the first, we create an AND.
Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
@@ -6417,7 +6417,7 @@ MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst,
}
bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
- unsigned NewDstReg = AMDGPU::NoRegister;
+ Register NewDstReg;
if (HasDst) {
Register DstReg = Inst.getOperand(0).getReg();
if (DstReg.isPhysical())
@@ -7308,10 +7308,10 @@ Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
// If the operand's class is an SGPR, we can never move it.
Register SGPRReg = findImplicitSGPRRead(MI);
- if (SGPRReg != AMDGPU::NoRegister)
+ if (SGPRReg)
return SGPRReg;
- Register UsedSGPRs[3] = { AMDGPU::NoRegister };
+ Register UsedSGPRs[3] = {Register()};
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
for (unsigned i = 0; i < 3; ++i) {
@@ -7350,12 +7350,12 @@ Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
// TODO: If some of the operands are 64-bit SGPRs and some 32, we should
// prefer those.
- if (UsedSGPRs[0] != AMDGPU::NoRegister) {
+ if (UsedSGPRs[0]) {
if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
SGPRReg = UsedSGPRs[0];
}
- if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
+ if (!SGPRReg && UsedSGPRs[1]) {
if (UsedSGPRs[1] == UsedSGPRs[2])
SGPRReg = UsedSGPRs[1];
}
@@ -7436,7 +7436,7 @@ unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
int &FrameIndex) const {
const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
if (!Addr || !Addr->isFI())
- return AMDGPU::NoRegister;
+ return Register();
assert(!MI.memoperands_empty() &&
(*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
@@ -7456,7 +7456,7 @@ unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
if (!MI.mayLoad())
- return AMDGPU::NoRegister;
+ return Register();
if (isMUBUF(MI) || isVGPRSpill(MI))
return isStackAccess(MI, FrameIndex);
@@ -7464,13 +7464,13 @@ unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
if (isSGPRSpill(MI))
return isSGPRStackAccess(MI, FrameIndex);
- return AMDGPU::NoRegister;
+ return Register();
}
unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
if (!MI.mayStore())
- return AMDGPU::NoRegister;
+ return Register();
if (isMUBUF(MI) || isVGPRSpill(MI))
return isStackAccess(MI, FrameIndex);
@@ -7478,7 +7478,7 @@ unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
if (isSGPRSpill(MI))
return isSGPRStackAccess(MI, FrameIndex);
- return AMDGPU::NoRegister;
+ return Register();
}
unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 2b2b4d0ce91f4..b80838c393fcc 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -210,10 +210,8 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
bool KillSrc) const override;
void materializeImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const DebugLoc &DL,
- unsigned DestReg,
- int64_t Value) const;
+ MachineBasicBlock::iterator MI, const DebugLoc &DL,
+ Register DestReg, int64_t Value) const;
const TargetRegisterClass *getPreferredSelectRegClass(
unsigned Size) const;