[llvm] [CodeGen] Make the parameter TRI required in some functions. (PR #85968)
Xu Zhang via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 8 06:26:38 PDT 2024
https://github.com/simonzgx updated https://github.com/llvm/llvm-project/pull/85968
>From eb9aab96f8e362f03969c02b10c6d319bcba4c9f Mon Sep 17 00:00:00 2001
From: Xu Zhang <simonzgx at gmail.com>
Date: Sun, 25 Feb 2024 20:50:26 +0800
Subject: [PATCH] [CodeGen] Make the parameter TRI required in some functions
to prevent incorrect calls.
---
.../GlobalISel/LegalizationArtifactCombiner.h | 2 +-
llvm/include/llvm/CodeGen/MachineInstr.h | 72 +++++++++----------
llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp | 9 +--
llvm/lib/CodeGen/CalcSpillWeights.cpp | 3 +-
llvm/lib/CodeGen/CodeGenCommonISel.cpp | 2 +-
llvm/lib/CodeGen/EarlyIfConversion.cpp | 4 +-
.../CodeGen/FixupStatepointCallerSaved.cpp | 2 +-
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 4 +-
llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp | 3 +-
llvm/lib/CodeGen/InlineSpiller.cpp | 2 +-
llvm/lib/CodeGen/LiveVariables.cpp | 11 +--
llvm/lib/CodeGen/MachineCSE.cpp | 2 +-
llvm/lib/CodeGen/MachineCombiner.cpp | 14 ++--
llvm/lib/CodeGen/MachineCopyPropagation.cpp | 2 +-
llvm/lib/CodeGen/MachineInstr.cpp | 13 ++--
llvm/lib/CodeGen/MachineLateInstrsCleanup.cpp | 2 +-
llvm/lib/CodeGen/MachineSink.cpp | 2 +-
llvm/lib/CodeGen/ModuloSchedule.cpp | 9 +--
llvm/lib/CodeGen/PHIElimination.cpp | 13 ++--
llvm/lib/CodeGen/PeepholeOptimizer.cpp | 2 +-
llvm/lib/CodeGen/RegisterCoalescer.cpp | 4 +-
.../lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 2 +-
llvm/lib/CodeGen/StackSlotColoring.cpp | 2 +-
.../lib/CodeGen/TwoAddressInstructionPass.cpp | 16 +++--
.../AArch64/AArch64ConditionOptimizer.cpp | 2 +-
.../AArch64/AArch64ConditionalCompares.cpp | 2 +-
.../AArch64DeadRegisterDefinitionsPass.cpp | 3 +-
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 33 +++++----
.../lib/Target/AArch64/AArch64MacroFusion.cpp | 4 +-
.../AArch64RedundantCopyElimination.cpp | 2 +-
.../GISel/AArch64PostSelectOptimize.cpp | 4 +-
.../lib/Target/AMDGPU/GCNHazardRecognizer.cpp | 7 +-
llvm/lib/Target/AMDGPU/R600InstrInfo.cpp | 4 +-
llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 4 +-
llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 6 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 18 ++---
.../AMDGPU/SIOptimizeExecMaskingPreRA.cpp | 2 +-
llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 4 +-
llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 10 +--
.../Target/AMDGPU/SIShrinkInstructions.cpp | 2 +-
llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp | 4 +-
llvm/lib/Target/ARM/A15SDOptimizer.cpp | 4 +-
llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 14 ++--
llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 2 +-
llvm/lib/Target/ARM/ARMFrameLowering.cpp | 2 +-
llvm/lib/Target/ARM/ARMISelLowering.cpp | 6 +-
llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 14 ++--
llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp | 7 +-
.../ARM/MVETPAndVPTOptimisationsPass.cpp | 17 ++---
llvm/lib/Target/ARM/MVEVPTBlockPass.cpp | 3 +-
llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp | 2 +-
llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 2 +-
llvm/lib/Target/ARM/Thumb2SizeReduction.cpp | 6 +-
.../Target/Hexagon/HexagonCopyToCombine.cpp | 7 +-
.../Target/Hexagon/HexagonExpandCondsets.cpp | 8 ++-
llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 6 +-
llvm/lib/Target/M68k/M68kISelLowering.cpp | 4 +-
llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp | 3 +-
llvm/lib/Target/Mips/MipsInstrInfo.cpp | 4 +-
llvm/lib/Target/PowerPC/PPCCTRLoops.cpp | 9 ++-
llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 17 ++---
.../lib/Target/PowerPC/PPCPreEmitPeephole.cpp | 4 +-
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 12 ++--
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
.../Target/RISCV/RISCVInsertReadWriteCSR.cpp | 5 +-
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 15 ++--
.../lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 6 +-
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 6 +-
llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp | 2 +-
.../lib/Target/SystemZ/SystemZElimCompare.cpp | 6 +-
.../Target/SystemZ/SystemZISelLowering.cpp | 15 ++--
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 18 ++---
.../WebAssemblyDebugValueManager.cpp | 2 +-
.../WebAssembly/WebAssemblyRegStackify.cpp | 10 +--
llvm/lib/Target/X86/X86CmovConversion.cpp | 8 +--
llvm/lib/Target/X86/X86FixupSetCC.cpp | 4 +-
llvm/lib/Target/X86/X86FlagsCopyLowering.cpp | 16 +++--
llvm/lib/Target/X86/X86FloatingPoint.cpp | 31 ++++----
llvm/lib/Target/X86/X86ISelLowering.cpp | 16 ++---
llvm/lib/Target/X86/X86InstrInfo.cpp | 38 +++++-----
llvm/lib/Target/X86/X86MCInstLower.cpp | 3 +-
.../X86/X86SpeculativeLoadHardening.cpp | 15 ++--
82 files changed, 362 insertions(+), 317 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index ca62f38061b115..58a200b16ddee1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -906,7 +906,7 @@ class LegalizationArtifactCombiner {
unsigned &DefOperandIdx) {
if (Register Def = findValueFromDefImpl(Reg, 0, Size)) {
if (auto *Unmerge = dyn_cast<GUnmerge>(MRI.getVRegDef(Def))) {
- DefOperandIdx = Unmerge->findRegisterDefOperandIdx(Def);
+ DefOperandIdx = Unmerge->findRegisterDefOperandIdx(Def, nullptr);
return Unmerge;
}
}
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 7249f812d2cc4c..be1ad36dcd8721 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1445,9 +1445,8 @@ class MachineInstr
/// is a read of a super-register.
/// This does not count partial redefines of virtual registers as reads:
/// %reg1024:6 = OP.
- bool readsRegister(Register Reg,
- const TargetRegisterInfo *TRI = nullptr) const {
- return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
+ bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterUseOperandIdx(Reg, TRI, false) != -1;
}
/// Return true if the MachineInstr reads the specified virtual register.
@@ -1466,34 +1465,30 @@ class MachineInstr
/// Return true if the MachineInstr kills the specified register.
/// If TargetRegisterInfo is non-null, then it also checks if there is
/// a kill of a super-register.
- bool killsRegister(Register Reg,
- const TargetRegisterInfo *TRI = nullptr) const {
- return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
+ bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterUseOperandIdx(Reg, TRI, true) != -1;
}
/// Return true if the MachineInstr fully defines the specified register.
/// If TargetRegisterInfo is non-null, then it also checks
/// if there is a def of a super-register.
/// NOTE: It's ignoring subreg indices on virtual registers.
- bool definesRegister(Register Reg,
- const TargetRegisterInfo *TRI = nullptr) const {
- return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
+ bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterDefOperandIdx(Reg, TRI, false, false) != -1;
}
/// Return true if the MachineInstr modifies (fully define or partially
/// define) the specified register.
/// NOTE: It's ignoring subreg indices on virtual registers.
- bool modifiesRegister(Register Reg,
- const TargetRegisterInfo *TRI = nullptr) const {
- return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
+ bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterDefOperandIdx(Reg, TRI, false, true) != -1;
}
/// Returns true if the register is dead in this machine instruction.
/// If TargetRegisterInfo is non-null, then it also checks
/// if there is a dead def of a super-register.
- bool registerDefIsDead(Register Reg,
- const TargetRegisterInfo *TRI = nullptr) const {
- return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
+ bool registerDefIsDead(Register Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterDefOperandIdx(Reg, TRI, true, false) != -1;
}
/// Returns true if the MachineInstr has an implicit-use operand of exactly
@@ -1503,22 +1498,23 @@ class MachineInstr
/// Returns the operand index that is a use of the specific register or -1
/// if it is not found. It further tightens the search criteria to a use
/// that kills the register if isKill is true.
- int findRegisterUseOperandIdx(Register Reg, bool isKill = false,
- const TargetRegisterInfo *TRI = nullptr) const;
+ int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI,
+ bool isKill = false) const;
/// Wrapper for findRegisterUseOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
- MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false,
- const TargetRegisterInfo *TRI = nullptr) {
- int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
+ MachineOperand *findRegisterUseOperand(Register Reg,
+ const TargetRegisterInfo *TRI,
+ bool isKill = false) {
+ int Idx = findRegisterUseOperandIdx(Reg, TRI, isKill);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
- const MachineOperand *findRegisterUseOperand(
- Register Reg, bool isKill = false,
- const TargetRegisterInfo *TRI = nullptr) const {
- return const_cast<MachineInstr *>(this)->
- findRegisterUseOperand(Reg, isKill, TRI);
+ const MachineOperand *findRegisterUseOperand(Register Reg,
+ const TargetRegisterInfo *TRI,
+ bool isKill = false) const {
+ return const_cast<MachineInstr *>(this)->findRegisterUseOperand(Reg, TRI,
+ isKill);
}
/// Returns the operand index that is a def of the specified register or
@@ -1527,26 +1523,26 @@ class MachineInstr
/// overlap the specified register. If TargetRegisterInfo is non-null,
/// then it also checks if there is a def of a super-register.
/// This may also return a register mask operand when Overlap is true.
- int findRegisterDefOperandIdx(Register Reg,
- bool isDead = false, bool Overlap = false,
- const TargetRegisterInfo *TRI = nullptr) const;
+ int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI,
+ bool isDead = false,
+ bool Overlap = false) const;
/// Wrapper for findRegisterDefOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
- MachineOperand *
- findRegisterDefOperand(Register Reg, bool isDead = false,
- bool Overlap = false,
- const TargetRegisterInfo *TRI = nullptr) {
- int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
+ MachineOperand *findRegisterDefOperand(Register Reg,
+ const TargetRegisterInfo *TRI,
+ bool isDead = false,
+ bool Overlap = false) {
+ int Idx = findRegisterDefOperandIdx(Reg, TRI, isDead, Overlap);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
- const MachineOperand *
- findRegisterDefOperand(Register Reg, bool isDead = false,
- bool Overlap = false,
- const TargetRegisterInfo *TRI = nullptr) const {
+ const MachineOperand *findRegisterDefOperand(Register Reg,
+ const TargetRegisterInfo *TRI,
+ bool isDead = false,
+ bool Overlap = false) const {
return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
- Reg, isDead, Overlap, TRI);
+ Reg, TRI, isDead, Overlap);
}
/// Find the index of the first operand in the
diff --git a/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index ed6ce6bc73d38c..7c219a91acdb8d 100644
--- a/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -231,9 +231,9 @@ bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr &MI,
MachineOperand *Op = nullptr;
if (MO.isDef())
- Op = MI.findRegisterUseOperand(Reg, true);
+ Op = MI.findRegisterUseOperand(Reg, nullptr, true);
else
- Op = MI.findRegisterDefOperand(Reg);
+ Op = MI.findRegisterDefOperand(Reg, nullptr);
return(Op && Op->isImplicit());
}
@@ -679,7 +679,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// defines 'NewReg' via an early-clobber operand.
for (const auto &Q : make_range(RegRefs.equal_range(Reg))) {
MachineInstr *UseMI = Q.second.Operand->getParent();
- int Idx = UseMI->findRegisterDefOperandIdx(NewReg, false, true, TRI);
+ int Idx = UseMI->findRegisterDefOperandIdx(NewReg, TRI, false, true);
if (Idx == -1)
continue;
@@ -846,7 +846,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
continue;
} else {
// No anti-dep breaking for implicit deps
- MachineOperand *AntiDepOp = MI.findRegisterDefOperand(AntiDepReg);
+ MachineOperand *AntiDepOp =
+ MI.findRegisterDefOperand(AntiDepReg, nullptr);
assert(AntiDepOp && "Can't find index for defined register operand");
if (!AntiDepOp || AntiDepOp->isImplicit()) {
LLVM_DEBUG(dbgs() << " (implicit)\n");
diff --git a/llvm/lib/CodeGen/CalcSpillWeights.cpp b/llvm/lib/CodeGen/CalcSpillWeights.cpp
index f3cb7fa5af6148..6eebc1fc5ca8ce 100644
--- a/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -251,7 +251,8 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
// For terminators that produce values, ask the backend if the register is
// not spillable.
- if (TII.isUnspillableTerminator(MI) && MI->definesRegister(LI.reg())) {
+ if (TII.isUnspillableTerminator(MI) &&
+ MI->definesRegister(LI.reg(), nullptr)) {
LI.markNotSpillable();
return -1.0f;
}
diff --git a/llvm/lib/CodeGen/CodeGenCommonISel.cpp b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
index 577c5dbc8e2da8..fc56bbe5c5d020 100644
--- a/llvm/lib/CodeGen/CodeGenCommonISel.cpp
+++ b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
@@ -260,7 +260,7 @@ void llvm::salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
continue;
}
- int UseMOIdx = DbgMI->findRegisterUseOperandIdx(DefMO->getReg());
+ int UseMOIdx = DbgMI->findRegisterUseOperandIdx(DefMO->getReg(), nullptr);
assert(UseMOIdx != -1 && DbgMI->hasDebugOperandForReg(DefMO->getReg()) &&
"Must use salvaged instruction as its location");
diff --git a/llvm/lib/CodeGen/EarlyIfConversion.cpp b/llvm/lib/CodeGen/EarlyIfConversion.cpp
index 31e107ade1ccbb..1eecf9e2a54e17 100644
--- a/llvm/lib/CodeGen/EarlyIfConversion.cpp
+++ b/llvm/lib/CodeGen/EarlyIfConversion.cpp
@@ -599,8 +599,8 @@ static bool hasSameValue(const MachineRegisterInfo &MRI,
return false;
// Further, check that the two defs come from corresponding operands.
- int TIdx = TDef->findRegisterDefOperandIdx(TReg);
- int FIdx = FDef->findRegisterDefOperandIdx(FReg);
+ int TIdx = TDef->findRegisterDefOperandIdx(TReg, nullptr);
+ int FIdx = FDef->findRegisterDefOperandIdx(FReg, nullptr);
if (TIdx == -1 || FIdx == -1)
return false;
diff --git a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
index 4d668c53f7156b..3bb9da5f1a37bb 100644
--- a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
+++ b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
@@ -112,7 +112,7 @@ static Register performCopyPropagation(Register Reg,
bool &IsKill, const TargetInstrInfo &TII,
const TargetRegisterInfo &TRI) {
// First check if statepoint itself uses Reg in non-meta operands.
- int Idx = RI->findRegisterUseOperandIdx(Reg, false, &TRI);
+ int Idx = RI->findRegisterUseOperandIdx(Reg, &TRI, false);
if (Idx >= 0 && (unsigned)Idx < StatepointOpers(&*RI).getNumDeoptArgsIdx()) {
IsKill = false;
return Reg;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 40c5119ee7fb3b..d9b4f1d455ede0 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2723,8 +2723,8 @@ bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
// %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
// I1 and I2 are different instructions but produce same values,
// %1 and %6 are same, %1 and %7 are not the same value.
- return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
- I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
+ return I1->findRegisterDefOperandIdx(InstAndDef1->Reg, nullptr) ==
+ I2->findRegisterDefOperandIdx(InstAndDef2->Reg, nullptr);
}
return false;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index bb5363fb2527b5..a0967746397f47 100644
--- a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -420,7 +420,8 @@ void RegBankSelect::tryAvoidingSplit(
// If the next terminator uses Reg, this means we have
// to split right after MI and thus we need a way to ask
// which outgoing edges are affected.
- assert(!Next->readsRegister(Reg) && "Need to split between terminators");
+ assert(!Next->readsRegister(Reg, nullptr) &&
+ "Need to split between terminators");
// We will split all the edges and repair there.
} else {
// This is a virtual register defined by a terminator.
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index c46b1fe18ca743..01e0adf03dcd1b 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -869,7 +869,7 @@ static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
// destination that is marked as an early clobber, print the
// early-clobber slot index.
if (VReg) {
- MachineOperand *MO = I->findRegisterDefOperand(VReg);
+ MachineOperand *MO = I->findRegisterDefOperand(VReg, nullptr);
if (MO && MO->isEarlyClobber())
Idx = Idx.getRegSlot(true);
}
diff --git a/llvm/lib/CodeGen/LiveVariables.cpp b/llvm/lib/CodeGen/LiveVariables.cpp
index b85526cfb380b6..10df5fe1aa96c8 100644
--- a/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/llvm/lib/CodeGen/LiveVariables.cpp
@@ -258,7 +258,7 @@ void LiveVariables::HandlePhysRegUse(Register Reg, MachineInstr &MI) {
}
}
} else if (LastDef && !PhysRegUse[Reg] &&
- !LastDef->findRegisterDefOperand(Reg))
+ !LastDef->findRegisterDefOperand(Reg, nullptr))
// Last def defines the super register, add an implicit def of reg.
LastDef->addOperand(MachineOperand::CreateReg(Reg, true/*IsDef*/,
true/*IsImp*/));
@@ -361,7 +361,8 @@ bool LiveVariables::HandlePhysRegKill(Register Reg, MachineInstr *MI) {
continue;
bool NeedDef = true;
if (PhysRegDef[Reg] == PhysRegDef[SubReg]) {
- MachineOperand *MO = PhysRegDef[Reg]->findRegisterDefOperand(SubReg);
+ MachineOperand *MO =
+ PhysRegDef[Reg]->findRegisterDefOperand(SubReg, nullptr);
if (MO) {
NeedDef = false;
assert(!MO->isDead());
@@ -388,7 +389,7 @@ bool LiveVariables::HandlePhysRegKill(Register Reg, MachineInstr *MI) {
true/*IsImp*/, true/*IsKill*/));
else {
MachineOperand *MO =
- LastRefOrPartRef->findRegisterDefOperand(Reg, false, false, TRI);
+ LastRefOrPartRef->findRegisterDefOperand(Reg, TRI, false, false);
bool NeedEC = MO->isEarlyClobber() && MO->getReg() != Reg;
// If the last reference is the last def, then it's not used at all.
// That is, unless we are currently processing the last reference itself.
@@ -396,7 +397,7 @@ bool LiveVariables::HandlePhysRegKill(Register Reg, MachineInstr *MI) {
if (NeedEC) {
// If we are adding a subreg def and the superreg def is marked early
// clobber, add an early clobber marker to the subreg def.
- MO = LastRefOrPartRef->findRegisterDefOperand(Reg);
+ MO = LastRefOrPartRef->findRegisterDefOperand(Reg, nullptr);
if (MO)
MO->setIsEarlyClobber();
}
@@ -727,7 +728,7 @@ void LiveVariables::recomputeForSingleDefVirtReg(Register Reg) {
if (MI.isPHI())
break;
if (MI.readsVirtualRegister(Reg)) {
- assert(!MI.killsRegister(Reg));
+ assert(!MI.killsRegister(Reg, nullptr));
MI.addRegisterKilled(Reg, nullptr);
VI.Kills.push_back(&MI);
break;
diff --git a/llvm/lib/CodeGen/MachineCSE.cpp b/llvm/lib/CodeGen/MachineCSE.cpp
index 26a8d00e662651..42cdcaa5bbf4f2 100644
--- a/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/llvm/lib/CodeGen/MachineCSE.cpp
@@ -709,7 +709,7 @@ bool MachineCSE::ProcessBlockCSE(MachineBasicBlock *MBB) {
for (MachineBasicBlock::iterator II = CSMI, IE = &MI; II != IE; ++II)
for (auto ImplicitDef : ImplicitDefs)
if (MachineOperand *MO = II->findRegisterUseOperand(
- ImplicitDef, /*isKill=*/true, TRI))
+ ImplicitDef, TRI, /*isKill=*/true))
MO->setIsKill(false);
} else {
// If the instructions aren't in the same BB, bail out and clear the
diff --git a/llvm/lib/CodeGen/MachineCombiner.cpp b/llvm/lib/CodeGen/MachineCombiner.cpp
index a4c87a7678bd8d..3a50a17a0ebcfa 100644
--- a/llvm/lib/CodeGen/MachineCombiner.cpp
+++ b/llvm/lib/CodeGen/MachineCombiner.cpp
@@ -229,8 +229,8 @@ MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
assert(DefInstr &&
"There must be a definition for a new virtual register");
DepthOp = InstrDepth[II->second];
- int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
- int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
+ int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg(), nullptr);
+ int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg(), nullptr);
LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
InstrPtr, UseIdx);
} else {
@@ -241,8 +241,10 @@ MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
if (!isTransientMI(DefInstr))
LatencyOp = TSchedModel.computeOperandLatency(
- DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
- InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
+ DefInstr,
+ DefInstr->findRegisterDefOperandIdx(MO.getReg(), nullptr),
+ InstrPtr,
+ InstrPtr->findRegisterUseOperandIdx(MO.getReg(), nullptr));
}
}
IDepth = std::max(IDepth, DepthOp + LatencyOp);
@@ -280,8 +282,8 @@ unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
unsigned LatencyOp = 0;
if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
LatencyOp = TSchedModel.computeOperandLatency(
- NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
- UseMO->findRegisterUseOperandIdx(MO.getReg()));
+ NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg(), nullptr),
+ UseMO, UseMO->findRegisterUseOperandIdx(MO.getReg(), nullptr));
} else {
LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
}
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 8dc6781fcb018f..3cf7651f792dc5 100644
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -737,7 +737,7 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
// cannot cope with that.
if (isCopyInstr(MI, *TII, UseCopyInstr) &&
MI.modifiesRegister(CopySrcReg, TRI) &&
- !MI.definesRegister(CopySrcReg)) {
+ !MI.definesRegister(CopySrcReg, nullptr)) {
LLVM_DEBUG(dbgs() << "MCP: Copy source overlap with dest in " << MI);
continue;
}
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 83604003a038bd..e0883e0cd97931 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1028,8 +1028,9 @@ bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
-int MachineInstr::findRegisterUseOperandIdx(
- Register Reg, bool isKill, const TargetRegisterInfo *TRI) const {
+int MachineInstr::findRegisterUseOperandIdx(Register Reg,
+ const TargetRegisterInfo *TRI,
+ bool isKill) const {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse())
@@ -1076,9 +1077,9 @@ MachineInstr::readsWritesVirtualRegister(Register Reg,
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
-int
-MachineInstr::findRegisterDefOperandIdx(Register Reg, bool isDead, bool Overlap,
- const TargetRegisterInfo *TRI) const {
+int MachineInstr::findRegisterDefOperandIdx(Register Reg,
+ const TargetRegisterInfo *TRI,
+ bool isDead, bool Overlap) const {
bool isPhys = Reg.isPhysical();
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
@@ -2111,7 +2112,7 @@ void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
void MachineInstr::addRegisterDefined(Register Reg,
const TargetRegisterInfo *RegInfo) {
if (Reg.isPhysical()) {
- MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
+ MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
if (MO)
return;
} else {
diff --git a/llvm/lib/CodeGen/MachineLateInstrsCleanup.cpp b/llvm/lib/CodeGen/MachineLateInstrsCleanup.cpp
index aa1eb7c354255d..1f596cd1bd2ec1 100644
--- a/llvm/lib/CodeGen/MachineLateInstrsCleanup.cpp
+++ b/llvm/lib/CodeGen/MachineLateInstrsCleanup.cpp
@@ -230,7 +230,7 @@ bool MachineLateInstrsCleanup::processBlock(MachineBasicBlock *MBB) {
if (MI.modifiesRegister(Reg, TRI)) {
MBBDefs.erase(Reg);
MBBKills.erase(Reg);
- } else if (MI.findRegisterUseOperandIdx(Reg, true /*isKill*/, TRI) != -1)
+ } else if (MI.findRegisterUseOperandIdx(Reg, TRI, true /*isKill*/) != -1)
// Keep track of register kills.
MBBKills[Reg] = &MI;
}
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index c3a1d3759882d8..3d40130b92c443 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -309,7 +309,7 @@ static bool blockPrologueInterferes(const MachineBasicBlock *BB,
if (PI->readsRegister(Reg, TRI))
return true;
// Check for interference with non-dead defs
- auto *DefOp = PI->findRegisterDefOperand(Reg, false, true, TRI);
+ auto *DefOp = PI->findRegisterDefOperand(Reg, TRI, false, true);
if (DefOp && !DefOp->isDead())
return true;
}
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index bdae94c4e6f885..5b52ef925fbc39 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -814,7 +814,7 @@ void ModuloScheduleExpander::splitLifetimes(MachineBasicBlock *KernelBB,
unsigned SplitReg = 0;
for (auto &BBJ : make_range(MachineBasicBlock::instr_iterator(MI),
KernelBB->instr_end()))
- if (BBJ.readsRegister(Def)) {
+ if (BBJ.readsRegister(Def, nullptr)) {
// We split the lifetime when we find the first use.
if (SplitReg == 0) {
SplitReg = MRI.createVirtualRegister(MRI.getRegClass(Def));
@@ -829,7 +829,7 @@ void ModuloScheduleExpander::splitLifetimes(MachineBasicBlock *KernelBB,
// Search through each of the epilog blocks for any uses to be renamed.
for (auto &Epilog : EpilogBBs)
for (auto &I : *Epilog)
- if (I.readsRegister(Def))
+ if (I.readsRegister(Def, nullptr))
I.substituteRegister(Def, SplitReg, 0, *TRI);
break;
}
@@ -1673,7 +1673,8 @@ void PeelingModuloScheduleExpander::moveStageBetweenBlocks(
// we don't need the phi anymore.
if (getStage(Def) == Stage) {
Register PhiReg = MI.getOperand(0).getReg();
- assert(Def->findRegisterDefOperandIdx(MI.getOperand(1).getReg()) != -1);
+ assert(Def->findRegisterDefOperandIdx(MI.getOperand(1).getReg(),
+ nullptr) != -1);
MRI.replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
MI.getOperand(0).setReg(PhiReg);
PhiToDelete.push_back(&MI);
@@ -1899,7 +1900,7 @@ Register
PeelingModuloScheduleExpander::getEquivalentRegisterIn(Register Reg,
MachineBasicBlock *BB) {
MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
- unsigned OpIdx = MI->findRegisterDefOperandIdx(Reg);
+ unsigned OpIdx = MI->findRegisterDefOperandIdx(Reg, nullptr);
return BlockMIs[{BB, CanonicalMIs[MI]}]->getOperand(OpIdx).getReg();
}
diff --git a/llvm/lib/CodeGen/PHIElimination.cpp b/llvm/lib/CodeGen/PHIElimination.cpp
index 18f8c001bd789a..83bec4f6f2a674 100644
--- a/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/llvm/lib/CodeGen/PHIElimination.cpp
@@ -549,7 +549,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
MachineBasicBlock::iterator KillInst = opBlock.end();
for (MachineBasicBlock::iterator Term = InsertPos; Term != opBlock.end();
++Term) {
- if (Term->readsRegister(SrcReg))
+ if (Term->readsRegister(SrcReg, nullptr))
KillInst = Term;
}
@@ -563,7 +563,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
--KillInst;
if (KillInst->isDebugInstr())
continue;
- if (KillInst->readsRegister(SrcReg))
+ if (KillInst->readsRegister(SrcReg, nullptr))
break;
}
} else {
@@ -571,7 +571,8 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
KillInst = NewSrcInstr;
}
}
- assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction");
+ assert(KillInst->readsRegister(SrcReg, nullptr) &&
+ "Cannot find kill instruction");
// Finally, mark it killed.
LV->addVirtualRegisterKilled(SrcReg, *KillInst);
@@ -607,7 +608,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
MachineBasicBlock::iterator KillInst = opBlock.end();
for (MachineBasicBlock::iterator Term = InsertPos;
Term != opBlock.end(); ++Term) {
- if (Term->readsRegister(SrcReg))
+ if (Term->readsRegister(SrcReg, nullptr))
KillInst = Term;
}
@@ -621,7 +622,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
--KillInst;
if (KillInst->isDebugInstr())
continue;
- if (KillInst->readsRegister(SrcReg))
+ if (KillInst->readsRegister(SrcReg, nullptr))
break;
}
} else {
@@ -629,7 +630,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
KillInst = std::prev(InsertPos);
}
}
- assert(KillInst->readsRegister(SrcReg) &&
+ assert(KillInst->readsRegister(SrcReg, nullptr) &&
"Cannot find kill instruction");
SlotIndex LastUseIndex = LIS->getInstructionIndex(*KillInst);
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 1b1f22e827cb1e..08e8b3d72861e3 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -1577,7 +1577,7 @@ bool PeepholeOptimizer::findTargetRecurrence(
return false;
MachineInstr &MI = *(MRI->use_instr_nodbg_begin(Reg));
- unsigned Idx = MI.findRegisterUseOperandIdx(Reg);
+ unsigned Idx = MI.findRegisterUseOperandIdx(Reg, nullptr);
// Only interested in recurrences whose instructions have only one def, which
// is a virtual register.
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 7e9c992031f8d3..40735b33ee37cc 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -723,7 +723,7 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
// If the source instruction was killing the source register before the
// merge, unset the isKill marker given the live range has been extended.
- int UIdx = ValSEndInst->findRegisterUseOperandIdx(IntB.reg(), true);
+ int UIdx = ValSEndInst->findRegisterUseOperandIdx(IntB.reg(), nullptr, true);
if (UIdx != -1) {
ValSEndInst->getOperand(UIdx).setIsKill(false);
}
@@ -848,7 +848,7 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
return { false, false };
// If DefMI is a two-address instruction then commuting it will change the
// destination register.
- int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg());
+ int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg(), nullptr);
assert(DefIdx != -1);
unsigned UseOpIdx;
if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 54409cbf91f1f7..759368a67a16cf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -1420,7 +1420,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
for (unsigned Reg : ECRegs) {
if (MIB->readsRegister(Reg, TRI)) {
MachineOperand *MO =
- MIB->findRegisterDefOperand(Reg, false, false, TRI);
+ MIB->findRegisterDefOperand(Reg, TRI, false, false);
assert(MO && "No def operand for clobbered register?");
MO->setIsEarlyClobber(false);
}
diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index 6d3fc740b292a8..0fe31eba6f4c2e 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -486,7 +486,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
++NumDead;
changed = true;
- if (NextMI->findRegisterUseOperandIdx(LoadReg, true, nullptr) != -1) {
+ if (NextMI->findRegisterUseOperandIdx(LoadReg, nullptr, true) != -1) {
++NumDead;
toErase.push_back(&*ProbableLoadMI);
}
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index ebacbc420f8580..ba5cbbf43915cb 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -339,7 +339,7 @@ bool TwoAddressInstructionPass::isPlainlyKilled(const MachineInstr *MI,
});
}
- return MI->killsRegister(Reg);
+ return MI->killsRegister(Reg, nullptr);
}
/// Test if the register used by the given operand is killed by the operand's
@@ -1355,8 +1355,10 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
<< "2addr: NEW INST: " << *NewMIs[1]);
// Transform the instruction, now that it no longer has a load.
- unsigned NewDstIdx = NewMIs[1]->findRegisterDefOperandIdx(regA);
- unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB);
+ unsigned NewDstIdx =
+ NewMIs[1]->findRegisterDefOperandIdx(regA, nullptr);
+ unsigned NewSrcIdx =
+ NewMIs[1]->findRegisterUseOperandIdx(regB, nullptr);
MachineBasicBlock::iterator NewMI = NewMIs[1];
bool TransformResult =
tryInstructionTransform(NewMI, mi, NewSrcIdx, NewDstIdx, Dist, true);
@@ -1371,19 +1373,19 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
if (MO.isReg() && MO.getReg().isVirtual()) {
if (MO.isUse()) {
if (MO.isKill()) {
- if (NewMIs[0]->killsRegister(MO.getReg()))
+ if (NewMIs[0]->killsRegister(MO.getReg(), nullptr))
LV->replaceKillInstruction(MO.getReg(), MI, *NewMIs[0]);
else {
- assert(NewMIs[1]->killsRegister(MO.getReg()) &&
+ assert(NewMIs[1]->killsRegister(MO.getReg(), nullptr) &&
"Kill missing after load unfold!");
LV->replaceKillInstruction(MO.getReg(), MI, *NewMIs[1]);
}
}
} else if (LV->removeVirtualRegisterDead(MO.getReg(), MI)) {
- if (NewMIs[1]->registerDefIsDead(MO.getReg()))
+ if (NewMIs[1]->registerDefIsDead(MO.getReg(), nullptr))
LV->addVirtualRegisterDead(MO.getReg(), *NewMIs[1]);
else {
- assert(NewMIs[0]->registerDefIsDead(MO.getReg()) &&
+ assert(NewMIs[0]->registerDefIsDead(MO.getReg(), nullptr) &&
"Dead flag missing after load unfold!");
LV->addVirtualRegisterDead(MO.getReg(), *NewMIs[0]);
}
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index 1c20e24e41d7ea..f24f8b7c40669c 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -163,7 +163,7 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
MachineInstr &I = *It;
assert(!I.isTerminator() && "Spurious terminator");
// Check if there is any use of NZCV between CMP and Bcc.
- if (I.readsRegister(AArch64::NZCV))
+ if (I.readsRegister(AArch64::NZCV, nullptr))
return nullptr;
switch (I.getOpcode()) {
// cmp is an alias for subs with a dead destination register.
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 17e0e3072db6ff..8fb585eec04f4d 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -299,7 +299,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
if (I == MBB->end())
return nullptr;
// The terminator must be controlled by the flags.
- if (!I->readsRegister(AArch64::NZCV)) {
+ if (!I->readsRegister(AArch64::NZCV, nullptr)) {
switch (I->getOpcode()) {
case AArch64::CBZW:
case AArch64::CBZX:
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 3e04cbae8acf18..377fa02c68999f 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -124,7 +124,8 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
LLVM_DEBUG(dbgs() << " Ignoring, operand is frame index\n");
continue;
}
- if (MI.definesRegister(AArch64::XZR) || MI.definesRegister(AArch64::WZR)) {
+ if (MI.definesRegister(AArch64::XZR, nullptr) ||
+ MI.definesRegister(AArch64::WZR, nullptr)) {
// It is not allowed to write to the same register (not even the zero
// register) twice in a single instruction.
LLVM_DEBUG(
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 22687b0e31c284..a5b635df8e8f9b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -643,7 +643,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
case AArch64::ADDSXri:
case AArch64::ADDSWri:
// if NZCV is used, do not fold.
- if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
+ if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true) == -1)
return 0;
// fall-through to ADDXri and ADDWri.
[[fallthrough]];
@@ -671,7 +671,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
case AArch64::SUBSXrr:
case AArch64::SUBSWrr:
// if NZCV is used, do not fold.
- if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
+ if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true) == -1)
return 0;
// fall-through to SUBXrr and SUBWrr.
[[fallthrough]];
@@ -1275,7 +1275,8 @@ static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
// Don't convert all compare instructions, because for some the zero register
// encoding becomes the sp register.
bool MIDefinesZeroReg = false;
- if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
+ if (MI.definesRegister(AArch64::WZR, nullptr) ||
+ MI.definesRegister(AArch64::XZR, nullptr))
MIDefinesZeroReg = true;
switch (MI.getOpcode()) {
@@ -1519,10 +1520,11 @@ bool AArch64InstrInfo::optimizeCompareInstr(
assert(MRI);
// Replace SUBSWrr with SUBWrr if NZCV is not used.
- int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
+ int DeadNZCVIdx =
+ CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true);
if (DeadNZCVIdx != -1) {
- if (CmpInstr.definesRegister(AArch64::WZR) ||
- CmpInstr.definesRegister(AArch64::XZR)) {
+ if (CmpInstr.definesRegister(AArch64::WZR, nullptr) ||
+ CmpInstr.definesRegister(AArch64::XZR, nullptr)) {
CmpInstr.eraseFromParent();
return true;
}
@@ -1623,7 +1625,7 @@ findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) {
return -1;
case AArch64::Bcc: {
- int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
+ int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV, nullptr);
assert(Idx >= 2);
return Idx - 2;
}
@@ -1638,7 +1640,7 @@ findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) {
case AArch64::CSNEGXr:
case AArch64::FCSELSrrr:
case AArch64::FCSELDrrr: {
- int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
+ int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV, nullptr);
assert(Idx >= 1);
return Idx - 1;
}
@@ -1846,7 +1848,7 @@ static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
return false;
// NZCV needs to be defined
- if (MI.findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
+ if (MI.findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true) != -1)
return false;
// CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0' or 'SUBS %vreg, 1'
@@ -5926,7 +5928,7 @@ static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
}
if (isCombineInstrSettingFlag(CombineOpc) &&
- MI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
+ MI->findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true) == -1)
return false;
return true;
@@ -6064,7 +6066,7 @@ static bool getMaddPatterns(MachineInstr &Root,
if (!isCombineInstrCandidate(Opc))
return false;
if (isCombineInstrSettingFlag(Opc)) {
- int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
+ int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true);
// When NZCV is live bail out.
if (Cmp_NZCV == -1)
return false;
@@ -6563,7 +6565,7 @@ static bool getMiscPatterns(MachineInstr &Root,
}
if (isCombineInstrSettingFlag(Opc) &&
- Root.findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
+ Root.findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true) == -1)
return false;
if (canCombine(MBB, Root.getOperand(2), AArch64::ADDWrr) ||
@@ -8032,7 +8034,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
DefMI->getOperand(2).getReg() == AArch64::XZR))
return false;
- if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
+ if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, nullptr, true) != -1)
return false;
AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
@@ -9238,8 +9240,9 @@ AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
(!MI.getOperand(0).getReg().isVirtual() ||
MI.getOperand(0).getSubReg() == 0) &&
(!MI.getOperand(0).getReg().isPhysical() ||
- MI.findRegisterDefOperandIdx(MI.getOperand(0).getReg() - AArch64::W0 +
- AArch64::X0) == -1))
+ MI.findRegisterDefOperandIdx(
+ MI.getOperand(0).getReg() - AArch64::W0 + AArch64::X0, nullptr) ==
+ -1))
return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
if (MI.getOpcode() == AArch64::ORRXrs &&
diff --git a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
index 05d60872bf51ac..0f41d39f776c01 100644
--- a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
@@ -245,7 +245,7 @@ static bool isCCSelectPair(const MachineInstr *FirstMI,
if (FirstMI == nullptr)
return true;
- if (FirstMI->definesRegister(AArch64::WZR))
+ if (FirstMI->definesRegister(AArch64::WZR, nullptr))
switch (FirstMI->getOpcode()) {
case AArch64::SUBSWrs:
return !AArch64InstrInfo::hasShiftedReg(*FirstMI);
@@ -263,7 +263,7 @@ static bool isCCSelectPair(const MachineInstr *FirstMI,
if (FirstMI == nullptr)
return true;
- if (FirstMI->definesRegister(AArch64::XZR))
+ if (FirstMI->definesRegister(AArch64::XZR, nullptr))
switch (FirstMI->getOpcode()) {
case AArch64::SUBSXrs:
return !AArch64InstrInfo::hasShiftedReg(*FirstMI);
diff --git a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index 1494312886a40d..dfd0e57efd0780 100644
--- a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -265,7 +265,7 @@ bool AArch64RedundantCopyElimination::knownRegValInBlock(
}
// Bail if we see an instruction that defines NZCV that we don't handle.
- if (PredI.definesRegister(AArch64::NZCV))
+ if (PredI.definesRegister(AArch64::NZCV, nullptr))
return false;
// Track clobbered and used registers.
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
index 94584e20f5ab3f..7440d6d22d247d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
@@ -199,10 +199,10 @@ bool AArch64PostSelectOptimize::optimizeNZCVDefs(MachineBasicBlock &MBB) {
for (auto &II : instructionsWithoutDebug(MBB.rbegin(), MBB.rend())) {
bool NZCVDead = LRU.available(AArch64::NZCV);
- if (NZCVDead && II.definesRegister(AArch64::NZCV)) {
+ if (NZCVDead && II.definesRegister(AArch64::NZCV, nullptr)) {
// The instruction defines NZCV, but NZCV is dead.
unsigned NewOpc = getNonFlagSettingVariant(II.getOpcode());
- int DeadNZCVIdx = II.findRegisterDefOperandIdx(AArch64::NZCV);
+ int DeadNZCVIdx = II.findRegisterDefOperandIdx(AArch64::NZCV, nullptr);
if (DeadNZCVIdx != -1) {
if (NewOpc) {
// If there is an equivalent non-flag-setting op, we convert.
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 75766b11ca8229..b42ec739ee517e 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -241,7 +241,7 @@ GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
(ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
(ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
(ST.hasReadM0LdsDirectHazard() &&
- MI->readsRegister(AMDGPU::LDS_DIRECT))) &&
+ MI->readsRegister(AMDGPU::LDS_DIRECT, nullptr))) &&
checkReadM0Hazards(MI) > 0)
return HazardType;
@@ -381,7 +381,8 @@ unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) ||
(ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
(ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
- (ST.hasReadM0LdsDirectHazard() && MI->readsRegister(AMDGPU::LDS_DIRECT)))
+ (ST.hasReadM0LdsDirectHazard() &&
+ MI->readsRegister(AMDGPU::LDS_DIRECT, nullptr)))
return std::max(WaitStates, checkReadM0Hazards(MI));
if (SIInstrInfo::isMAI(*MI))
@@ -1161,7 +1162,7 @@ bool GCNHazardRecognizer::fixVMEMtoScalarWriteHazards(MachineInstr *MI) {
for (const MachineOperand &Def : MI->defs()) {
const MachineOperand *Op =
- I.findRegisterUseOperand(Def.getReg(), false, TRI);
+ I.findRegisterUseOperand(Def.getReg(), TRI, false);
if (!Op)
continue;
return true;
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index 7f874b245b8f4f..98e7359357891a 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -207,11 +207,11 @@ bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
}
bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
- return MI.findRegisterUseOperandIdx(R600::AR_X, false, &RI) != -1;
+ return MI.findRegisterUseOperandIdx(R600::AR_X, &RI, false) != -1;
}
bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
- return MI.findRegisterDefOperandIdx(R600::AR_X, false, false, &RI) != -1;
+ return MI.findRegisterDefOperandIdx(R600::AR_X, &RI, false, false) != -1;
}
bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 8b21c22b449710..7fc643a51d45d1 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -947,8 +947,8 @@ void SIFixSGPRCopies::analyzeVGPRToSGPRCopy(MachineInstr* MI) {
(Inst->isCopy() && Inst->getOperand(0).getReg() == AMDGPU::SCC)) {
auto I = Inst->getIterator();
auto E = Inst->getParent()->end();
- while (++I != E && !I->findRegisterDefOperand(AMDGPU::SCC)) {
- if (I->readsRegister(AMDGPU::SCC))
+ while (++I != E && !I->findRegisterDefOperand(AMDGPU::SCC, nullptr)) {
+ if (I->readsRegister(AMDGPU::SCC, nullptr))
Users.push_back(&*I);
}
} else if (Inst->getNumExplicitDefs() != 0) {
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index bb499c5c8c578e..29ce6f64c54e97 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -2252,12 +2252,12 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
// Don't examine operands unless we need to track vccz correctness.
if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
- if (Inst.definesRegister(AMDGPU::VCC_LO) ||
- Inst.definesRegister(AMDGPU::VCC_HI)) {
+ if (Inst.definesRegister(AMDGPU::VCC_LO, nullptr) ||
+ Inst.definesRegister(AMDGPU::VCC_HI, nullptr)) {
// Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
if (!ST->partialVCCWritesUpdateVCCZ())
VCCZCorrect = false;
- } else if (Inst.definesRegister(AMDGPU::VCC)) {
+ } else if (Inst.definesRegister(AMDGPU::VCC, nullptr)) {
// There is a hardware bug on CI/SI where SMRD instruction may corrupt
// vccz bit, so when we detect that an instruction may read from a
// corrupt vccz bit, we need to:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index f4b21b7dfac391..adcb8189c71ed5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -164,7 +164,7 @@ static bool resultDependsOnExec(const MachineInstr &MI) {
break;
case AMDGPU::S_AND_B32:
case AMDGPU::S_AND_B64:
- if (!Use.readsRegister(AMDGPU::EXEC))
+ if (!Use.readsRegister(AMDGPU::EXEC, nullptr))
return true;
break;
default:
@@ -6689,7 +6689,7 @@ SIInstrInfo::legalizeOperands(MachineInstr &MI,
// Also include following copies of the return value
++End;
while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
- MI.definesRegister(End->getOperand(1).getReg()))
+ MI.definesRegister(End->getOperand(1).getReg(), nullptr))
++End;
CreatedBB =
loadMBUFScalarOperandsFromVGPR(*this, MI, {Dest}, MDT, Start, End);
@@ -7257,7 +7257,7 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
.add(Inst.getOperand(1));
}
legalizeOperands(*NewInstr, MDT);
- int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC);
+ int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC, nullptr);
MachineOperand SCCOp = Inst.getOperand(SCCIdx);
addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
Inst.eraseFromParent();
@@ -7523,7 +7523,7 @@ void SIInstrInfo::lowerSelect(SIInstrWorklist &Worklist, MachineInstr &Inst,
for (MachineInstr &CandI :
make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
Inst.getParent()->rend())) {
- if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
+ if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) !=
-1) {
if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
BuildMI(MBB, MII, DL, get(AMDGPU::COPY), NewCondReg)
@@ -8338,7 +8338,7 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
SCCDefInst.getParent()->end())) {
// Check if SCC is used first.
- int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI);
+ int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false);
if (SCCIdx != -1) {
if (MI.isCopy()) {
MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
@@ -8355,7 +8355,7 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
}
}
// Exit if we find another SCC def.
- if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
+ if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != -1)
break;
}
for (auto &Copy : CopyToDelete)
@@ -9408,7 +9408,7 @@ MachineInstr *SIInstrInfo::createPHIDestinationCopy(
auto Cur = MBB.begin();
if (Cur != MBB.end())
do {
- if (!Cur->isPHI() && Cur->readsRegister(Dst))
+ if (!Cur->isPHI() && Cur->readsRegister(Dst, nullptr))
return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
++Cur;
} while (Cur != MBB.end() && Cur != LastPHIIt);
@@ -9424,7 +9424,7 @@ MachineInstr *SIInstrInfo::createPHISourceCopy(
(InsPt->getOpcode() == AMDGPU::SI_IF ||
InsPt->getOpcode() == AMDGPU::SI_ELSE ||
InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
- InsPt->definesRegister(Src)) {
+ InsPt->definesRegister(Src, nullptr)) {
InsPt++;
return BuildMI(MBB, InsPt, DL,
get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
@@ -9796,7 +9796,7 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
return false;
}
- MachineOperand *SccDef = Def->findRegisterDefOperand(AMDGPU::SCC);
+ MachineOperand *SccDef = Def->findRegisterDefOperand(AMDGPU::SCC, nullptr);
SccDef->setIsDead(false);
CmpInstr.eraseFromParent();
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
index d2a5eb89da129c..c0febd2f08fd0c 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -456,7 +456,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
Register SavedExec = I->getOperand(0).getReg();
if (SavedExec.isVirtual() && MRI->hasOneNonDBGUse(SavedExec)) {
MachineInstr *SingleExecUser = &*MRI->use_instr_nodbg_begin(SavedExec);
- int Idx = SingleExecUser->findRegisterUseOperandIdx(SavedExec);
+ int Idx = SingleExecUser->findRegisterUseOperandIdx(SavedExec, nullptr);
assert(Idx != -1);
if (SingleExecUser->getParent() == I->getParent() &&
!SingleExecUser->getOperand(Idx).isImplicit() &&
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 82da53d0c0ebdd..10bc66ca6f3a6d 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -171,7 +171,7 @@ bool SIPreEmitPeephole::optimizeVccBranch(MachineInstr &MI) const {
if (A->getOpcode() == AndN2)
MaskValue = ~MaskValue;
- if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC)) {
+ if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC, nullptr)) {
if (!MI.killsRegister(CondReg, TRI)) {
// Replace AND with MOV
if (MaskValue == 0) {
@@ -235,7 +235,7 @@ bool SIPreEmitPeephole::optimizeVccBranch(MachineInstr &MI) const {
TII->get(IsVCCZ ? AMDGPU::S_CBRANCH_EXECZ : AMDGPU::S_CBRANCH_EXECNZ));
}
- MI.removeOperand(MI.findRegisterUseOperandIdx(CondReg, false /*Kill*/, TRI));
+ MI.removeOperand(MI.findRegisterUseOperandIdx(CondReg, TRI, false /*Kill*/));
MI.addImplicitDefUseOperands(*MBB.getParent());
return true;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 245731ad5fc7c9..26be9620ee8fa4 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -2373,8 +2373,8 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
return false;
}
- bool NeedSaveSCC =
- RS->isRegUsed(AMDGPU::SCC) && !MI->definesRegister(AMDGPU::SCC);
+ bool NeedSaveSCC = RS->isRegUsed(AMDGPU::SCC) &&
+ !MI->definesRegister(AMDGPU::SCC, nullptr);
Register TmpSReg =
UseSGPR ? TmpReg
@@ -2416,7 +2416,7 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
if (TmpSReg == FrameReg) {
// Undo frame register modification.
- if (NeedSaveSCC && !MI->registerDefIsDead(AMDGPU::SCC)) {
+ if (NeedSaveSCC && !MI->registerDefIsDead(AMDGPU::SCC, nullptr)) {
MachineBasicBlock::iterator I =
BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADDC_U32),
TmpSReg)
@@ -2446,8 +2446,8 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
// Convert to a swizzled stack address by scaling by the wave size.
// In an entry function/kernel the offset is already swizzled.
bool IsSALU = isSGPRClass(TII->getOpRegClass(*MI, FIOperandNum));
- bool LiveSCC =
- RS->isRegUsed(AMDGPU::SCC) && !MI->definesRegister(AMDGPU::SCC);
+ bool LiveSCC = RS->isRegUsed(AMDGPU::SCC) &&
+ !MI->definesRegister(AMDGPU::SCC, nullptr);
const TargetRegisterClass *RC = IsSALU && !LiveSCC
? &AMDGPU::SReg_32RegClass
: &AMDGPU::VGPR_32RegClass;
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 3c6f6ddfd89d0d..4d333429dd368f 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -1014,7 +1014,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// Copy deadness from the old explicit vcc def to the new implicit def.
if (SDst && SDst->isDead())
- Inst32->findRegisterDefOperand(VCCReg)->setIsDead();
+ Inst32->findRegisterDefOperand(VCCReg, nullptr)->setIsDead();
MI.eraseFromParent();
foldImmediates(*Inst32);
diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 5e6c34992930be..0a922fed4e1dd5 100644
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -1525,10 +1525,10 @@ void SIWholeQuadMode::lowerCopyInstrs() {
MI->getOperand(0).setIsEarlyClobber(false);
LIS->createAndComputeVirtRegInterval(Reg);
}
- int Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC);
+ int Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC, nullptr);
while (Index >= 0) {
MI->removeOperand(Index);
- Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC);
+ Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC, nullptr);
}
MI->setDesc(TII->get(AMDGPU::COPY));
LLVM_DEBUG(dbgs() << " -> " << *MI);
diff --git a/llvm/lib/Target/ARM/A15SDOptimizer.cpp b/llvm/lib/Target/ARM/A15SDOptimizer.cpp
index 3543cefeb399de..77e15a42d2b98c 100644
--- a/llvm/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/llvm/lib/Target/ARM/A15SDOptimizer.cpp
@@ -156,7 +156,7 @@ unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) {
MachineInstr *MI = MRI->getVRegDef(SReg);
if (!MI) return ARM::ssub_0;
- MachineOperand *MO = MI->findRegisterDefOperand(SReg);
+ MachineOperand *MO = MI->findRegisterDefOperand(SReg, nullptr);
if (!MO) return ARM::ssub_0;
assert(MO->isReg() && "Non-register operand found!");
@@ -192,7 +192,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
Register Reg = MO.getReg();
if (!Reg.isVirtual())
continue;
- MachineOperand *Op = MI->findRegisterDefOperand(Reg);
+ MachineOperand *Op = MI->findRegisterDefOperand(Reg, nullptr);
if (!Op)
continue;
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 5d0468948dfb61..585510d5eb79f5 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -326,7 +326,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
for (unsigned j = 0; j < 2; ++j) {
// Look at the two new MI's in reverse order.
MachineInstr *NewMI = NewMIs[j];
- if (!NewMI->readsRegister(Reg))
+ if (!NewMI->readsRegister(Reg, nullptr))
continue;
LV->addVirtualRegisterKilled(Reg, *NewMI);
if (VI.removeKill(MI))
@@ -1732,7 +1732,7 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
// Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
// or some other super-register.
- int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
+ int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD, nullptr);
if (ImpDefIdx != -1)
MI.removeOperand(ImpDefIdx);
@@ -2085,7 +2085,7 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
// Calls don't actually change the stack pointer, even if they have imp-defs.
// No ARM calling conventions change the stack pointer. (X86 calling
// conventions sometimes do).
- if (!MI.isCall() && MI.definesRegister(ARM::SP))
+ if (!MI.isCall() && MI.definesRegister(ARM::SP, nullptr))
return true;
return false;
@@ -4137,7 +4137,7 @@ static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
int Idx = -1;
while (II->isInsideBundle()) {
- Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
+ Idx = II->findRegisterDefOperandIdx(Reg, TRI, false, true);
if (Idx != -1)
break;
--II;
@@ -4161,7 +4161,7 @@ static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
// FIXME: This doesn't properly handle multiple uses.
int Idx = -1;
while (II != E && II->isInsideBundle()) {
- Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
+ Idx = II->findRegisterUseOperandIdx(Reg, TRI, false);
if (Idx != -1)
break;
if (II->getOpcode() != ARM::t2IT)
@@ -5361,7 +5361,7 @@ unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
case ARM::VMOVv2i32:
case ARM::VMOVv2f32:
case ARM::VMOVv1i64:
- UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
+ UseOp = MI.findRegisterUseOperandIdx(Reg, TRI, false);
break;
// Explicitly reads the dependency.
@@ -6092,7 +6092,7 @@ ARMBaseInstrInfo::getOutliningCandidateInfo(
bool ARMBaseInstrInfo::checkAndUpdateStackOffset(MachineInstr *MI,
int64_t Fixup,
bool Updt) const {
- int SPIdx = MI->findRegisterUseOperandIdx(ARM::SP);
+ int SPIdx = MI->findRegisterUseOperandIdx(ARM::SP, nullptr);
unsigned AddrMode = (MI->getDesc().TSFlags & ARMII::AddrModeMask);
if (SPIdx < 0)
// No SP operand
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 7a3ba5870bc6df..80aea1a22eeae5 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -1937,7 +1937,7 @@ bool ARMConstantIslands::optimizeThumb2Branches() {
// If the conditional branch doesn't kill CPSR, then CPSR can be liveout
// so this transformation is not safe.
- if (!Br.MI->killsRegister(ARM::CPSR))
+ if (!Br.MI->killsRegister(ARM::CPSR, nullptr))
return false;
Register PredReg;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 9b54dd4e4e618d..542bdbaf031922 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1873,7 +1873,7 @@ skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
case 1:
case 2:
case 4:
- assert(MI->killsRegister(ARM::R4) && "Missed kill flag");
+ assert(MI->killsRegister(ARM::R4, nullptr) && "Missed kill flag");
++MI;
}
return MI;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 3907131be6d131..217124c95934c9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -11796,9 +11796,9 @@ static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock::iterator miI(std::next(SelectItr));
for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
const MachineInstr& mi = *miI;
- if (mi.readsRegister(ARM::CPSR))
+ if (mi.readsRegister(ARM::CPSR, nullptr))
return false;
- if (mi.definesRegister(ARM::CPSR))
+ if (mi.definesRegister(ARM::CPSR, nullptr))
break; // Should have kill-flag - update below.
}
@@ -12157,7 +12157,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
// Check whether CPSR is live past the tMOVCCr_pseudo.
const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
- if (!MI.killsRegister(ARM::CPSR) &&
+ if (!MI.killsRegister(ARM::CPSR, nullptr) &&
!checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
copy0MBB->addLiveIn(ARM::CPSR);
sinkMBB->addLiveIn(ARM::CPSR);
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 469340784284cb..7e45d641aa6c6a 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -495,7 +495,7 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
bool InsertSub = false;
unsigned Opc = MBBI->getOpcode();
- if (MBBI->readsRegister(Base)) {
+ if (MBBI->readsRegister(Base, nullptr)) {
int Offset;
bool IsLoad =
Opc == ARM::tLDRi || Opc == ARM::tLDRHi || Opc == ARM::tLDRBi;
@@ -560,7 +560,8 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
return;
}
- if (MBBI->killsRegister(Base) || MBBI->definesRegister(Base))
+ if (MBBI->killsRegister(Base, nullptr) ||
+ MBBI->definesRegister(Base, nullptr))
// Register got killed. Stop updating.
return;
}
@@ -888,7 +889,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
if (is_contained(ImpDefs, DefReg))
continue;
// We can ignore cases where the super-reg is read and written.
- if (MI->readsRegister(DefReg))
+ if (MI->readsRegister(DefReg, nullptr))
continue;
ImpDefs.push_back(DefReg);
}
@@ -903,7 +904,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
MachineBasicBlock &MBB = *LatestMI->getParent();
unsigned Offset = getMemoryOpOffset(*First);
Register Base = getLoadStoreBaseOp(*First).getReg();
- bool BaseKill = LatestMI->killsRegister(Base);
+ bool BaseKill = LatestMI->killsRegister(Base, nullptr);
Register PredReg;
ARMCC::CondCodes Pred = getInstrPredicate(*First, PredReg);
DebugLoc DL = First->getDebugLoc();
@@ -2076,7 +2077,8 @@ bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator Prev = MBBI;
--Prev;
- if (Prev->getOpcode() != ARM::tMOVr || !Prev->definesRegister(ARM::LR))
+ if (Prev->getOpcode() != ARM::tMOVr ||
+ !Prev->definesRegister(ARM::LR, nullptr))
return false;
for (auto Use : Prev->uses())
@@ -3176,7 +3178,7 @@ bool ARMPreAllocLoadStoreOpt::DistributeIncrements(Register Base) {
if (PrePostInc || BaseAccess->getParent() != Increment->getParent())
return false;
Register PredReg;
- if (Increment->definesRegister(ARM::CPSR) ||
+ if (Increment->definesRegister(ARM::CPSR, nullptr) ||
getInstrPredicate(*Increment, PredReg) != ARMCC::AL)
return false;
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
index 8629551152cb64..ece8c561fb312e 100644
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -91,11 +91,11 @@ static bool isVectorPredicated(MachineInstr *MI) {
}
static bool isVectorPredicate(MachineInstr *MI) {
- return MI->findRegisterDefOperandIdx(ARM::VPR) != -1;
+ return MI->findRegisterDefOperandIdx(ARM::VPR, nullptr) != -1;
}
static bool hasVPRUse(MachineInstr &MI) {
- return MI.findRegisterUseOperandIdx(ARM::VPR) != -1;
+ return MI.findRegisterUseOperandIdx(ARM::VPR, nullptr) != -1;
}
static bool isDomainMVE(MachineInstr *MI) {
@@ -564,7 +564,8 @@ static bool TryRemove(MachineInstr *MI, ReachingDefAnalysis &RDA,
SmallPtrSet<MachineInstr *, 2> ModifiedITs;
SmallPtrSet<MachineInstr *, 2> RemoveITs;
for (auto *Dead : Killed) {
- if (MachineOperand *MO = Dead->findRegisterUseOperand(ARM::ITSTATE)) {
+ if (MachineOperand *MO =
+ Dead->findRegisterUseOperand(ARM::ITSTATE, nullptr)) {
MachineInstr *IT = RDA.getMIOperand(Dead, *MO);
RemoveITs.insert(IT);
auto &CurrentBlock = ITBlocks[IT];
diff --git a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
index e8d2cba7ee556f..1c2769bf0b68ba 100644
--- a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
+++ b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
@@ -666,18 +666,18 @@ static bool MoveVPNOTBeforeFirstUser(MachineBasicBlock &MBB,
bool MustMove = false, HasUser = false;
MachineOperand *VPNOTOperandKiller = nullptr;
for (; Iter != MBB.end(); ++Iter) {
- if (MachineOperand *MO =
- Iter->findRegisterUseOperand(VPNOTOperand, /*isKill*/ true)) {
+ if (MachineOperand *MO = Iter->findRegisterUseOperand(VPNOTOperand, nullptr,
+ /*isKill*/ true)) {
// If we find the operand that kills the VPNOTOperand's result, save it.
VPNOTOperandKiller = MO;
}
- if (Iter->findRegisterUseOperandIdx(Reg) != -1) {
+ if (Iter->findRegisterUseOperandIdx(Reg, nullptr) != -1) {
MustMove = true;
continue;
}
- if (Iter->findRegisterUseOperandIdx(VPNOTResult) == -1)
+ if (Iter->findRegisterUseOperandIdx(VPNOTResult, nullptr) == -1)
continue;
HasUser = true;
@@ -731,7 +731,7 @@ bool MVETPAndVPTOptimisations::ReduceOldVCCRValueUses(MachineBasicBlock &MBB) {
// If we already have a VCCRValue, and this is a VPNOT on VCCRValue, we've
// found what we were looking for.
if (VCCRValue && Iter->getOpcode() == ARM::MVE_VPNOT &&
- Iter->findRegisterUseOperandIdx(VCCRValue) != -1) {
+ Iter->findRegisterUseOperandIdx(VCCRValue, nullptr) != -1) {
// Move the VPNOT closer to its first user if needed, and ignore if it
// has no users.
if (!MoveVPNOTBeforeFirstUser(MBB, Iter, VCCRValue))
@@ -763,7 +763,8 @@ bool MVETPAndVPTOptimisations::ReduceOldVCCRValueUses(MachineBasicBlock &MBB) {
for (; Iter != End; ++Iter) {
bool IsInteresting = false;
- if (MachineOperand *MO = Iter->findRegisterUseOperand(VCCRValue)) {
+ if (MachineOperand *MO =
+ Iter->findRegisterUseOperand(VCCRValue, nullptr)) {
IsInteresting = true;
// - If the instruction is a VPNOT, it can be removed, and we can just
@@ -795,7 +796,7 @@ bool MVETPAndVPTOptimisations::ReduceOldVCCRValueUses(MachineBasicBlock &MBB) {
// If the instr uses OppositeVCCRValue, make it use LastVPNOTResult
// instead as they contain the same value.
if (MachineOperand *MO =
- Iter->findRegisterUseOperand(OppositeVCCRValue)) {
+ Iter->findRegisterUseOperand(OppositeVCCRValue, nullptr)) {
IsInteresting = true;
// This is pointless if LastVPNOTResult == OppositeVCCRValue.
@@ -856,7 +857,7 @@ bool MVETPAndVPTOptimisations::ReplaceVCMPsByVPNOTs(MachineBasicBlock &MBB) {
for (MachineInstr &Instr : MBB.instrs()) {
if (PrevVCMP) {
if (MachineOperand *MO = Instr.findRegisterUseOperand(
- PrevVCMP->getOperand(0).getReg(), /*isKill*/ true)) {
+ PrevVCMP->getOperand(0).getReg(), nullptr, /*isKill*/ true)) {
// If we come accross the instr that kills PrevVCMP's result, record it
// so we can remove the kill flag later if we need to.
PrevVCMPResultKiller = MO;
diff --git a/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp b/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
index d2b0bcf1250fe7..b3e7f5f5fda476 100644
--- a/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
+++ b/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
@@ -131,7 +131,8 @@ static bool StepOverPredicatedInstrs(MachineBasicBlock::instr_iterator &Iter,
static bool IsVPRDefinedOrKilledByBlock(MachineBasicBlock::iterator Iter,
MachineBasicBlock::iterator End) {
for (; Iter != End; ++Iter)
- if (Iter->definesRegister(ARM::VPR) || Iter->killsRegister(ARM::VPR))
+ if (Iter->definesRegister(ARM::VPR, nullptr) ||
+ Iter->killsRegister(ARM::VPR, nullptr))
return true;
return false;
}
diff --git a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index 2945b5eaae3e3f..d2f88e269f5812 100644
--- a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -269,7 +269,7 @@ bool Thumb2ITBlock::InsertITInstructions(MachineBasicBlock &MBB) {
MIB.addImm(Mask);
// Last instruction in IT block kills ITSTATE.
- LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill();
+ LastITMI->findRegisterUseOperand(ARM::ITSTATE, nullptr)->setIsKill();
// Finalize the bundle.
finalizeBundle(MBB, InsertPos.getInstrIterator(),
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index 083f25f49dec45..1cddc1c0bfc2df 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -552,7 +552,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
Register PredReg;
if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
- !MI.definesRegister(ARM::CPSR)) {
+ !MI.definesRegister(ARM::CPSR, nullptr)) {
// Turn it into a move.
MI.setDesc(TII.get(ARM::tMOVr));
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 286010e2ba2327..b5058d071c5154 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -1097,12 +1097,12 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB,
// marker is only on the BUNDLE instruction. Process the BUNDLE
// instruction as we finish with the bundled instruction to work around
// the inconsistency.
- if (BundleMI->killsRegister(ARM::CPSR))
+ if (BundleMI->killsRegister(ARM::CPSR, nullptr))
LiveCPSR = false;
- MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
+ MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR, nullptr);
if (MO && !MO->isDead())
LiveCPSR = true;
- MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
+ MO = BundleMI->findRegisterUseOperand(ARM::CPSR, nullptr);
if (MO && !MO->isKill())
LiveCPSR = true;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 310993662b672f..66141d229a4cff 100644
--- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -283,7 +283,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1,
// uses I2's use reg we need to modify that (first) instruction to now kill
// this reg.
unsigned KilledOperand = 0;
- if (I2.killsRegister(I2UseReg))
+ if (I2.killsRegister(I2UseReg, nullptr))
KilledOperand = I2UseReg;
MachineInstr *KillingInstr = nullptr;
@@ -360,11 +360,12 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1,
if (isUnsafeToMoveAcross(MI, I1UseReg, I1DestReg, TRI) ||
// Check for an aliased register kill. Bail out if we see one.
- (!MI.killsRegister(I1UseReg) && MI.killsRegister(I1UseReg, TRI)))
+ (!MI.killsRegister(I1UseReg, nullptr) &&
+ MI.killsRegister(I1UseReg, TRI)))
return false;
// Check for an exact kill (registers match).
- if (I1UseReg && MI.killsRegister(I1UseReg)) {
+ if (I1UseReg && MI.killsRegister(I1UseReg, nullptr)) {
assert(!KillingInstr && "Should only see one killing instruction");
KilledOperand = I1UseReg;
KillingInstr = &MI;
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index e1005296d63752..5fc40442b72101 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -779,7 +779,8 @@ MachineInstr *HexagonExpandCondsets::getReachingDefForPred(RegisterRef RD,
// Check if this instruction can be ignored, i.e. if it is predicated
// on the complementary condition.
if (PredValid && HII->isPredicated(*MI)) {
- if (MI->readsRegister(PredR) && (Cond != HII->isPredicatedTrue(*MI)))
+ if (MI->readsRegister(PredR, nullptr) &&
+ (Cond != HII->isPredicatedTrue(*MI)))
continue;
}
@@ -937,7 +938,8 @@ void HexagonExpandCondsets::renameInRange(RegisterRef RO, RegisterRef RN,
// on the opposite condition.
if (!HII->isPredicated(MI))
continue;
- if (!MI.readsRegister(PredR) || (Cond != HII->isPredicatedTrue(MI)))
+ if (!MI.readsRegister(PredR, nullptr) ||
+ (Cond != HII->isPredicatedTrue(MI)))
continue;
for (auto &Op : MI.operands()) {
@@ -1007,7 +1009,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
// By default assume that the instruction executes on the same condition
// as TfrI (Exec_Then), and also on the opposite one (Exec_Else).
unsigned Exec = Exec_Then | Exec_Else;
- if (PredValid && HII->isPredicated(MI) && MI.readsRegister(PredR))
+ if (PredValid && HII->isPredicated(MI) && MI.readsRegister(PredR, nullptr))
Exec = (Cond == HII->isPredicatedTrue(MI)) ? Exec_Then : Exec_Else;
for (auto &Op : MI.operands()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index b9bf26ba7cca1e..c7f1ac3337cc02 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -3517,7 +3517,7 @@ unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr &GA,
(GB.getOpcode() != Hexagon::J2_jumptnew))
return -1u;
Register DestReg = GA.getOperand(0).getReg();
- if (!GB.readsRegister(DestReg))
+ if (!GB.readsRegister(DestReg, nullptr))
return -1u;
if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
return -1u;
@@ -4334,7 +4334,7 @@ std::optional<unsigned> HexagonInstrInfo::getOperandLatency(
if (DefMO.isReg() && DefMO.getReg().isPhysical()) {
if (DefMO.isImplicit()) {
for (MCPhysReg SR : HRI.superregs(DefMO.getReg())) {
- int Idx = DefMI.findRegisterDefOperandIdx(SR, false, false, &HRI);
+ int Idx = DefMI.findRegisterDefOperandIdx(SR, &HRI, false, false);
if (Idx != -1) {
DefIdx = Idx;
break;
@@ -4345,7 +4345,7 @@ std::optional<unsigned> HexagonInstrInfo::getOperandLatency(
const MachineOperand &UseMO = UseMI.getOperand(UseIdx);
if (UseMO.isImplicit()) {
for (MCPhysReg SR : HRI.superregs(UseMO.getReg())) {
- int Idx = UseMI.findRegisterUseOperandIdx(SR, false, &HRI);
+ int Idx = UseMI.findRegisterUseOperandIdx(SR, &HRI, false);
if (Idx != -1) {
UseIdx = Idx;
break;
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index 786aa7bcb64ea0..74426a6ac896d3 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -3075,9 +3075,9 @@ static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock::iterator miI(std::next(SelectItr));
for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
const MachineInstr &mi = *miI;
- if (mi.readsRegister(M68k::CCR))
+ if (mi.readsRegister(M68k::CCR, nullptr))
return false;
- if (mi.definesRegister(M68k::CCR))
+ if (mi.definesRegister(M68k::CCR, nullptr))
break; // Should have kill-flag - update below.
}
diff --git a/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index cb98c04ff4e501..d9a0e90b1e4394 100644
--- a/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -365,7 +365,8 @@ void RegDefsUses::setCallerSaved(const MachineInstr &MI) {
// Add RA/RA_64 to Defs to prevent users of RA/RA_64 from going into
// the delay slot. The reason is that RA/RA_64 must not be changed
// in the delay slot so that the callee can return to the caller.
- if (MI.definesRegister(Mips::RA) || MI.definesRegister(Mips::RA_64)) {
+ if (MI.definesRegister(Mips::RA, nullptr) ||
+ MI.definesRegister(Mips::RA_64, nullptr)) {
Defs.set(Mips::RA);
Defs.set(Mips::RA_64);
}
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 392cc15d7943af..0c28acaf7b1c6a 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -619,7 +619,7 @@ bool MipsInstrInfo::SafeInLoadDelaySlot(const MachineInstr &MIInSlot,
return false;
return !llvm::any_of(LoadMI.defs(), [&](const MachineOperand &Op) {
- return Op.isReg() && MIInSlot.readsRegister(Op.getReg());
+ return Op.isReg() && MIInSlot.readsRegister(Op.getReg(), nullptr);
});
}
@@ -699,7 +699,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
bool BranchWithZeroOperand = false;
if (I->isBranch() && !I->isPseudo()) {
auto TRI = I->getParent()->getParent()->getSubtarget().getRegisterInfo();
- ZeroOperandPosition = I->findRegisterUseOperandIdx(Mips::ZERO, false, TRI);
+ ZeroOperandPosition = I->findRegisterUseOperandIdx(Mips::ZERO, TRI, false);
BranchWithZeroOperand = ZeroOperandPosition != -1;
}
diff --git a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
index 5299c0c924b3c5..d118bef480fdea 100644
--- a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -123,10 +123,12 @@ bool PPCCTRLoops::isCTRClobber(MachineInstr *MI, bool CheckReads) const {
// CTR defination inside the callee of a call instruction will not impact
// the defination of MTCTRloop, so we can use definesRegister() for the
// check, no need to check the regmask.
- return MI->definesRegister(PPC::CTR) || MI->definesRegister(PPC::CTR8);
+ return MI->definesRegister(PPC::CTR, nullptr) ||
+ MI->definesRegister(PPC::CTR8, nullptr);
}
- if (MI->modifiesRegister(PPC::CTR) || MI->modifiesRegister(PPC::CTR8))
+ if (MI->modifiesRegister(PPC::CTR, nullptr) ||
+ MI->modifiesRegister(PPC::CTR8, nullptr))
return true;
if (MI->getDesc().isCall())
@@ -134,7 +136,8 @@ bool PPCCTRLoops::isCTRClobber(MachineInstr *MI, bool CheckReads) const {
// We define the CTR in the loop preheader, so if there is any CTR reader in
// the loop, we also can not use CTR loop form.
- if (MI->readsRegister(PPC::CTR) || MI->readsRegister(PPC::CTR8))
+ if (MI->readsRegister(PPC::CTR, nullptr) ||
+ MI->readsRegister(PPC::CTR8, nullptr))
return true;
return false;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 5f5eb31a5a85fa..3c1cd464db8b41 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2111,7 +2111,8 @@ bool PPCInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
static bool MBBDefinesCTR(MachineBasicBlock &MBB) {
for (MachineInstr &MI : MBB)
- if (MI.definesRegister(PPC::CTR) || MI.definesRegister(PPC::CTR8))
+ if (MI.definesRegister(PPC::CTR, nullptr) ||
+ MI.definesRegister(PPC::CTR8, nullptr))
return true;
return false;
}
@@ -2717,19 +2718,19 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
MI->setDesc(NewDesc);
for (MCPhysReg ImpDef : NewDesc.implicit_defs()) {
- if (!MI->definesRegister(ImpDef)) {
+ if (!MI->definesRegister(ImpDef, nullptr)) {
MI->addOperand(*MI->getParent()->getParent(),
MachineOperand::CreateReg(ImpDef, true, true));
}
}
for (MCPhysReg ImpUse : NewDesc.implicit_uses()) {
- if (!MI->readsRegister(ImpUse)) {
+ if (!MI->readsRegister(ImpUse, nullptr)) {
MI->addOperand(*MI->getParent()->getParent(),
MachineOperand::CreateReg(ImpUse, false, true));
}
}
}
- assert(MI->definesRegister(PPC::CR0) &&
+ assert(MI->definesRegister(PPC::CR0, nullptr) &&
"Record-form instruction does not define cr0?");
// Modify the condition code of operands in OperandsToUpdate.
@@ -2779,7 +2780,7 @@ bool PPCInstrInfo::optimizeCmpPostRA(MachineInstr &CmpMI) const {
bool SrcRegHasOtherUse = false;
MachineInstr *SrcMI = getDefMIPostRA(SrcReg, CmpMI, SrcRegHasOtherUse);
- if (!SrcMI || !SrcMI->definesRegister(SrcReg))
+ if (!SrcMI || !SrcMI->definesRegister(SrcReg, nullptr))
return false;
MachineOperand RegMO = CmpMI.getOperand(0);
@@ -2792,7 +2793,7 @@ bool PPCInstrInfo::optimizeCmpPostRA(MachineInstr &CmpMI) const {
bool IsCRRegKilled = false;
if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI, false, IsCRRegKilled,
SeenUseOfCRReg) ||
- SrcMI->definesRegister(CRReg) || SeenUseOfCRReg)
+ SrcMI->definesRegister(CRReg, nullptr) || SeenUseOfCRReg)
return false;
int SrcMIOpc = SrcMI->getOpcode();
@@ -2809,7 +2810,7 @@ bool PPCInstrInfo::optimizeCmpPostRA(MachineInstr &CmpMI) const {
.addReg(CRReg, RegState::ImplicitDefine);
SrcMI->clearRegisterDeads(CRReg);
- assert(SrcMI->definesRegister(PPC::CR0) &&
+ assert(SrcMI->definesRegister(PPC::CR0, nullptr) &&
"Record-form instruction does not define cr0?");
LLVM_DEBUG(dbgs() << "with: ");
@@ -3279,7 +3280,7 @@ void PPCInstrInfo::replaceInstrOperandWithImm(MachineInstr &MI,
// result its number of explicit operands may be changed, thus the begin of
// implicit operand is changed.
const TargetRegisterInfo *TRI = &getRegisterInfo();
- int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, false, TRI);
+ int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, TRI, false);
if (UseOpIdx >= 0) {
MachineOperand &MO = MI.getOperand(UseOpIdx);
if (MO.isImplicit())
diff --git a/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
index 6e3bf26a598a9a..a182be3ea712b4 100644
--- a/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
@@ -158,7 +158,7 @@ static bool hasPCRelativeForm(MachineInstr &Use) {
++AfterBBI) {
// Track the operand that kill Reg. We would unset the kill flag of
// the operand if there is a following redundant load immediate.
- int KillIdx = AfterBBI->findRegisterUseOperandIdx(Reg, true, TRI);
+ int KillIdx = AfterBBI->findRegisterUseOperandIdx(Reg, TRI, true);
// We can't just clear implicit kills, so if we encounter one, stop
// looking further.
@@ -204,7 +204,7 @@ static bool hasPCRelativeForm(MachineInstr &Use) {
DeadOrKillToUnset->setIsKill(false);
}
DeadOrKillToUnset =
- AfterBBI->findRegisterDefOperand(Reg, true, true, TRI);
+ AfterBBI->findRegisterDefOperand(Reg, TRI, true, true);
if (DeadOrKillToUnset)
LLVM_DEBUG(dbgs()
<< " Dead flag of " << *DeadOrKillToUnset << " from "
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 0f450a4bf9692b..bf650aacda4ef0 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -1013,8 +1013,8 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
Register DestReg = MI.getOperand(0).getReg();
- assert(MI.definesRegister(DestReg) &&
- "RESTORE_CR does not define its destination");
+ assert(MI.definesRegister(DestReg, nullptr) &&
+ "RESTORE_CR does not define its destination");
addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
Reg), FrameIndex);
@@ -1175,8 +1175,8 @@ void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
Register DestReg = MI.getOperand(0).getReg();
- assert(MI.definesRegister(DestReg) &&
- "RESTORE_CRBIT does not define its destination");
+ assert(MI.definesRegister(DestReg, nullptr) &&
+ "RESTORE_CRBIT does not define its destination");
addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
Reg), FrameIndex);
@@ -1363,7 +1363,7 @@ void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II,
DebugLoc DL = MI.getDebugLoc();
Register DestReg = MI.getOperand(0).getReg();
- assert(MI.definesRegister(DestReg) &&
+ assert(MI.definesRegister(DestReg, nullptr) &&
"RESTORE_ACC does not define its destination");
bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
@@ -1491,7 +1491,7 @@ void PPCRegisterInfo::lowerQuadwordRestore(MachineBasicBlock::iterator II,
DebugLoc DL = MI.getDebugLoc();
Register DestReg = MI.getOperand(0).getReg();
- assert(MI.definesRegister(DestReg) &&
+ assert(MI.definesRegister(DestReg, nullptr) &&
"RESTORE_QUADWORD does not define its destination");
Register Reg = PPC::X0 + (DestReg - PPC::G8p0) * 2;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 279d8a435a04ca..779246aeba5090 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17931,7 +17931,7 @@ void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
return;
// If the instruction already reads FRM, don't add another read.
- if (MI.readsRegister(RISCV::FRM))
+ if (MI.readsRegister(RISCV::FRM, nullptr))
return;
MI.addOperand(
MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
index aac0ecc1cfc9b8..6b359beec90de1 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -82,7 +82,8 @@ bool RISCVInsertReadWriteCSR::emitWriteRoundingModeOpt(MachineBasicBlock &MBB) {
continue;
}
- if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::FRM)) {
+ if (MI.isCall() || MI.isInlineAsm() ||
+ MI.readsRegister(RISCV::FRM, nullptr)) {
// Restore FRM before unknown operations.
if (SavedFRM.isValid())
BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteFRM))
@@ -92,7 +93,7 @@ bool RISCVInsertReadWriteCSR::emitWriteRoundingModeOpt(MachineBasicBlock &MBB) {
continue;
}
- assert(!MI.modifiesRegister(RISCV::FRM) &&
+ assert(!MI.modifiesRegister(RISCV::FRM, nullptr) &&
"Expected that MI could not modify FRM.");
int FRMIdx = RISCVII::getFRMOpNum(MI.getDesc());
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index a14f9a28354737..d9ec4b1444a024 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -356,9 +356,10 @@ DemandedFields getDemanded(const MachineInstr &MI,
// Most instructions don't use any of these subfeilds.
DemandedFields Res;
// Start conservative if registers are used
- if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VL))
+ if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VL, nullptr))
Res.demandVL();
- if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VTYPE))
+ if (MI.isCall() || MI.isInlineAsm() ||
+ MI.readsRegister(RISCV::VTYPE, nullptr))
Res.demandVTYPE();
// Start conservative on the unlowered form too
uint64_t TSFlags = MI.getDesc().TSFlags;
@@ -1156,8 +1157,9 @@ void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
// If this is something that updates VL/VTYPE that we don't know about, set
// the state to unknown.
- if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
- MI.modifiesRegister(RISCV::VTYPE))
+ if (MI.isCall() || MI.isInlineAsm() ||
+ MI.modifiesRegister(RISCV::VL, nullptr) ||
+ MI.modifiesRegister(RISCV::VTYPE, nullptr))
Info = VSETVLIInfo::getUnknown();
}
@@ -1340,8 +1342,9 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
/*isImp*/ true));
}
- if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
- MI.modifiesRegister(RISCV::VTYPE))
+ if (MI.isCall() || MI.isInlineAsm() ||
+ MI.modifiesRegister(RISCV::VL, nullptr) ||
+ MI.modifiesRegister(RISCV::VTYPE, nullptr))
PrefixTransparent = false;
transferAfter(CurInfo, MI);
diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index e487cc8b2e20c9..eb83a2db11f34e 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -225,7 +225,8 @@ bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
continue;
}
- if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM)) {
+ if (MI.isCall() || MI.isInlineAsm() ||
+ MI.modifiesRegister(RISCV::VXRM, nullptr)) {
if (!BBInfo.VXRMUse.isValid())
BBInfo.VXRMUse.setUnknown();
@@ -386,7 +387,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
continue;
}
- if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM))
+ if (MI.isCall() || MI.isInlineAsm() ||
+ MI.modifiesRegister(RISCV::VXRM, nullptr))
Info.setUnknown();
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 5582de51b17d19..d37aa804bcb6ee 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -239,7 +239,7 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
} else if (MBBI->getNumDefs()) {
// Check all the instructions which will change VL.
// For example, vleff has implicit def VL.
- if (MBBI->modifiesRegister(RISCV::VL))
+ if (MBBI->modifiesRegister(RISCV::VL, nullptr))
return false;
// Only converting whole register copies to vmv.v.v when the defining
@@ -3274,8 +3274,8 @@ RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
}
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
- return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
- !MI.isInlineAsm();
+ return MI.getNumExplicitDefs() == 2 &&
+ MI.modifiesRegister(RISCV::VL, nullptr) && !MI.isInlineAsm();
}
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index 39d420c2fbf080..3b3ddb8fc609c3 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -409,7 +409,7 @@ static bool isSignExtendedW(Register SrcReg, const RISCVSubtarget &ST,
if (!MI)
continue;
- int OpNo = MI->findRegisterDefOperandIdx(Reg);
+ int OpNo = MI->findRegisterDefOperandIdx(Reg, nullptr);
assert(OpNo != -1 && "Couldn't find register");
// If this is a sign extending operation we don't need to look any further.
diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index e58f50e471fc0e..92fe0baea01aa7 100644
--- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -633,7 +633,7 @@ bool SystemZElimCompare::fuseCompareOperations(
RegMask = MBBI->getOperand(3).getRegMask();
// Clear out all current operands.
- int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI);
+ int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, TRI, false);
assert(CCUse >= 0 && "BRC/BCR must use CC");
Branch->removeOperand(CCUse);
// Remove regmask (sibcall).
@@ -707,11 +707,11 @@ bool SystemZElimCompare::processBlock(MachineBasicBlock &MBB) {
continue;
}
- if (MI.definesRegister(SystemZ::CC)) {
+ if (MI.definesRegister(SystemZ::CC, nullptr)) {
CCUsers.clear();
CompleteCCUsers = true;
}
- if (MI.readsRegister(SystemZ::CC) && CompleteCCUsers)
+ if (MI.readsRegister(SystemZ::CC, nullptr) && CompleteCCUsers)
CCUsers.push_back(&MI);
}
return Changed;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index efffd669d268b5..8cc556e6a599fc 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -8082,9 +8082,9 @@ static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
const MachineInstr& mi = *miI;
- if (mi.readsRegister(SystemZ::CC))
+ if (mi.readsRegister(SystemZ::CC, nullptr))
return false;
- if (mi.definesRegister(SystemZ::CC))
+ if (mi.definesRegister(SystemZ::CC, nullptr))
break; // Should have kill-flag - update below.
}
@@ -8223,7 +8223,8 @@ SystemZTargetLowering::emitSelect(MachineInstr &MI,
}
break;
}
- if (NextMI.definesRegister(SystemZ::CC) || NextMI.usesCustomInsertionHook())
+ if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
+ NextMI.usesCustomInsertionHook())
break;
bool User = false;
for (auto *SelMI : Selects)
@@ -8241,8 +8242,8 @@ SystemZTargetLowering::emitSelect(MachineInstr &MI,
}
MachineInstr *LastMI = Selects.back();
- bool CCKilled =
- (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB));
+ bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
+ checkCCKill(*LastMI, MBB));
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *JoinMBB = SystemZ::splitBlockAfter(LastMI, MBB);
MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);
@@ -8342,7 +8343,7 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
// Unless CC was killed in the CondStore instruction, mark it as
// live-in to both FalseMBB and JoinMBB.
- if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
+ if (!MI.killsRegister(SystemZ::CC, nullptr) && !checkCCKill(MI, JoinMBB)) {
FalseMBB->addLiveIn(SystemZ::CC);
JoinMBB->addLiveIn(SystemZ::CC);
}
@@ -8745,7 +8746,7 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
// If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
// to the block after the loop. At this point, CC may have been defined
// either by the CR in LoopMBB or by the CS in SetMBB.
- if (!MI.registerDefIsDead(SystemZ::CC))
+ if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
DoneMBB->addLiveIn(SystemZ::CC);
MI.eraseFromParent();
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 950548abcfa92c..3b7435a12dfd23 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -938,8 +938,8 @@ static LogicOp interpretAndImmediate(unsigned Opcode) {
}
static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
- if (OldMI->registerDefIsDead(SystemZ::CC)) {
- MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
+ if (OldMI->registerDefIsDead(SystemZ::CC, nullptr)) {
+ MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC, nullptr);
if (CCDef != nullptr)
CCDef->setIsDead(true);
}
@@ -1034,7 +1034,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI.getOperand(2).getImm());
- BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
+ BuiltMI->findRegisterDefOperand(SystemZ::CC, nullptr)->setIsDead(true);
CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
return BuiltMI;
}
@@ -1195,7 +1195,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
unsigned NumOps = MI.getNumExplicitOperands();
int MemOpcode = SystemZ::getMemOpcode(Opcode);
if (MemOpcode == -1 ||
- (CCLiveAtMI && !MI.definesRegister(SystemZ::CC) &&
+ (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, nullptr) &&
get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
return nullptr;
@@ -1303,9 +1303,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
MIB.addImm(CCValid);
MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
}
- if (MIB->definesRegister(SystemZ::CC) &&
- (!MI.definesRegister(SystemZ::CC) ||
- MI.registerDefIsDead(SystemZ::CC))) {
+ if (MIB->definesRegister(SystemZ::CC, nullptr) &&
+ (!MI.definesRegister(SystemZ::CC, nullptr) ||
+ MI.registerDefIsDead(SystemZ::CC, nullptr))) {
MIB->addRegisterDead(SystemZ::CC, TRI);
if (CCLiveRange)
CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
@@ -1861,14 +1861,14 @@ prepareCompareSwapOperands(MachineBasicBlock::iterator const MBBI) const {
bool CCLive = true;
SmallVector<MachineInstr *, 4> CCUsers;
for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
- if (MI.readsRegister(SystemZ::CC)) {
+ if (MI.readsRegister(SystemZ::CC, nullptr)) {
unsigned Flags = MI.getDesc().TSFlags;
if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
CCUsers.push_back(&MI);
else
return false;
}
- if (MI.definesRegister(SystemZ::CC)) {
+ if (MI.definesRegister(SystemZ::CC, nullptr)) {
CCLive = false;
break;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
index a2a054127d5f65..e2d97cb811d4e0 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
@@ -37,7 +37,7 @@ WebAssemblyDebugValueManager::WebAssemblyDebugValueManager(MachineInstr *Def)
ME = Def->getParent()->end();
MI != ME; ++MI) {
// If another definition appears, stop
- if (MI->definesRegister(CurrentReg))
+ if (MI->definesRegister(CurrentReg, nullptr))
break;
if (MI->isDebugValue() && MI->hasDebugOperandForReg(CurrentReg))
DbgValues.push_back(&*MI);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index 3046f9476f91c3..cb4258d1497025 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -80,13 +80,13 @@ FunctionPass *llvm::createWebAssemblyRegStackify() {
// the expression stack.
static void imposeStackOrdering(MachineInstr *MI) {
// Write the opaque VALUE_STACK register.
- if (!MI->definesRegister(WebAssembly::VALUE_STACK))
+ if (!MI->definesRegister(WebAssembly::VALUE_STACK, nullptr))
MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
/*isDef=*/true,
/*isImp=*/true));
// Also read the opaque VALUE_STACK register.
- if (!MI->readsRegister(WebAssembly::VALUE_STACK))
+ if (!MI->readsRegister(WebAssembly::VALUE_STACK, nullptr))
MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
/*isDef=*/false,
/*isImp=*/true));
@@ -371,8 +371,8 @@ static bool isSafeToMove(const MachineOperand *Def, const MachineOperand *Use,
Register Reg = MO.getReg();
// If the register is dead here and at Insert, ignore it.
- if (MO.isDead() && Insert->definesRegister(Reg) &&
- !Insert->readsRegister(Reg))
+ if (MO.isDead() && Insert->definesRegister(Reg, nullptr) &&
+ !Insert->readsRegister(Reg, nullptr))
continue;
if (Reg.isPhysical()) {
@@ -864,7 +864,7 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
if (WebAssembly::isArgument(DefI->getOpcode()))
continue;
- MachineOperand *Def = DefI->findRegisterDefOperand(Reg);
+ MachineOperand *Def = DefI->findRegisterDefOperand(Reg, nullptr);
assert(Def != nullptr);
// Decide which strategy to take. Prefer to move a single-use value
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index 8dc3b91f08e292..e58d15456a5e5c 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -355,7 +355,7 @@ bool X86CmovConverterPass::collectCmovCandidates(
FoundNonCMOVInst = true;
// Check if this instruction define EFLAGS, to determine end of processed
// range, as there would be no more instructions using current EFLAGS def.
- if (I.definesRegister(X86::EFLAGS)) {
+ if (I.definesRegister(X86::EFLAGS, nullptr)) {
// Check if current processed CMOV-group should not be skipped and add
// it as a CMOV-group-candidate.
if (!SkipGroup)
@@ -582,7 +582,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
}
static bool checkEFLAGSLive(MachineInstr *MI) {
- if (MI->killsRegister(X86::EFLAGS))
+ if (MI->killsRegister(X86::EFLAGS, nullptr))
return false;
// The EFLAGS operand of MI might be missing a kill marker.
@@ -592,9 +592,9 @@ static bool checkEFLAGSLive(MachineInstr *MI) {
// Scan forward through BB for a use/def of EFLAGS.
for (auto I = std::next(ItrMI), E = BB->end(); I != E; ++I) {
- if (I->readsRegister(X86::EFLAGS))
+ if (I->readsRegister(X86::EFLAGS, nullptr))
return true;
- if (I->definesRegister(X86::EFLAGS))
+ if (I->definesRegister(X86::EFLAGS, nullptr))
return false;
}
diff --git a/llvm/lib/Target/X86/X86FixupSetCC.cpp b/llvm/lib/Target/X86/X86FixupSetCC.cpp
index 269f8ce6bd7a42..50b5e19d5398d4 100644
--- a/llvm/lib/Target/X86/X86FixupSetCC.cpp
+++ b/llvm/lib/Target/X86/X86FixupSetCC.cpp
@@ -69,7 +69,7 @@ bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) {
MachineInstr *FlagsDefMI = nullptr;
for (auto &MI : MBB) {
// Remember the most recent preceding eflags defining instruction.
- if (MI.definesRegister(X86::EFLAGS))
+ if (MI.definesRegister(X86::EFLAGS, nullptr))
FlagsDefMI = &MI;
// Find a setcc that is used by a zext.
@@ -94,7 +94,7 @@ bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) {
// it, itself, by definition, clobbers eflags. But it may happen that
// FlagsDefMI also *uses* eflags, in which case the transformation is
// invalid.
- if (FlagsDefMI->readsRegister(X86::EFLAGS))
+ if (FlagsDefMI->readsRegister(X86::EFLAGS, nullptr))
continue;
// On 32-bit, we need to be careful to force an ABCD register.
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index d96613d7bb7efc..af8da1bb820340 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -442,7 +442,8 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
// Flag any instruction (other than the copy we are
// currently rewriting) that defs EFLAGS.
- return &MI != CopyI && MI.findRegisterDefOperand(X86::EFLAGS);
+ return &MI != CopyI &&
+ MI.findRegisterDefOperand(X86::EFLAGS, nullptr);
});
};
auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
@@ -500,7 +501,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
auto DefIt = llvm::find_if(
llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
[&](MachineInstr &MI) {
- return MI.findRegisterDefOperand(X86::EFLAGS);
+ return MI.findRegisterDefOperand(X86::EFLAGS, nullptr);
});
if (DefIt.base() != TestMBB->instr_begin()) {
dbgs() << " Using EFLAGS defined by: ";
@@ -562,9 +563,10 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
break;
}
- MachineOperand *FlagUse = MI.findRegisterUseOperand(X86::EFLAGS);
+ MachineOperand *FlagUse =
+ MI.findRegisterUseOperand(X86::EFLAGS, nullptr);
if (!FlagUse) {
- if (MI.findRegisterDefOperand(X86::EFLAGS)) {
+ if (MI.findRegisterDefOperand(X86::EFLAGS, nullptr)) {
// If EFLAGS are defined, it's as-if they were killed. We can stop
// scanning here.
//
@@ -615,7 +617,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
rewriteCopy(MI, *FlagUse, CopyDefI);
} else {
// We assume all other instructions that use flags also def them.
- assert(MI.findRegisterDefOperand(X86::EFLAGS) &&
+ assert(MI.findRegisterDefOperand(X86::EFLAGS, nullptr) &&
"Expected a def of EFLAGS for this instruction!");
// NB!!! Several arithmetic instructions only *partially* update
@@ -734,7 +736,7 @@ CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
// Stop scanning when we see the first definition of the EFLAGS as prior to
// this we would potentially capture the wrong flag state.
- if (MI.findRegisterDefOperand(X86::EFLAGS))
+ if (MI.findRegisterDefOperand(X86::EFLAGS, nullptr))
break;
}
return CondRegs;
@@ -914,7 +916,7 @@ void X86FlagsCopyLoweringPass::rewriteCondJmp(
// Rewrite the jump to use the !ZF flag from the test, and kill its use of
// flags afterward.
JmpI.getOperand(1).setImm(Inverted ? X86::COND_E : X86::COND_NE);
- JmpI.findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
+ JmpI.findRegisterUseOperand(X86::EFLAGS, nullptr)->setIsKill(true);
LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
}
diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index 260879ffaa4f12..ad58e6a88aa9be 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -829,7 +829,7 @@ static const TableEntry PopTable[] = {
};
static bool doesInstructionSetFPSW(MachineInstr &MI) {
- if (const MachineOperand *MO = MI.findRegisterDefOperand(X86::FPSW))
+ if (const MachineOperand *MO = MI.findRegisterDefOperand(X86::FPSW, nullptr))
if (!MO->isDead())
return true;
return false;
@@ -872,7 +872,7 @@ void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
if (doesInstructionSetFPSW(MI)) {
MachineBasicBlock &MBB = *MI.getParent();
MachineBasicBlock::iterator Next = getNextFPInstruction(I);
- if (Next != MBB.end() && Next->readsRegister(X86::FPSW))
+ if (Next != MBB.end() && Next->readsRegister(X86::FPSW, nullptr))
I = Next;
}
I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(X86::ST0);
@@ -1081,11 +1081,12 @@ void FPS::handleReturn(MachineBasicBlock::iterator &I) {
continue;
// FP Register uses must be kills unless there are two uses of the same
// register, in which case only one will be a kill.
- assert(Op.isUse() &&
- (Op.isKill() || // Marked kill.
- getFPReg(Op) == FirstFPRegOp || // Second instance.
- MI.killsRegister(Op.getReg())) && // Later use is marked kill.
- "Ret only defs operands, and values aren't live beyond it");
+ assert(
+ Op.isUse() &&
+ (Op.isKill() || // Marked kill.
+ getFPReg(Op) == FirstFPRegOp || // Second instance.
+ MI.killsRegister(Op.getReg(), nullptr)) && // Later use is marked kill.
+ "Ret only defs operands, and values aren't live beyond it");
if (FirstFPRegOp == ~0U)
FirstFPRegOp = getFPReg(Op);
@@ -1181,7 +1182,7 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
// Is this the last use of the source register?
unsigned Reg = getFPReg(MI.getOperand(NumOps - 1));
- bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
+ bool KillsSrc = MI.killsRegister(X86::FP0 + Reg, nullptr);
// FISTP64m is strange because there isn't a non-popping versions.
// If we have one _and_ we don't want to pop the operand, duplicate the value
@@ -1244,7 +1245,7 @@ void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
// Is this the last use of the source register?
unsigned Reg = getFPReg(MI.getOperand(1));
- bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
+ bool KillsSrc = MI.killsRegister(X86::FP0 + Reg, nullptr);
if (KillsSrc) {
// If this is the last use of the source register, just make sure it's on
@@ -1355,8 +1356,8 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
unsigned Dest = getFPReg(MI.getOperand(0));
unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
- bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
- bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
+ bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0, nullptr);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1, nullptr);
const DebugLoc &dl = MI.getDebugLoc();
unsigned TOS = getStackEntry(0);
@@ -1453,8 +1454,8 @@ void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
- bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
- bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
+ bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0, nullptr);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1, nullptr);
// Make sure the first operand is on the top of stack, the other one can be
// anywhere.
@@ -1480,7 +1481,7 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
unsigned Op0 = getFPReg(MI.getOperand(0));
unsigned Op1 = getFPReg(MI.getOperand(2));
- bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1, nullptr);
// The first operand *must* be on the top of the stack.
moveToTop(Op0, I);
@@ -1524,7 +1525,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
const MachineOperand &MO1 = MI.getOperand(1);
const MachineOperand &MO0 = MI.getOperand(0);
- bool KillsSrc = MI.killsRegister(MO1.getReg());
+ bool KillsSrc = MI.killsRegister(MO1.getReg(), nullptr);
// FP <- FP copy.
unsigned DstFP = getFPReg(MO0);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6f65344215c020..5323676bbc56ed 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34170,10 +34170,10 @@ static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
MachineBasicBlock *BB) {
// Scan forward through BB for a use/def of EFLAGS.
for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
- if (mi.readsRegister(X86::EFLAGS))
+ if (mi.readsRegister(X86::EFLAGS, nullptr))
return true;
// If we found a def, we can stop searching.
- if (mi.definesRegister(X86::EFLAGS))
+ if (mi.definesRegister(X86::EFLAGS, nullptr))
return false;
}
@@ -34759,7 +34759,7 @@ X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
- if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
+ if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS, nullptr) &&
!checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
SecondInsertedMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
@@ -34915,7 +34915,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
- if (!LastCMOV->killsRegister(X86::EFLAGS) &&
+ if (!LastCMOV->killsRegister(X86::EFLAGS, nullptr) &&
!checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
FalseMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
@@ -36494,10 +36494,10 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
// four operand definitions that are E[ABCD] registers. We skip them and
// then insert the LEA.
MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
- while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
- RMBBI->definesRegister(X86::EBX) ||
- RMBBI->definesRegister(X86::ECX) ||
- RMBBI->definesRegister(X86::EDX))) {
+ while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX, nullptr) ||
+ RMBBI->definesRegister(X86::EBX, nullptr) ||
+ RMBBI->definesRegister(X86::ECX, nullptr) ||
+ RMBBI->definesRegister(X86::EDX, nullptr))) {
++RMBBI;
}
MachineBasicBlock::iterator MBBI(RMBBI);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index a5b2e4895eded2..83d5faa4e9f21d 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1168,7 +1168,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
}
Register SrcReg = Src.getReg();
- isKill = MI.killsRegister(SrcReg);
+ isKill = MI.killsRegister(SrcReg, nullptr);
// For both LEA64 and LEA32 the register already has essentially the right
// type (32-bit or 64-bit) we may just need to forbid SP.
@@ -3727,7 +3727,7 @@ bool X86InstrInfo::analyzeBranchImpl(
// In practice we should never have an undef eflags operand, if we do
// abort here as we are not prepared to preserve the flag.
- if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
+ if (I->findRegisterUseOperand(X86::EFLAGS, nullptr)->isUndef())
return true;
// Working from the bottom, handle the first conditional branch.
@@ -5472,7 +5472,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
}
// Make sure Sub instruction defines EFLAGS and mark the def live.
- MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
+ MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS, nullptr);
assert(FlagDef && "Unable to locate a def EFLAGS operand");
FlagDef->setIsDead(false);
@@ -5629,7 +5629,7 @@ bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
return false;
}
- if (UseMI.findRegisterUseOperand(Reg)->getSubReg())
+ if (UseMI.findRegisterUseOperand(Reg, nullptr)->getSubReg())
return false;
// Immediate has larger code size than register. So avoid folding the
// immediate if it has more than 1 use and we are optimizing for size.
@@ -5676,7 +5676,7 @@ bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
if (!MakeChange)
return true;
UseMI.setDesc(get(X86::MOV32r0));
- UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg));
+ UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg, nullptr));
UseMI.addOperand(MachineOperand::CreateReg(X86::EFLAGS, /*isDef=*/true,
/*isImp=*/true,
/*isKill=*/false,
@@ -5698,18 +5698,18 @@ bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri ||
NewOpc == X86::SUB64ri32_ND || NewOpc == X86::SUB32ri_ND ||
NewOpc == X86::SBB64ri32_ND || NewOpc == X86::SBB32ri_ND) &&
- UseMI.findRegisterUseOperandIdx(Reg) != 2)
+ UseMI.findRegisterUseOperandIdx(Reg, nullptr) != 2)
return false;
// For CMP instructions the immediate can only be at index 1.
if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
(NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
- UseMI.findRegisterUseOperandIdx(Reg) != 1)
+ UseMI.findRegisterUseOperandIdx(Reg, nullptr) != 1)
return false;
using namespace X86;
if (isSHL(Opc) || isSHR(Opc) || isSAR(Opc) || isROL(Opc) || isROR(Opc) ||
isRCL(Opc) || isRCR(Opc)) {
- unsigned RegIdx = UseMI.findRegisterUseOperandIdx(Reg);
+ unsigned RegIdx = UseMI.findRegisterUseOperandIdx(Reg, nullptr);
if (RegIdx < 2)
return false;
if (!isInt<8>(ImmVal))
@@ -5733,13 +5733,14 @@ bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
if (!Modified) {
// Modify the instruction.
if (ImmVal == 0 && canConvert2Copy(NewOpc) &&
- UseMI.registerDefIsDead(X86::EFLAGS)) {
+ UseMI.registerDefIsDead(X86::EFLAGS, nullptr)) {
// %100 = add %101, 0
// ==>
// %100 = COPY %101
UseMI.setDesc(get(TargetOpcode::COPY));
- UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg));
- UseMI.removeOperand(UseMI.findRegisterDefOperandIdx(X86::EFLAGS));
+ UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg, nullptr));
+ UseMI.removeOperand(
+ UseMI.findRegisterDefOperandIdx(X86::EFLAGS, nullptr));
UseMI.untieRegOperand(0);
UseMI.clearFlag(MachineInstr::MIFlag::NoSWrap);
UseMI.clearFlag(MachineInstr::MIFlag::NoUWrap);
@@ -9538,7 +9539,8 @@ bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
// not change anything because rearranging the operands could affect other
// instructions that depend on the exact status flags (zero, sign, etc.)
// that are set by using these particular operands with this operation.
- const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS);
+ const MachineOperand *FlagDef =
+ Inst.findRegisterDefOperand(X86::EFLAGS, nullptr);
assert((Inst.getNumDefs() == 1 || FlagDef) && "Implicit def isn't flags?");
if (FlagDef && !FlagDef->isDead())
return false;
@@ -10060,8 +10062,10 @@ void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
MachineInstr &NewMI1,
MachineInstr &NewMI2) const {
// Integer instructions may define an implicit EFLAGS dest register operand.
- MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS);
- MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS);
+ MachineOperand *OldFlagDef1 =
+ OldMI1.findRegisterDefOperand(X86::EFLAGS, nullptr);
+ MachineOperand *OldFlagDef2 =
+ OldMI2.findRegisterDefOperand(X86::EFLAGS, nullptr);
assert(!OldFlagDef1 == !OldFlagDef2 &&
"Unexpected instruction type for reassociation");
@@ -10072,8 +10076,10 @@ void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
"Must have dead EFLAGS operand in reassociable instruction");
- MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS);
- MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS);
+ MachineOperand *NewFlagDef1 =
+ NewMI1.findRegisterDefOperand(X86::EFLAGS, nullptr);
+ MachineOperand *NewFlagDef2 =
+ NewMI2.findRegisterDefOperand(X86::EFLAGS, nullptr);
assert(NewFlagDef1 && NewFlagDef2 &&
"Unexpected operand in reassociable instruction");
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index e2330ff34c1753..efdae589dcfb43 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -506,7 +506,8 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
// recognize as TZCNT, which has better performance than BSF.
// BSF and TZCNT have different interpretations on ZF bit. So make sure
// it won't be used later.
- const MachineOperand *FlagDef = MI->findRegisterDefOperand(X86::EFLAGS);
+ const MachineOperand *FlagDef =
+ MI->findRegisterDefOperand(X86::EFLAGS, nullptr);
if (!MF.getFunction().hasOptSize() && FlagDef && FlagDef->isDead())
OutMI.setFlags(X86::IP_HAS_REPEAT);
break;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 69a54e7667b553..77523cf53e120e 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -483,7 +483,7 @@ bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
PredStateSubReg);
++NumInstsInserted;
MachineOperand *ZeroEFLAGSDefOp =
- ZeroI->findRegisterDefOperand(X86::EFLAGS);
+ ZeroI->findRegisterDefOperand(X86::EFLAGS, nullptr);
assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
"Must have an implicit def of EFLAGS!");
ZeroEFLAGSDefOp->setIsDead(true);
@@ -762,7 +762,8 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
// If this is the last cmov and the EFLAGS weren't originally
// live-in, mark them as killed.
if (!LiveEFLAGS && Cond == Conds.back())
- CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
+ CMovI->findRegisterUseOperand(X86::EFLAGS, nullptr)
+ ->setIsKill(true);
++NumInstsInserted;
LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump();
@@ -1185,7 +1186,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
.addReg(PS->InitialReg)
.addReg(PS->PoisonReg)
.addImm(X86::COND_NE);
- CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
+ CMovI->findRegisterUseOperand(X86::EFLAGS, nullptr)->setIsKill(true);
++NumInstsInserted;
LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
CMovs.push_back(&*CMovI);
@@ -1202,7 +1203,8 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
// Returns true if the MI has EFLAGS as a register def operand and it's live,
// otherwise it returns false
static bool isEFLAGSDefLive(const MachineInstr &MI) {
- if (const MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
+ if (const MachineOperand *DefOp =
+ MI.findRegisterDefOperand(X86::EFLAGS, nullptr)) {
return !DefOp->isDead();
}
return false;
@@ -1213,7 +1215,8 @@ static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// Check if EFLAGS are alive by seeing if there is a def of them or they
// live-in, and then seeing if that def is in turn used.
for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
- if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
+ if (MachineOperand *DefOp =
+ MI.findRegisterDefOperand(X86::EFLAGS, nullptr)) {
// If the def is dead, then EFLAGS is not live.
if (DefOp->isDead())
return false;
@@ -2182,7 +2185,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
.addReg(NewStateReg, RegState::Kill)
.addReg(PS->PoisonReg)
.addImm(X86::COND_NE);
- CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
+ CMovI->findRegisterUseOperand(X86::EFLAGS, nullptr)->setIsKill(true);
++NumInstsInserted;
LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
More information about the llvm-commits
mailing list