[llvm] 96dfa52 - [AMDGPU] Refactor SIFoldOperands. NFC.

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 7 03:05:18 PDT 2022


Author: Jay Foad
Date: 2022-09-07T11:05:01+01:00
New Revision: 96dfa523c22c0d85c1d81fb9db86debe96e06a1d

URL: https://github.com/llvm/llvm-project/commit/96dfa523c22c0d85c1d81fb9db86debe96e06a1d
DIFF: https://github.com/llvm/llvm-project/commit/96dfa523c22c0d85c1d81fb9db86debe96e06a1d.diff

LOG: [AMDGPU] Refactor SIFoldOperands. NFC.

Refactor static functions into class methods so they have access to TII,
MRI, etc.
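
The diff below applies one mechanical pattern throughout: free functions that
had TII/TRI/MRI/ST threaded through their parameter lists become
SIFoldOperands methods that read the pointers the pass already caches as
members. A minimal, self-contained sketch of that shape follows; the types are
placeholders standing in for SIInstrInfo/SIRegisterInfo and the method name
mirrors frameIndexMayFold, but this is not the real LLVM code:

    // Standalone illustration of the refactoring pattern in this commit.
    // InstrInfo/RegInfo are hypothetical stand-ins for the LLVM analyses.
    #include <iostream>

    struct InstrInfo { bool isFoldable(int Opcode) const { return Opcode != 0; } };
    struct RegInfo   { bool isVirtual(unsigned Reg) const { return Reg >= (1u << 31); } };

    // Before: a free function; every caller must pass the analysis objects.
    static bool frameIndexMayFoldBefore(const InstrInfo &TII, const RegInfo &TRI,
                                        int Opcode, unsigned Reg) {
      return TII.isFoldable(Opcode) && TRI.isVirtual(Reg);
    }

    // After: a method of the pass, which caches the pointers once (in the real
    // pass this happens in runOnMachineFunction), so callers pass only operands.
    class FoldOperandsPass {
      const InstrInfo *TII = nullptr;
      const RegInfo *TRI = nullptr;

    public:
      void init(const InstrInfo *II, const RegInfo *RI) { TII = II; TRI = RI; }

      bool frameIndexMayFold(int Opcode, unsigned Reg) const {
        return TII->isFoldable(Opcode) && TRI->isVirtual(Reg);
      }
    };

    int main() {
      InstrInfo II;
      RegInfo RI;
      FoldOperandsPass Pass;
      Pass.init(&II, &RI);
      std::cout << frameIndexMayFoldBefore(II, RI, 42, 1u << 31) << '\n'; // 1
      std::cout << Pass.frameIndexMayFold(42, 1u << 31) << '\n';          // 1
    }

The visible payoff in the hunks below is that call sites such as
tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII) and
updateOperand(Fold, *TII, *TRI, *ST) drop their trailing analysis arguments.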

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIFoldOperands.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index eb31cb21825d..275a87e033ba 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -84,12 +84,30 @@ class SIFoldOperands : public MachineFunctionPass {
   const GCNSubtarget *ST;
   const SIMachineFunctionInfo *MFI;
 
+  bool frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
+                         const MachineOperand &OpToFold) const;
+
+  bool updateOperand(FoldCandidate &Fold) const;
+
+  bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
+                        MachineInstr *MI, unsigned OpNo,
+                        MachineOperand *OpToFold) const;
+  bool isUseSafeToFold(const MachineInstr &MI,
+                       const MachineOperand &UseMO) const;
+  bool
+  getRegSeqInit(SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
+                Register UseReg, uint8_t OpTy) const;
+  bool tryToFoldACImm(const MachineOperand &OpToFold, MachineInstr *UseMI,
+                      unsigned UseOpIdx,
+                      SmallVectorImpl<FoldCandidate> &FoldList) const;
   void foldOperand(MachineOperand &OpToFold,
                    MachineInstr *UseMI,
                    int UseOpIdx,
                    SmallVectorImpl<FoldCandidate> &FoldList,
                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
 
+  MachineOperand *getImmOrMaterializedImm(MachineOperand &Op) const;
+  bool tryConstantFoldOp(MachineInstr *MI) const;
   bool tryFoldCndMask(MachineInstr &MI) const;
   bool tryFoldZeroHighBits(MachineInstr &MI) const;
   bool foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
@@ -148,10 +166,8 @@ static unsigned macToMad(unsigned Opc) {
 
 // TODO: Add heuristic that the frame index might not fit in the addressing mode
 // immediate offset to avoid materializing in loops.
-static bool frameIndexMayFold(const SIInstrInfo *TII,
-                              const MachineInstr &UseMI,
-                              int OpNo,
-                              const MachineOperand &OpToFold) {
+bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
+                                       const MachineOperand &OpToFold) const {
   if (!OpToFold.isFI())
     return false;
 
@@ -175,10 +191,7 @@ FunctionPass *llvm::createSIFoldOperandsPass() {
   return new SIFoldOperands();
 }
 
-static bool updateOperand(FoldCandidate &Fold,
-                          const SIInstrInfo &TII,
-                          const TargetRegisterInfo &TRI,
-                          const GCNSubtarget &ST) {
+bool SIFoldOperands::updateOperand(FoldCandidate &Fold) const {
   MachineInstr *MI = Fold.UseMI;
   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
   assert(Old.isReg());
@@ -186,10 +199,10 @@ static bool updateOperand(FoldCandidate &Fold,
   if (Fold.isImm()) {
     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
-        (!ST.hasDOTOpSelHazard() ||
+        (!ST->hasDOTOpSelHazard() ||
          !(MI->getDesc().TSFlags & SIInstrFlags::IsDOT)) &&
         AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
-                                      ST.hasInv2PiInlineImm())) {
+                                      ST->hasInv2PiInlineImm())) {
       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
       // already set.
       unsigned Opcode = MI->getOpcode();
@@ -208,7 +221,7 @@ static bool updateOperand(FoldCandidate &Fold,
       if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
         // Only apply the following transformation if that operand requires
         // a packed immediate.
-        switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
+        switch (TII->get(Opcode).OpInfo[OpNo].OperandType) {
         case AMDGPU::OPERAND_REG_IMM_V2FP16:
         case AMDGPU::OPERAND_REG_IMM_V2INT16:
         case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
@@ -235,27 +248,27 @@ static bool updateOperand(FoldCandidate &Fold,
 
   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
     MachineBasicBlock *MBB = MI->getParent();
-    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
+    auto Liveness = MBB->computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 16);
     if (Liveness != MachineBasicBlock::LQR_Dead) {
       LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
       return false;
     }
 
-    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
     int Op32 = Fold.getShrinkOpcode();
     MachineOperand &Dst0 = MI->getOperand(0);
     MachineOperand &Dst1 = MI->getOperand(1);
     assert(Dst0.isDef() && Dst1.isDef());
 
-    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
+    bool HaveNonDbgCarryUse = !MRI->use_nodbg_empty(Dst1.getReg());
 
-    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
-    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
+    const TargetRegisterClass *Dst0RC = MRI->getRegClass(Dst0.getReg());
+    Register NewReg0 = MRI->createVirtualRegister(Dst0RC);
 
-    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
+    MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32);
 
     if (HaveNonDbgCarryUse) {
-      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
+      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY),
+              Dst1.getReg())
         .addReg(AMDGPU::VCC, RegState::Kill);
     }
 
@@ -268,10 +281,10 @@ static bool updateOperand(FoldCandidate &Fold,
     Dst0.setReg(NewReg0);
     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
       MI->removeOperand(I);
-    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
+    MI->setDesc(TII->get(AMDGPU::IMPLICIT_DEF));
 
     if (Fold.isCommuted())
-      TII.commuteInstruction(*Inst32, false);
+      TII->commuteInstruction(*Inst32, false);
     return true;
   }
 
@@ -282,7 +295,7 @@ static bool updateOperand(FoldCandidate &Fold,
       int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
       if (NewMFMAOpc == -1)
         return false;
-      MI->setDesc(TII.get(NewMFMAOpc));
+      MI->setDesc(TII->get(NewMFMAOpc));
       MI->untieRegOperand(0);
     }
     Old.ChangeToImmediate(Fold.ImmToFold);
@@ -301,7 +314,7 @@ static bool updateOperand(FoldCandidate &Fold,
   }
 
   MachineOperand *New = Fold.OpToFold;
-  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
+  Old.substVirtReg(New->getReg(), New->getSubReg(), *TRI);
   Old.setIsUndef(New->isUndef());
   return true;
 }
@@ -328,10 +341,9 @@ static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
   FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp);
 }
 
-static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
-                             MachineInstr *MI, unsigned OpNo,
-                             MachineOperand *OpToFold,
-                             const SIInstrInfo *TII) {
+bool SIFoldOperands::tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
+                                      MachineInstr *MI, unsigned OpNo,
+                                      MachineOperand *OpToFold) const {
   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
     unsigned Opc = MI->getOpcode();
@@ -340,7 +352,7 @@ static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
       // to fold the operand.
       MI->setDesc(TII->get(NewOpc));
-      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
+      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold);
       if (FoldAsMAD) {
         MI->untieRegOperand(OpNo);
         return true;
@@ -401,14 +413,13 @@ static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
            Opc == AMDGPU::V_SUB_CO_U32_e64 ||
            Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
-        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
 
         // Verify the other operand is a VGPR, otherwise we would violate the
         // constant bus restriction.
         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
         if (!OtherOp.isReg() ||
-            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
+            !TII->getRegisterInfo().isVGPR(*MRI, OtherOp.getReg()))
           return false;
 
         assert(MI->getOperand(1).isDef());
@@ -434,11 +445,10 @@ static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
   if (TII->isSALU(MI->getOpcode())) {
     const MCInstrDesc &InstDesc = MI->getDesc();
     const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
-    const SIRegisterInfo &SRI = TII->getRegisterInfo();
 
     // Fine if the operand can be encoded as an inline constant
     if (TII->isLiteralConstantLike(*OpToFold, OpInfo)) {
-      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
+      if (!TRI->opCanUseInlineConstant(OpInfo.OperandType) ||
           !TII->isInlineConstant(*OpToFold, OpInfo)) {
         // Otherwise check for another constant
         for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
@@ -458,9 +468,8 @@ static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
 
 // If the use operand doesn't care about the value, this may be an operand only
 // used for register indexing, in which case it is unsafe to fold.
-static bool isUseSafeToFold(const SIInstrInfo *TII,
-                            const MachineInstr &MI,
-                            const MachineOperand &UseMO) {
+bool SIFoldOperands::isUseSafeToFold(const MachineInstr &MI,
+                                     const MachineOperand &UseMO) const {
   if (UseMO.isUndef() || TII->isSDWA(MI))
     return false;
 
@@ -481,11 +490,10 @@ static bool isUseSafeToFold(const SIInstrInfo *TII,
 // Find a def of the UseReg, check if it is a reg_sequence and find initializers
 // for each subreg, tracking it to foldable inline immediate if possible.
 // Returns true on success.
-static bool getRegSeqInit(
-    SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
-    Register UseReg, uint8_t OpTy,
-    const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
-  MachineInstr *Def = MRI.getVRegDef(UseReg);
+bool SIFoldOperands::getRegSeqInit(
+    SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
+    Register UseReg, uint8_t OpTy) const {
+  MachineInstr *Def = MRI->getVRegDef(UseReg);
   if (!Def || !Def->isRegSequence())
     return false;
 
@@ -493,10 +501,10 @@ static bool getRegSeqInit(
     MachineOperand *Sub = &Def->getOperand(I);
     assert(Sub->isReg());
 
-    for (MachineInstr *SubDef = MRI.getVRegDef(Sub->getReg());
+    for (MachineInstr *SubDef = MRI->getVRegDef(Sub->getReg());
          SubDef && Sub->isReg() && Sub->getReg().isVirtual() &&
          !Sub->getSubReg() && TII->isFoldableCopy(*SubDef);
-         SubDef = MRI.getVRegDef(Sub->getReg())) {
+         SubDef = MRI->getVRegDef(Sub->getReg())) {
       MachineOperand *Op = &SubDef->getOperand(1);
       if (Op->isImm()) {
         if (TII->isInlineConstant(*Op, OpTy))
@@ -514,11 +522,9 @@ static bool getRegSeqInit(
   return true;
 }
 
-static bool tryToFoldACImm(const SIInstrInfo *TII,
-                           const MachineOperand &OpToFold,
-                           MachineInstr *UseMI,
-                           unsigned UseOpIdx,
-                           SmallVectorImpl<FoldCandidate> &FoldList) {
+bool SIFoldOperands::tryToFoldACImm(
+    const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
+    SmallVectorImpl<FoldCandidate> &FoldList) const {
   const MCInstrDesc &Desc = UseMI->getDesc();
   const MCOperandInfo *OpInfo = Desc.OpInfo;
   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
@@ -547,10 +553,8 @@ static bool tryToFoldACImm(const SIInstrInfo *TII,
   if (isUseMIInFoldList(FoldList, UseMI))
     return false;
 
-  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
-
   // Maybe it is just a COPY of an immediate itself.
-  MachineInstr *Def = MRI.getVRegDef(UseReg);
+  MachineInstr *Def = MRI->getVRegDef(UseReg);
   MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
   if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
     MachineOperand &DefOp = Def->getOperand(1);
@@ -562,7 +566,7 @@ static bool tryToFoldACImm(const SIInstrInfo *TII,
   }
 
   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
-  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
+  if (!getRegSeqInit(Defs, UseReg, OpTy))
     return false;
 
   int32_t Imm;
@@ -596,7 +600,7 @@ void SIFoldOperands::foldOperand(
   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
 
-  if (!isUseSafeToFold(TII, *UseMI, UseOp))
+  if (!isUseSafeToFold(*UseMI, UseOp))
     return;
 
   // FIXME: Fold operands with subregs.
@@ -615,7 +619,7 @@ void SIFoldOperands::foldOperand(
     for (auto &RSUse : make_early_inc_range(MRI->use_nodbg_operands(RegSeqDstReg))) {
       MachineInstr *RSUseMI = RSUse.getParent();
 
-      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
+      if (tryToFoldACImm(UseMI->getOperand(0), RSUseMI,
                          RSUseMI->getOperandNo(&RSUse), FoldList))
         continue;
 
@@ -629,10 +633,10 @@ void SIFoldOperands::foldOperand(
     return;
   }
 
-  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
+  if (tryToFoldACImm(OpToFold, UseMI, UseOpIdx, FoldList))
     return;
 
-  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
+  if (frameIndexMayFold(*UseMI, UseOpIdx, OpToFold)) {
     // Verify that this is a stack access.
     // FIXME: Should probably use stack pseudos before frame lowering.
 
@@ -746,8 +750,7 @@ void SIFoldOperands::foldOperand(
      // avoid copies via different reg classes.
       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
-          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
-                        *MRI)) {
+          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
         const DebugLoc &DL = UseMI->getDebugLoc();
         MachineBasicBlock &MBB = *UseMI->getParent();
 
@@ -907,7 +910,7 @@ void SIFoldOperands::foldOperand(
         return;
     }
 
-    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
+    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold);
 
     // FIXME: We could try to change the instruction from 64-bit to 32-bit
     // to enable more folding opportunities.  The shrink operands pass
@@ -937,13 +940,11 @@ void SIFoldOperands::foldOperand(
     }
 
     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
-    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
+    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp);
     return;
   }
 
-
-
-  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
+  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold);
 }
 
 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
@@ -1034,14 +1035,14 @@ static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
   stripExtraCopyOperands(MI);
 }
 
-static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
-                                               MachineOperand &Op) {
+MachineOperand *
+SIFoldOperands::getImmOrMaterializedImm(MachineOperand &Op) const {
   if (Op.isReg()) {
     // If this has a subregister, it obviously is a register source.
     if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
       return &Op;
 
-    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
+    MachineInstr *Def = MRI->getVRegDef(Op.getReg());
     if (Def && Def->isMoveImmediate()) {
       MachineOperand &ImmSrc = Def->getOperand(1);
       if (ImmSrc.isImm())
@@ -1055,14 +1056,13 @@ static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
 // Try to simplify operations with a constant that may appear after instruction
 // selection.
 // TODO: See if a frame index with a fixed offset can fold.
-static bool tryConstantFoldOp(MachineRegisterInfo &MRI, const SIInstrInfo *TII,
-                              MachineInstr *MI) {
+bool SIFoldOperands::tryConstantFoldOp(MachineInstr *MI) const {
   unsigned Opc = MI->getOpcode();
 
   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
   if (Src0Idx == -1)
     return false;
-  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
+  MachineOperand *Src0 = getImmOrMaterializedImm(MI->getOperand(Src0Idx));
 
   if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
        Opc == AMDGPU::S_NOT_B32) &&
@@ -1075,7 +1075,7 @@ static bool tryConstantFoldOp(MachineRegisterInfo &MRI, const SIInstrInfo *TII,
   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
   if (Src1Idx == -1)
     return false;
-  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
+  MachineOperand *Src1 = getImmOrMaterializedImm(MI->getOperand(Src1Idx));
 
   if (!Src0->isImm() && !Src1->isImm())
     return false;
@@ -1088,8 +1088,7 @@ static bool tryConstantFoldOp(MachineRegisterInfo &MRI, const SIInstrInfo *TII,
     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
       return false;
 
-    const SIRegisterInfo &TRI = TII->getRegisterInfo();
-    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
+    bool IsSGPR = TRI->isSGPRReg(*MRI, MI->getOperand(0).getReg());
 
     // Be careful to change the right operand, src0 may belong to a different
     // instruction.
@@ -1167,8 +1166,8 @@ bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
   if (!Src1->isIdenticalTo(*Src0)) {
-    auto *Src0Imm = getImmOrMaterializedImm(*MRI, *Src0);
-    auto *Src1Imm = getImmOrMaterializedImm(*MRI, *Src1);
+    auto *Src0Imm = getImmOrMaterializedImm(*Src0);
+    auto *Src1Imm = getImmOrMaterializedImm(*Src1);
     if (!Src1Imm->isIdenticalTo(*Src0Imm))
       return false;
   }
@@ -1202,7 +1201,7 @@ bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
     return false;
 
-  MachineOperand *Src0 = getImmOrMaterializedImm(*MRI, MI.getOperand(1));
+  MachineOperand *Src0 = getImmOrMaterializedImm(MI.getOperand(1));
   if (!Src0->isImm() || Src0->getImm() != 0xffff)
     return false;
 
@@ -1239,7 +1238,7 @@ bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
       // We may also encounter cases where one or both operands are
       // immediates materialized into a register, which would ordinarily not
       // be folded due to multiple uses or operand constraints.
-      if (tryConstantFoldOp(*MRI, TII, &UseMI)) {
+      if (tryConstantFoldOp(&UseMI)) {
         LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
         Changed = true;
       }
@@ -1272,7 +1271,7 @@ bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
         continue;
     }
-    if (updateOperand(Fold, *TII, *TRI, *ST)) {
+    if (updateOperand(Fold)) {
       // Clear kill flags.
       if (Fold.isReg()) {
         assert(Fold.OpToFold && Fold.OpToFold->isReg());
@@ -1523,7 +1522,7 @@ bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
     return false;
 
   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
-  if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER, TII, *MRI))
+  if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER))
     return false;
 
   for (auto &Def : Defs) {

