[llvm] 2adc94c - AMDGPU/NewPM: Port SIFoldOperands to new pass manager (#105801)

via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 28 23:04:58 PDT 2024


Author: Akshat Oke
Date: 2024-08-29T11:34:54+05:30
New Revision: 2adc94cd6c3dd1fc713a6ba8301fc04f21908700

URL: https://github.com/llvm/llvm-project/commit/2adc94cd6c3dd1fc713a6ba8301fc04f21908700
DIFF: https://github.com/llvm/llvm-project/commit/2adc94cd6c3dd1fc713a6ba8301fc04f21908700.diff

LOG: AMDGPU/NewPM: Port SIFoldOperands to new pass manager (#105801)

Added: 
    llvm/lib/Target/AMDGPU/SIFoldOperands.h

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPU.h
    llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
    llvm/test/CodeGen/AMDGPU/si-fold-scalar-clamp.mir
    llvm/test/CodeGen/AMDGPU/skip-fold-regsequence.mir

Removed: 
    


################################################################################
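
The patch follows a common new-PM porting pattern: the folding logic moves
into a pass-manager-agnostic SIFoldOperandsImpl, while SIFoldOperandsLegacy
and the new SIFoldOperandsPass become thin drivers over it. A minimal sketch
of the resulting shape, with names taken from the diff below and all other
boilerplate elided:

    class SIFoldOperandsImpl {
    public:
      bool run(MachineFunction &MF); // all of the folding logic lives here
    };

    // Legacy PM driver: applies the optnone/opt-bisect skip, then delegates.
    class SIFoldOperandsLegacy : public MachineFunctionPass {
      bool runOnMachineFunction(MachineFunction &MF) override {
        if (skipFunction(MF.getFunction()))
          return false;
        return SIFoldOperandsImpl().run(MF);
      }
    };

    // New PM driver: reports preserved analyses instead of a bool.
    PreservedAnalyses
    SIFoldOperandsPass::run(MachineFunction &MF,
                            MachineFunctionAnalysisManager &) {
      if (!SIFoldOperandsImpl().run(MF))
        return PreservedAnalyses::all();
      auto PA = getMachineFunctionPassPreservedAnalyses();
      PA.preserveSet<CFGAnalyses>();
      return PA;
    }
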
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index c50474893eb7d5..6eb641db076958 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -35,7 +35,7 @@ void initializeAMDGPURegBankSelectPass(PassRegistry &);
 // SI Passes
 FunctionPass *createGCNDPPCombinePass();
 FunctionPass *createSIAnnotateControlFlowLegacyPass();
-FunctionPass *createSIFoldOperandsPass();
+FunctionPass *createSIFoldOperandsLegacyPass();
 FunctionPass *createSIPeepholeSDWAPass();
 FunctionPass *createSILowerI1CopiesLegacyPass();
 FunctionPass *createAMDGPUGlobalISelDivergenceLoweringPass();
@@ -160,8 +160,8 @@ extern char &AMDGPURewriteOutArgumentsID;
 void initializeGCNDPPCombinePass(PassRegistry &);
 extern char &GCNDPPCombineID;
 
-void initializeSIFoldOperandsPass(PassRegistry &);
-extern char &SIFoldOperandsID;
+void initializeSIFoldOperandsLegacyPass(PassRegistry &);
+extern char &SIFoldOperandsLegacyID;
 
 void initializeSIPeepholeSDWAPass(PassRegistry &);
 extern char &SIPeepholeSDWAID;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index d8741b4b06a984..10e394ed03df8f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -97,4 +97,5 @@ FUNCTION_PASS_WITH_PARAMS(
 MACHINE_FUNCTION_PASS("amdgpu-isel", AMDGPUISelDAGToDAGPass(*this))
 MACHINE_FUNCTION_PASS("si-fix-sgpr-copies", SIFixSGPRCopiesPass())
 MACHINE_FUNCTION_PASS("si-i1-copies", SILowerI1CopiesPass())
+MACHINE_FUNCTION_PASS("si-fold-operands", SIFoldOperandsPass())
 #undef MACHINE_FUNCTION_PASS
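
The .def entry is what lets the new pass manager resolve the textual name
"si-fold-operands". As a rough sketch of the consumer side (a simplified,
assumed pattern -- the real expansion lives in the target's pass-builder
hook, not in this commit), each entry is expanded via the X-macro before the
file is included:

    // Hypothetical expansion, for illustration only:
    #define MACHINE_FUNCTION_PASS(NAME, CREATE_PASS)                           \
      if (PassName == NAME) {                                                  \
        MFPM.addPass(CREATE_PASS);                                             \
        return true;                                                           \
      }
    #include "AMDGPUPassRegistry.def"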

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 37d6084ca1d25d..86cc9d1ecde817 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -34,6 +34,7 @@
 #include "R600.h"
 #include "R600TargetMachine.h"
 #include "SIFixSGPRCopies.h"
+#include "SIFoldOperands.h"
 #include "SIMachineFunctionInfo.h"
 #include "SIMachineScheduler.h"
 #include "TargetInfo/AMDGPUTargetInfo.h"
@@ -410,7 +411,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
   initializeSILowerSGPRSpillsPass(*PR);
   initializeSIFixSGPRCopiesLegacyPass(*PR);
   initializeSIFixVGPRCopiesPass(*PR);
-  initializeSIFoldOperandsPass(*PR);
+  initializeSIFoldOperandsLegacyPass(*PR);
   initializeSIPeepholeSDWAPass(*PR);
   initializeSIShrinkInstructionsPass(*PR);
   initializeSIOptimizeExecMaskingPreRAPass(*PR);
@@ -1270,7 +1271,7 @@ void GCNPassConfig::addMachineSSAOptimization() {
   // instructions leftover after the operands are folded as well.
   //
   // XXX - Can we get away without running DeadMachineInstructionElim again?
-  addPass(&SIFoldOperandsID);
+  addPass(&SIFoldOperandsLegacyID);
   if (EnableDPPCombine)
     addPass(&GCNDPPCombineID);
   addPass(&SILoadStoreOptimizerID);
@@ -1278,7 +1279,7 @@ void GCNPassConfig::addMachineSSAOptimization() {
     addPass(&SIPeepholeSDWAID);
     addPass(&EarlyMachineLICMID);
     addPass(&MachineCSEID);
-    addPass(&SIFoldOperandsID);
+    addPass(&SIFoldOperandsLegacyID);
   }
   addPass(&DeadMachineInstructionElimID);
   addPass(createSIShrinkInstructionsPass());
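
The legacy pipeline position in addMachineSSAOptimization is unchanged; only
the extern char ID symbol was renamed. Where the legacy pass manager's
structure dump is available, the placement can be double-checked with
something like (an illustrative invocation, not part of this commit):

    llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -debug-pass=Structure \
        -o /dev/null input.ll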

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 875738dad74ced..1e2c77b08b9a63 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -8,6 +8,7 @@
 //===----------------------------------------------------------------------===//
 //
 
+#include "SIFoldOperands.h"
 #include "AMDGPU.h"
 #include "GCNSubtarget.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
@@ -66,9 +67,8 @@ struct FoldCandidate {
   bool needsShrink() const { return ShrinkOpcode != -1; }
 };
 
-class SIFoldOperands : public MachineFunctionPass {
+class SIFoldOperandsImpl {
 public:
-  static char ID;
   MachineRegisterInfo *MRI;
   const SIInstrInfo *TII;
   const SIRegisterInfo *TRI;
@@ -121,11 +121,22 @@ class SIFoldOperands : public MachineFunctionPass {
   bool tryOptimizeAGPRPhis(MachineBasicBlock &MBB);
 
 public:
-  SIFoldOperands() : MachineFunctionPass(ID) {
-    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
-  }
+  SIFoldOperandsImpl() = default;
+
+  bool run(MachineFunction &MF);
+};
 
-  bool runOnMachineFunction(MachineFunction &MF) override;
+class SIFoldOperandsLegacy : public MachineFunctionPass {
+public:
+  static char ID;
+
+  SIFoldOperandsLegacy() : MachineFunctionPass(ID) {}
+
+  bool runOnMachineFunction(MachineFunction &MF) override {
+    if (skipFunction(MF.getFunction()))
+      return false;
+    return SIFoldOperandsImpl().run(MF);
+  }
 
   StringRef getPassName() const override { return "SI Fold Operands"; }
 
@@ -137,12 +148,12 @@ class SIFoldOperands : public MachineFunctionPass {
 
 } // End anonymous namespace.
 
-INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
-                "SI Fold Operands", false, false)
+INITIALIZE_PASS(SIFoldOperandsLegacy, DEBUG_TYPE, "SI Fold Operands", false,
+                false)
 
-char SIFoldOperands::ID = 0;
+char SIFoldOperandsLegacy::ID = 0;
 
-char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
+char &llvm::SIFoldOperandsLegacyID = SIFoldOperandsLegacy::ID;
 
 static const TargetRegisterClass *getRegOpRC(const MachineRegisterInfo &MRI,
                                              const TargetRegisterInfo &TRI,
@@ -177,8 +188,8 @@ static unsigned macToMad(unsigned Opc) {
 
 // TODO: Add heuristic that the frame index might not fit in the addressing mode
 // immediate offset to avoid materializing in loops.
-bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
-                                       const MachineOperand &OpToFold) const {
+bool SIFoldOperandsImpl::frameIndexMayFold(
+    const MachineInstr &UseMI, int OpNo, const MachineOperand &OpToFold) const {
   if (!OpToFold.isFI())
     return false;
 
@@ -196,11 +207,11 @@ bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
   return OpNo == VIdx && SIdx == -1;
 }
 
-FunctionPass *llvm::createSIFoldOperandsPass() {
-  return new SIFoldOperands();
+FunctionPass *llvm::createSIFoldOperandsLegacyPass() {
+  return new SIFoldOperandsLegacy();
 }
 
-bool SIFoldOperands::canUseImmWithOpSel(FoldCandidate &Fold) const {
+bool SIFoldOperandsImpl::canUseImmWithOpSel(FoldCandidate &Fold) const {
   MachineInstr *MI = Fold.UseMI;
   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
   const uint64_t TSFlags = MI->getDesc().TSFlags;
@@ -230,7 +241,7 @@ bool SIFoldOperands::canUseImmWithOpSel(FoldCandidate &Fold) const {
   return true;
 }
 
-bool SIFoldOperands::tryFoldImmWithOpSel(FoldCandidate &Fold) const {
+bool SIFoldOperandsImpl::tryFoldImmWithOpSel(FoldCandidate &Fold) const {
   MachineInstr *MI = Fold.UseMI;
   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
   unsigned Opcode = MI->getOpcode();
@@ -354,7 +365,7 @@ bool SIFoldOperands::tryFoldImmWithOpSel(FoldCandidate &Fold) const {
   return false;
 }
 
-bool SIFoldOperands::updateOperand(FoldCandidate &Fold) const {
+bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const {
   MachineInstr *MI = Fold.UseMI;
   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
   assert(Old.isReg());
@@ -464,9 +475,9 @@ static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
   FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp);
 }
 
-bool SIFoldOperands::tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
-                                      MachineInstr *MI, unsigned OpNo,
-                                      MachineOperand *OpToFold) const {
+bool SIFoldOperandsImpl::tryAddToFoldList(
+    SmallVectorImpl<FoldCandidate> &FoldList, MachineInstr *MI, unsigned OpNo,
+    MachineOperand *OpToFold) const {
   const unsigned Opc = MI->getOpcode();
 
   auto tryToFoldAsFMAAKorMK = [&]() {
@@ -645,8 +656,8 @@ bool SIFoldOperands::tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
   return true;
 }
 
-bool SIFoldOperands::isUseSafeToFold(const MachineInstr &MI,
-                                     const MachineOperand &UseMO) const {
+bool SIFoldOperandsImpl::isUseSafeToFold(const MachineInstr &MI,
+                                         const MachineOperand &UseMO) const {
   // Operands of SDWA instructions must be registers.
   return !TII->isSDWA(MI);
 }
@@ -654,7 +665,7 @@ bool SIFoldOperands::isUseSafeToFold(const MachineInstr &MI,
 // Find a def of the UseReg, check if it is a reg_sequence and find initializers
 // for each subreg, tracking it to foldable inline immediate if possible.
 // Returns true on success.
-bool SIFoldOperands::getRegSeqInit(
+bool SIFoldOperandsImpl::getRegSeqInit(
     SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
     Register UseReg, uint8_t OpTy) const {
   MachineInstr *Def = MRI->getVRegDef(UseReg);
@@ -686,7 +697,7 @@ bool SIFoldOperands::getRegSeqInit(
   return true;
 }
 
-bool SIFoldOperands::tryToFoldACImm(
+bool SIFoldOperandsImpl::tryToFoldACImm(
     const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
     SmallVectorImpl<FoldCandidate> &FoldList) const {
   const MCInstrDesc &Desc = UseMI->getDesc();
@@ -752,12 +763,10 @@ bool SIFoldOperands::tryToFoldACImm(
   return true;
 }
 
-void SIFoldOperands::foldOperand(
-  MachineOperand &OpToFold,
-  MachineInstr *UseMI,
-  int UseOpIdx,
-  SmallVectorImpl<FoldCandidate> &FoldList,
-  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
+void SIFoldOperandsImpl::foldOperand(
+    MachineOperand &OpToFold, MachineInstr *UseMI, int UseOpIdx,
+    SmallVectorImpl<FoldCandidate> &FoldList,
+    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
   const MachineOperand *UseOp = &UseMI->getOperand(UseOpIdx);
 
   if (!isUseSafeToFold(*UseMI, *UseOp))
@@ -1187,7 +1196,7 @@ static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
 }
 
 MachineOperand *
-SIFoldOperands::getImmOrMaterializedImm(MachineOperand &Op) const {
+SIFoldOperandsImpl::getImmOrMaterializedImm(MachineOperand &Op) const {
   // If this has a subregister, it obviously is a register source.
   if (!Op.isReg() || Op.getSubReg() != AMDGPU::NoSubRegister ||
       !Op.getReg().isVirtual())
@@ -1206,7 +1215,7 @@ SIFoldOperands::getImmOrMaterializedImm(MachineOperand &Op) const {
 // Try to simplify operations with a constant that may appear after instruction
 // selection.
 // TODO: See if a frame index with a fixed offset can fold.
-bool SIFoldOperands::tryConstantFoldOp(MachineInstr *MI) const {
+bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
   if (!MI->allImplicitDefsAreDead())
     return false;
 
@@ -1307,7 +1316,7 @@ bool SIFoldOperands::tryConstantFoldOp(MachineInstr *MI) const {
 }
 
 // Try to fold an instruction into a simpler one
-bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
+bool SIFoldOperandsImpl::tryFoldCndMask(MachineInstr &MI) const {
   unsigned Opc = MI.getOpcode();
   if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 &&
       Opc != AMDGPU::V_CNDMASK_B64_PSEUDO)
@@ -1346,7 +1355,7 @@ bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
   return true;
 }
 
-bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
+bool SIFoldOperandsImpl::tryFoldZeroHighBits(MachineInstr &MI) const {
   if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 &&
       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
     return false;
@@ -1368,8 +1377,8 @@ bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
   return true;
 }
 
-bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
-                                     MachineOperand &OpToFold) const {
+bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
+                                         MachineOperand &OpToFold) const {
   // We need mutate the operands of new mov instructions to add implicit
   // uses of EXEC, but adding them invalidates the use_iterator, so defer
   // this.
@@ -1442,7 +1451,7 @@ bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
   return true;
 }
 
-bool SIFoldOperands::tryFoldFoldableCopy(
+bool SIFoldOperandsImpl::tryFoldFoldableCopy(
     MachineInstr &MI, MachineOperand *&CurrentKnownM0Val) const {
   // Specially track simple redefs of m0 to the same value in a block, so we
   // can erase the later ones.
@@ -1519,7 +1528,8 @@ bool SIFoldOperands::tryFoldFoldableCopy(
 
 // Clamp patterns are canonically selected to v_max_* instructions, so only
 // handle them.
-const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
+const MachineOperand *
+SIFoldOperandsImpl::isClamp(const MachineInstr &MI) const {
   unsigned Op = MI.getOpcode();
   switch (Op) {
   case AMDGPU::V_MAX_F32_e64:
@@ -1567,7 +1577,7 @@ const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
 }
 
 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
-bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
+bool SIFoldOperandsImpl::tryFoldClamp(MachineInstr &MI) {
   const MachineOperand *ClampSrc = isClamp(MI);
   if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
     return false;
@@ -1662,7 +1672,7 @@ static int getOModValue(unsigned Opc, int64_t Val) {
 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
 // handled, so will anything other than that break?
 std::pair<const MachineOperand *, int>
-SIFoldOperands::isOMod(const MachineInstr &MI) const {
+SIFoldOperandsImpl::isOMod(const MachineInstr &MI) const {
   unsigned Op = MI.getOpcode();
   switch (Op) {
   case AMDGPU::V_MUL_F64_e64:
@@ -1740,7 +1750,7 @@ SIFoldOperands::isOMod(const MachineInstr &MI) const {
 }
 
 // FIXME: Does this need to check IEEE bit on function?
-bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
+bool SIFoldOperandsImpl::tryFoldOMod(MachineInstr &MI) {
   const MachineOperand *RegOp;
   int OMod;
   std::tie(RegOp, OMod) = isOMod(MI);
@@ -1779,7 +1789,7 @@ bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
 
 // Try to fold a reg_sequence with vgpr output and agpr inputs into an
 // instruction which can take an agpr. So far that means a store.
-bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
+bool SIFoldOperandsImpl::tryFoldRegSequence(MachineInstr &MI) {
   assert(MI.isRegSequence());
   auto Reg = MI.getOperand(0).getReg();
 
@@ -1926,7 +1936,7 @@ static bool isAGPRCopy(const SIRegisterInfo &TRI,
 //      loop:
 //        %3:areg = PHI %2:areg, %entry, %X:areg,
 //        %4:areg = (instr using %3:areg)
-bool SIFoldOperands::tryFoldPhiAGPR(MachineInstr &PHI) {
+bool SIFoldOperandsImpl::tryFoldPhiAGPR(MachineInstr &PHI) {
   assert(PHI.isPHI());
 
   Register PhiOut = PHI.getOperand(0).getReg();
@@ -2030,7 +2040,7 @@ bool SIFoldOperands::tryFoldPhiAGPR(MachineInstr &PHI) {
 }
 
 // Attempt to convert VGPR load to an AGPR load.
-bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
+bool SIFoldOperandsImpl::tryFoldLoad(MachineInstr &MI) {
   assert(MI.mayLoad());
   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
     return false;
@@ -2117,7 +2127,7 @@ bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
 //        %0:areg = PHI %tmp_agpr, %a, %x, %c
 //        %1:areg = PHI %tmp_agpr, %a, %y, %c
 //        %2:areg = PHI %tmp_agpr, %a, %z, %c
-bool SIFoldOperands::tryOptimizeAGPRPhis(MachineBasicBlock &MBB) {
+bool SIFoldOperandsImpl::tryOptimizeAGPRPhis(MachineBasicBlock &MBB) {
   // This is only really needed on GFX908 where AGPR-AGPR copies are
   // unreasonably difficult.
   if (ST->hasGFX90AInsts())
@@ -2182,10 +2192,7 @@ bool SIFoldOperands::tryOptimizeAGPRPhis(MachineBasicBlock &MBB) {
   return Changed;
 }
 
-bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(MF.getFunction()))
-    return false;
-
+bool SIFoldOperandsImpl::run(MachineFunction &MF) {
   MRI = &MF.getRegInfo();
   ST = &MF.getSubtarget<GCNSubtarget>();
   TII = ST->getInstrInfo();
@@ -2246,3 +2253,14 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
 
   return Changed;
 }
+
+PreservedAnalyses SIFoldOperandsPass::run(MachineFunction &MF,
+                                          MachineFunctionAnalysisManager &) {
+  bool Changed = SIFoldOperandsImpl().run(MF);
+  if (!Changed) {
+    return PreservedAnalyses::all();
+  }
+  auto PA = getMachineFunctionPassPreservedAnalyses();
+  PA.preserveSet<CFGAnalyses>();
+  return PA;
+}
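
On the no-change path, PreservedAnalyses::all() lets the analysis manager
keep every cached result. Otherwise the pass reports the standard
machine-function preservation set plus CFGAnalyses, since operand folding
rewrites instructions but never touches the block graph. A minimal sketch of
driving the new class programmatically (a hypothetical snippet, not in this
commit):

    #include "SIFoldOperands.h"
    #include "llvm/CodeGen/MachinePassManager.h"
    using namespace llvm;

    static void addSIFolding(MachineFunctionPassManager &MFPM) {
      MFPM.addPass(SIFoldOperandsPass()); // the new-PM pass added here
    }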

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.h b/llvm/lib/Target/AMDGPU/SIFoldOperands.h
new file mode 100644
index 00000000000000..d6b8f6a7295260
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.h
@@ -0,0 +1,23 @@
+//===- SIFoldOperands.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_SIFOLDOPERANDS_H
+#define LLVM_LIB_TARGET_AMDGPU_SIFOLDOPERANDS_H
+
+#include "llvm/CodeGen/MachinePassManager.h"
+
+namespace llvm {
+class SIFoldOperandsPass : public PassInfoMixin<SIFoldOperandsPass> {
+public:
+  SIFoldOperandsPass() = default;
+  PreservedAnalyses run(MachineFunction &MF,
+                        MachineFunctionAnalysisManager &MFAM);
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_SIFOLDOPERANDS_H

diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-scalar-clamp.mir b/llvm/test/CodeGen/AMDGPU/si-fold-scalar-clamp.mir
index 1f4d046a8739fa..381831ee9b2b4f 100644
--- a/llvm/test/CodeGen/AMDGPU/si-fold-scalar-clamp.mir
+++ b/llvm/test/CodeGen/AMDGPU/si-fold-scalar-clamp.mir
@@ -1,5 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
 # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -run-pass=si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
 ---
 name:            test
 tracksRegLiveness: true

diff --git a/llvm/test/CodeGen/AMDGPU/skip-fold-regsequence.mir b/llvm/test/CodeGen/AMDGPU/skip-fold-regsequence.mir
index 4803566441483d..0ea7618df306e9 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-fold-regsequence.mir
+++ b/llvm/test/CodeGen/AMDGPU/skip-fold-regsequence.mir
@@ -1,5 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -run-pass si-fold-operands -o - %s | FileCheck -check-prefix=GCN %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -passes si-fold-operands -o - %s | FileCheck -check-prefix=GCN %s
 
 # Skip folding a REG_SEQUENCE to its user when the regclasses for the user operands can't be
 # fully determined from the instruction description.
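
Each test now carries paired RUN lines so the legacy (-run-pass) and new-PM
(-passes) drivers are checked against the same FileCheck output. Running the
new-PM side by hand looks like (path illustrative):

    llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=si-fold-operands \
        -o - llvm/test/CodeGen/AMDGPU/skip-fold-regsequence.mir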


        

