[llvm] [AMDGPU] Allow sinking of free vector ops (PR #162580)

Shilei Tian via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 14 21:03:17 PDT 2025


================
@@ -1301,6 +1301,87 @@ bool GCNTTIImpl::isProfitableToSinkOperands(Instruction *I,
 
     if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
       Ops.push_back(&Op);
+
+    // Zero-cost vector instructions (e.g. extractelement of index 0 of an i32
+    // vector) will be optimized away, and sinking them can help SDAG combines.
+    const DataLayout &DL = I->getModule()->getDataLayout();
+
+    uint64_t VecIndex;
+    Value *Vec;
+    if (match(Op.get(), m_ExtractElt(m_Value(Vec), m_ConstantInt(VecIndex)))) {
+      Instruction *OpInst = cast<Instruction>(Op.get());
+      Instruction *VecOpInst = dyn_cast<Instruction>(OpInst->getOperand(0));
+      // If a zero-cost extractelement instruction is the only use of the
+      // vector, then it may be combined with the vector's def.
+      if (VecOpInst && VecOpInst->hasOneUse())
+        continue;
+
+      if (getVectorInstrCost(OpInst->getOpcode(), Vec->getType(),
+                             TTI::TCK_RecipThroughput, VecIndex,
+                             OpInst->getOperand(0), OpInst->getOperand(1)) == 0)
+        Ops.push_back(&Op);
+
+      continue;
+    }
+
+    if (match(Op.get(),
+              m_InsertElt(m_Value(Vec), m_Value(), m_ConstantInt(VecIndex)))) {
+      Instruction *OpInst = cast<Instruction>(Op.get());
+      if (getVectorInstrCost(OpInst->getOpcode(), Vec->getType(),
+                             TTI::TCK_RecipThroughput, VecIndex,
+                             OpInst->getOperand(0), OpInst->getOperand(1)) == 0)
+        Ops.push_back(&Op);
+
+      continue;
+    }
+
+    if (auto *Shuffle = dyn_cast<ShuffleVectorInst>(Op.get())) {
+      if (Shuffle->isIdentity()) {
+        Ops.push_back(&Op);
+        continue;
+      }
+
+      unsigned EltSize = DL.getTypeSizeInBits(
+          cast<VectorType>(Shuffle->getType())
+              ->getElementType());
+
+      // Shufflevectors with i32 (or wider) elements will be lowered into a
+      // series of insert / extract elements, which will be coalesced away.
+      if (EltSize >= 32) {
+        Ops.push_back(&Op);
+        continue;
+      }
+
+      if (EltSize < 16 || !ST->has16BitInsts())
+        continue;
+
+      int NumSubElts, SubIndex;
+      if (Shuffle->changesLength()) {
+        if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding()) {
+          Ops.push_back(&Op);
+          continue;
+        }
+
+        if ((Shuffle->isExtractSubvectorMask(SubIndex) ||
+             Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex)) &&
+            !(SubIndex % 2)) {
+          Ops.push_back(&Op);
+          continue;
+        }
+      }
+
+      if (Shuffle->isReverse() || Shuffle->isZeroEltSplat() ||
+          Shuffle->isSingleSource()) {
+        Ops.push_back(&Op);
+        continue;
+      }
+
+      if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex) &&
+          !(SubIndex % 2)) {
----------------
shiltian wrote:

here as well?

https://github.com/llvm/llvm-project/pull/162580
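
For illustration, a minimal IR sketch of the kind of free vector operands the
new checks would report as profitable to sink. This is a hypothetical example,
not taken from the patch or its tests; the function @sink_free_vector_ops and
the values %v, %cond, and %out are made up. %elt (extractelement of element 0
of an i32 vector) and %id (an identity shufflevector) are defined in %entry but
only used in %use, so CodeGenPrepare could sink them next to their users before
SelectionDAG runs.

    define amdgpu_kernel void @sink_free_vector_ops(<2 x i32> %v, i1 %cond, ptr addrspace(1) %out) {
    entry:
      ; Both operands below are free per the checks above: extractelement of
      ; element 0 of an i32 vector, and an identity shufflevector.
      %elt = extractelement <2 x i32> %v, i64 0
      %id = shufflevector <2 x i32> %v, <2 x i32> poison, <2 x i32> <i32 0, i32 1>
      br i1 %cond, label %use, label %exit

    use:
      ; Their only users are here, so sinking keeps the free ops next to the
      ; instructions SelectionDAG will combine them with.
      %sum = add i32 %elt, 1
      store i32 %sum, ptr addrspace(1) %out
      %out.vec = getelementptr inbounds i8, ptr addrspace(1) %out, i64 8
      store <2 x i32> %id, ptr addrspace(1) %out.vec
      br label %exit

    exit:
      ret void
    }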

