[llvm] [AMDGPU] Implement vop3p complex pattern optimization for gisel (PR #130234)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 11 20:18:33 PDT 2025


https://github.com/Shoreshen updated https://github.com/llvm/llvm-project/pull/130234

>From 556f7ff7836e4d884c64bc87bcef80d1687ccf86 Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Fri, 7 Mar 2025 12:27:45 +0800
Subject: [PATCH 1/4] Implement vop3p complex pattern optimization for gisel

---
 .../AMDGPU/AMDGPUInstructionSelector.cpp      | 381 ++++++++++++++++--
 .../Target/AMDGPU/AMDGPUInstructionSelector.h |   4 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.fdot2.ll    |   3 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll    |  24 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll    |   6 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll    |  12 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll    |  36 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll    |  12 +-
 .../AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll    |  12 +-
 llvm/test/CodeGen/AMDGPU/packed-fp32.ll       |  10 +-
 llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll   |  11 +-
 llvm/test/lit.cfg.py                          |   2 +-
 12 files changed, 400 insertions(+), 113 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 441fb5730a6d8..0dc47b957bdac 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -4282,30 +4282,346 @@ AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
   }};
 }
 
-std::pair<Register, unsigned>
-AMDGPUInstructionSelector::selectVOP3PModsImpl(
-  Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
-  unsigned Mods = 0;
-  MachineInstr *MI = MRI.getVRegDef(Src);
+enum srcStatus {
+  IS_SAME,
+  IS_UPPER_HALF,
+  IS_LOWER_HALF,
+  IS_NEG,
+  IS_UPPER_HALF_NEG,
+  IS_LOWER_HALF_NEG,
+  LAST_STAT = IS_LOWER_HALF_NEG
+};
+
+bool isTruncHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+  assert(MI->getOpcode() == AMDGPU::G_TRUNC);
+  unsigned dstSize = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
+  unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+  return dstSize * 2 == srcSize;
+}
+
+bool isLshrHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+  assert(MI->getOpcode() == AMDGPU::G_LSHR);
+  Register ShiftSrc;
+  std::optional<ValueAndVReg> ShiftAmt;
+  if (mi_match(MI->getOperand(0).getReg(), MRI,
+               m_GLShr(m_Reg(ShiftSrc), m_GCst(ShiftAmt)))) {
+    unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+    unsigned shift = ShiftAmt->Value.getZExtValue();
+    return shift * 2 == srcSize;
+  }
+  return false;
+}
 
-  if (MI->getOpcode() == AMDGPU::G_FNEG &&
-      // It's possible to see an f32 fneg here, but unlikely.
-      // TODO: Treat f32 fneg as only high bit.
-      MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
-    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
-    Src = MI->getOperand(1).getReg();
-    MI = MRI.getVRegDef(Src);
+bool isShlHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+  assert(MI->getOpcode() == AMDGPU::G_SHL);
+  Register ShiftSrc;
+  std::optional<ValueAndVReg> ShiftAmt;
+  if (mi_match(MI->getOperand(0).getReg(), MRI,
+               m_GShl(m_Reg(ShiftSrc), m_GCst(ShiftAmt)))) {
+    unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+    unsigned shift = ShiftAmt->Value.getZExtValue();
+    return shift * 2 == srcSize;
+  }
+  return false;
+}
+
+bool retOpStat(MachineOperand *Op, int stat,
+               std::pair<MachineOperand *, int> &curr) {
+  if ((Op->isReg() && !(Op->getReg().isPhysical())) || Op->isImm() ||
+      Op->isCImm() || Op->isFPImm()) {
+    curr = {Op, stat};
+    return true;
+  }
+  return false;
+}
+
+bool calcNextStatus(std::pair<MachineOperand *, int> &curr,
+                    const MachineRegisterInfo &MRI) {
+  if (!curr.first->isReg()) {
+    return false;
+  }
+  MachineInstr *MI = nullptr;
+
+  if (!curr.first->isDef()) {
+    // MRI.getVRegDef falls into infinite loop if use define reg
+    MI = MRI.getVRegDef(curr.first->getReg());
+  } else {
+    MI = curr.first->getParent();
+  }
+  if (!MI) {
+    return false;
+  }
+
+  unsigned Opc = MI->getOpcode();
+
+  // Handle general Opc cases
+  switch (Opc) {
+  case AMDGPU::G_BITCAST:
+  case AMDGPU::G_CONSTANT:
+  case AMDGPU::G_FCONSTANT:
+  case AMDGPU::COPY:
+    return retOpStat(&MI->getOperand(1), curr.second, curr);
+  case AMDGPU::G_FNEG:
+    // XXXX + 3 = XXXX_NEG, (XXXX_NEG + 3) mod 3 = XXXX
+    return retOpStat(&MI->getOperand(1),
+                     (curr.second + ((LAST_STAT + 1) / 2)) % (LAST_STAT + 1),
+                     curr);
+  }
+
+  // Calc next stat from current stat
+  switch (curr.second) {
+  case IS_SAME:
+    switch (Opc) {
+    case AMDGPU::G_TRUNC: {
+      if (isTruncHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_NEG:
+    switch (Opc) {
+    case AMDGPU::G_TRUNC: {
+      if (isTruncHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_UPPER_HALF:
+    switch (Opc) {
+    case AMDGPU::G_SHL: {
+      if (isShlHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_LOWER_HALF:
+    switch (Opc) {
+    case AMDGPU::G_LSHR: {
+      if (isLshrHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_UPPER_HALF, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_UPPER_HALF_NEG:
+    switch (Opc) {
+    case AMDGPU::G_SHL: {
+      if (isShlHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_LOWER_HALF_NEG:
+    switch (Opc) {
+    case AMDGPU::G_LSHR: {
+      if (isLshrHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_UPPER_HALF_NEG, curr);
+      }
+      break;
+    }
+    }
+    break;
+  }
+  return false;
+}
+
+std::vector<std::pair<MachineOperand *, int>>
+getSrcStats(MachineOperand *Op, const MachineRegisterInfo &MRI,
+            bool onlyLastSameOrNeg = false, int maxDepth = 6) {
+  int depth = 0;
+  std::pair<MachineOperand *, int> curr = {Op, IS_SAME};
+  std::vector<std::pair<MachineOperand *, int>> statList;
+
+  while (true) {
+    depth++;
+    if (depth > maxDepth) {
+      break;
+    }
+    bool ret = calcNextStatus(curr, MRI);
+    if (!ret || (onlyLastSameOrNeg &&
+                 (curr.second != IS_SAME && curr.second != IS_NEG))) {
+      break;
+    } else if (!onlyLastSameOrNeg) {
+      statList.push_back(curr);
+    }
   }
+  if (onlyLastSameOrNeg) {
+    statList.push_back(curr);
+  }
+  return statList;
+}
 
-  // TODO: Handle G_FSUB 0 as fneg
+bool isInlinableConstant(MachineOperand *Op, const SIInstrInfo &TII) {
+  bool a = TII.isInlineConstant(*Op);
+  switch (Op->getType()) {
+  case MachineOperand::MachineOperandType::MO_Immediate:
+    return TII.isInlineConstant(*Op);
+  case MachineOperand::MachineOperandType::MO_CImmediate:
+    return TII.isInlineConstant(Op->getCImm()->getValue());
+  case MachineOperand::MachineOperandType::MO_FPImmediate:
+    return TII.isInlineConstant(Op->getFPImm()->getValueAPF());
+  }
+  return false;
+}
 
-  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
-  (void)IsDOT; // DOTs do not use OPSEL on gfx942+, check ST.hasDOTOpSelHazard()
+bool isSameBitWidth(MachineOperand *Op1, MachineOperand *Op2,
+                    const MachineRegisterInfo &MRI) {
+  unsigned width1 = MRI.getType(Op1->getReg()).getSizeInBits();
+  unsigned width2 = MRI.getType(Op2->getReg()).getSizeInBits();
+  return width1 == width2;
+}
 
+bool isSameOperand(MachineOperand *Op1, MachineOperand *Op2) {
+  if (Op1->isReg()) {
+    if (Op2->isReg()) {
+      return Op1->getReg() == Op2->getReg();
+    }
+    return false;
+  }
+  return Op1->isIdenticalTo(*Op2);
+}
+
+bool validToPack(int HiStat, int LoStat, unsigned int &Mods,
+                 MachineOperand *newOp, MachineOperand *RootOp,
+                 const SIInstrInfo &TII, const MachineRegisterInfo &MRI) {
+  if (newOp->isReg()) {
+    if (isSameBitWidth(newOp, RootOp, MRI)) {
+      // IS_LOWER_HALF remain 0
+      if (HiStat == IS_UPPER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG_HI;
+        Mods |= SISrcMods::OP_SEL_1;
+      } else if (HiStat == IS_UPPER_HALF) {
+        Mods |= SISrcMods::OP_SEL_1;
+      } else if (HiStat == IS_LOWER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG_HI;
+      }
+      if (LoStat == IS_UPPER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG;
+        Mods |= SISrcMods::OP_SEL_0;
+      } else if (LoStat == IS_UPPER_HALF) {
+        Mods |= SISrcMods::OP_SEL_0;
+      } else if (LoStat == IS_UPPER_HALF_NEG) {
+        Mods |= SISrcMods::NEG;
+      }
+      return true;
+    }
+  } else {
+    if ((HiStat == IS_SAME || HiStat == IS_NEG) &&
+        (LoStat == IS_SAME || LoStat == IS_NEG) &&
+        isInlinableConstant(newOp, TII)) {
+      if (HiStat == IS_NEG) {
+        Mods ^= SISrcMods::NEG_HI;
+      }
+      if (LoStat == IS_NEG) {
+        Mods ^= SISrcMods::NEG;
+      }
+      // opsel = opsel_hi = 0, since the upper half and lower half both
+      // the same as the target inlinable constant
+      return true;
+    }
+  }
+  return false;
+}
+
+std::pair<MachineOperand *, unsigned>
+AMDGPUInstructionSelector::selectVOP3PModsImpl(MachineOperand *Op,
+                                               const MachineRegisterInfo &MRI,
+                                               bool IsDOT) const {
+  unsigned Mods = 0;
+  MachineOperand *RootOp = Op;
+  std::pair<MachineOperand *, int> stat = getSrcStats(Op, MRI, true)[0];
+  if (!stat.first->isReg()) {
+    Mods |= SISrcMods::OP_SEL_1;
+    return {Op, Mods};
+  }
+  if (stat.second == IS_NEG) {
+    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
+  }
+  Op = stat.first;
+  MachineInstr *MI = MRI.getVRegDef(Op->getReg());
+  if (MI->getOpcode() == AMDGPU::G_BUILD_VECTOR && MI->getNumOperands() == 3 &&
+      (!IsDOT || !Subtarget->hasDOTOpSelHazard())) {
+    std::vector<std::pair<MachineOperand *, int>> statList_Hi;
+    std::vector<std::pair<MachineOperand *, int>> statList_Lo;
+    statList_Hi = getSrcStats(&MI->getOperand(2), MRI);
+    if (statList_Hi.size() != 0) {
+      statList_Lo = getSrcStats(&MI->getOperand(1), MRI);
+      if (statList_Lo.size() != 0) {
+        for (int i = statList_Hi.size() - 1; i >= 0; i--) {
+          for (int j = statList_Lo.size() - 1; j >= 0; j--) {
+            if (isSameOperand(statList_Hi[i].first, statList_Lo[j].first)) {
+              if (validToPack(statList_Hi[i].second, statList_Lo[j].second,
+                              Mods, statList_Hi[i].first, RootOp, TII, MRI)) {
+                return {statList_Hi[i].first, Mods};
+              }
+            }
+          }
+        }
+      }
+    }
+  }
   // Packed instructions do not have abs modifiers.
   Mods |= SISrcMods::OP_SEL_1;
 
-  return std::pair(Src, Mods);
+  return {Op, Mods};
+}
+
+int64_t getAllKindImm(MachineOperand *Op) {
+  switch (Op->getType()) {
+  case MachineOperand::MachineOperandType::MO_Immediate:
+    return Op->getImm();
+  case MachineOperand::MachineOperandType::MO_CImmediate:
+    return Op->getCImm()->getSExtValue();
+    break;
+  case MachineOperand::MachineOperandType::MO_FPImmediate:
+    return Op->getFPImm()->getValueAPF().bitcastToAPInt().getSExtValue();
+    break;
+  }
+  llvm_unreachable("not an imm type");
+}
+
+bool checkRB(MachineOperand *Op, int RBNo, const AMDGPURegisterBankInfo &RBI,
+             const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) {
+  const RegisterBank *RB = RBI.getRegBank(Op->getReg(), MRI, TRI);
+  return RB->getID() == RBNo;
+}
+
+MachineOperand *getVReg(MachineOperand *newOp, MachineOperand *RootOp,
+                        const AMDGPURegisterBankInfo &RBI,
+                        MachineRegisterInfo &MRI,
+                        const TargetRegisterInfo &TRI) {
+  // RootOp can only be VGPR or SGPR (some hand written cases such as
+  // inst-select-ashr.v2s16.mir::ashr_v2s16_vs)
+  if (checkRB(RootOp, AMDGPU::SGPRRegBankID, RBI, MRI, TRI) ||
+      checkRB(newOp, AMDGPU::VGPRRegBankID, RBI, MRI, TRI)) {
+    return newOp;
+  }
+  MachineInstr *MI = MRI.getVRegDef(RootOp->getReg());
+  if (MI->getOpcode() == AMDGPU::COPY &&
+      isSameOperand(newOp, &MI->getOperand(1))) {
+    // RootOp is VGPR, newOp is not VGPR, but RootOp = COPY newOp
+    return RootOp;
+  }
+
+  const TargetRegisterClass *DstRC =
+      TRI.getConstrainedRegClassForOperand(*RootOp, MRI);
+  Register dstReg = MRI.createVirtualRegister(DstRC);
+
+  MachineIRBuilder B(*RootOp->getParent());
+  MachineInstrBuilder MIB =
+      B.buildInstr(AMDGPU::COPY).addDef(dstReg).addUse(newOp->getReg());
+
+  // only accept VGPR
+  return &MIB->getOperand(0);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -4313,13 +4629,17 @@ AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
   MachineRegisterInfo &MRI
     = Root.getParent()->getParent()->getParent()->getRegInfo();
 
-  Register Src;
-  unsigned Mods;
-  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
-
+  std::pair<MachineOperand *, unsigned> res = selectVOP3PModsImpl(&Root, MRI);
+  if (!(res.first->isReg())) {
+    return {{
+        [=](MachineInstrBuilder &MIB) { MIB.addImm(getAllKindImm(res.first)); },
+        [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
+    }};
+  }
+  res.first = getVReg(res.first, &Root, RBI, MRI, TRI);
   return {{
-      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
-      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
+      [=](MachineInstrBuilder &MIB) { MIB.addReg(res.first->getReg()); },
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
   }};
 }
 
@@ -4328,13 +4648,18 @@ AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
   MachineRegisterInfo &MRI
     = Root.getParent()->getParent()->getParent()->getRegInfo();
 
-  Register Src;
-  unsigned Mods;
-  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
-
+  std::pair<MachineOperand *, unsigned> res =
+      selectVOP3PModsImpl(&Root, MRI, true);
+  if (!(res.first->isReg())) {
+    return {{
+        [=](MachineInstrBuilder &MIB) { MIB.addImm(getAllKindImm(res.first)); },
+        [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
+    }};
+  }
+  res.first = getVReg(res.first, &Root, RBI, MRI, TRI);
   return {{
-      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
-      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
+      [=](MachineInstrBuilder &MIB) { MIB.addReg(res.first->getReg()); },
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
   }};
 }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index cc7552868a056..2af4f55403acc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -187,8 +187,8 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
 
   ComplexRendererFns selectVOP3NoMods(MachineOperand &Root) const;
 
-  std::pair<Register, unsigned>
-  selectVOP3PModsImpl(Register Src, const MachineRegisterInfo &MRI,
+  std::pair<MachineOperand *, unsigned>
+  selectVOP3PModsImpl(MachineOperand *Op, const MachineRegisterInfo &MRI,
                       bool IsDOT = false) const;
 
   InstructionSelector::ComplexRendererFns
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.fdot2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.fdot2.ll
index 1d9514c58ab9c..2243c57cf37ac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.fdot2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.fdot2.ll
@@ -68,8 +68,7 @@ define float @v_fdot2_neg_c(<2 x half> %a, <2 x half> %b, float %c) {
 ; GFX906-LABEL: v_fdot2_neg_c:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX906-NEXT:    v_dot2_f32_f16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_f32_f16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
   %neg.c = fneg float %c
   %r = call float @llvm.amdgcn.fdot2(<2 x half> %a, <2 x half> %b, float %neg.c, i1 false)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll
index e2dab03e410aa..7d6cfac52714e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll
@@ -248,8 +248,7 @@ define i32 @v_sdot2_fnegf32_c(<2 x i16> %a, <2 x i16> %b, float %c) {
 ; GFX906-LABEL: v_sdot2_fnegf32_c:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_sdot2_fnegf32_c:
@@ -263,8 +262,7 @@ define i32 @v_sdot2_fnegf32_c(<2 x i16> %a, <2 x i16> %b, float %c) {
 ; GFX10-LABEL: v_sdot2_fnegf32_c:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %neg.c = fneg float %c
   %cast.neg.c = bitcast float %neg.c to i32
@@ -276,8 +274,7 @@ define i32 @v_sdot2_fnegv2f16_c(<2 x i16> %a, <2 x i16> %b, <2 x half> %c) {
 ; GFX906-LABEL: v_sdot2_fnegv2f16_c:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v2, 0x80008000, v2
-; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_sdot2_fnegv2f16_c:
@@ -291,8 +288,7 @@ define i32 @v_sdot2_fnegv2f16_c(<2 x i16> %a, <2 x i16> %b, <2 x half> %c) {
 ; GFX10-LABEL: v_sdot2_fnegv2f16_c:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_xor_b32_e32 v2, 0x80008000, v2
-; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %neg.c = fneg <2 x half> %c
   %cast.neg.c = bitcast <2 x half> %neg.c to i32
@@ -304,8 +300,7 @@ define i32 @v_sdot2_shuffle10_a(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 ; GFX906-LABEL: v_sdot2_shuffle10_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_alignbit_b32 v0, v0, v0, 16
-; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 op_sel:[1,0,0] op_sel_hi:[0,1,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_sdot2_shuffle10_a:
@@ -319,8 +314,7 @@ define i32 @v_sdot2_shuffle10_a(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 ; GFX10-LABEL: v_sdot2_shuffle10_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_alignbit_b32 v0, v0, v0, 16
-; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 op_sel:[1,0,0] op_sel_hi:[0,1,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %shuf.a = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
   %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %shuf.a, <2 x i16> %b, i32 %c, i1 false)
@@ -331,8 +325,7 @@ define i32 @v_sdot2_shuffle10_b(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 ; GFX906-LABEL: v_sdot2_shuffle10_b:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_alignbit_b32 v1, v1, v1, 16
-; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 op_sel:[0,1,0] op_sel_hi:[1,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_sdot2_shuffle10_b:
@@ -346,8 +339,7 @@ define i32 @v_sdot2_shuffle10_b(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 ; GFX10-LABEL: v_sdot2_shuffle10_b:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_alignbit_b32 v1, v1, v1, 16
-; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_i32_i16 v0, v0, v1, v2 op_sel:[0,1,0] op_sel_hi:[1,0,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %shuf.b = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
   %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %shuf.b, i32 %c, i1 false)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll
index 06560afee3c9a..d6ef48e25cafb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll
@@ -91,8 +91,7 @@ define i32 @v_sdot4_fnegf32_a(float %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_sdot4_fnegf32_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX906-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_sdot4_fnegf32_a:
@@ -112,8 +111,7 @@ define i32 @v_sdot4_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_sdot4_fnegv2f16_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX906-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_sdot4_fnegv2f16_a:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll
index 0d729351f65a7..d2aa47df81cbe 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll
@@ -47,15 +47,13 @@ define i32 @v_sdot8_fnegf32_a(float %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_sdot8_fnegf32_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX906-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_sdot8_fnegf32_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX10-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %neg.a = fneg float %a
   %cast.neg.a = bitcast float %neg.a to i32
@@ -67,15 +65,13 @@ define i32 @v_sdot8_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_sdot8_fnegv2f16_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX906-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_sdot8_fnegv2f16_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX10-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot8_i32_i4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %neg.a = fneg <2 x half> %a
   %cast.neg.a = bitcast <2 x half> %neg.a to i32
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll
index 3acff52874dd9..347644826fd0c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll
@@ -235,22 +235,19 @@ define i32 @v_udot2_fnegf32_c(<2 x i16> %a, <2 x i16> %b, float %c) {
 ; GFX906-LABEL: v_udot2_fnegf32_c:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_udot2_fnegf32_c:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX908-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_udot2_fnegf32_c:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %neg.c = fneg float %c
   %cast.neg.c = bitcast float %neg.c to i32
@@ -262,22 +259,19 @@ define i32 @v_udot2_fnegv2f16_c(<2 x i16> %a, <2 x i16> %b, <2 x half> %c) {
 ; GFX906-LABEL: v_udot2_fnegv2f16_c:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v2, 0x80008000, v2
-; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_udot2_fnegv2f16_c:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT:    v_xor_b32_e32 v2, 0x80008000, v2
-; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX908-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_udot2_fnegv2f16_c:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_xor_b32_e32 v2, 0x80008000, v2
-; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %neg.c = fneg <2 x half> %c
   %cast.neg.c = bitcast <2 x half> %neg.c to i32
@@ -289,22 +283,19 @@ define i32 @v_udot2_shuffle10_a(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 ; GFX906-LABEL: v_udot2_shuffle10_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_alignbit_b32 v0, v0, v0, 16
-; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 op_sel:[1,0,0] op_sel_hi:[0,1,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_udot2_shuffle10_a:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT:    v_alignbit_b32 v0, v0, v0, 16
-; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 op_sel:[1,0,0] op_sel_hi:[0,1,1]
 ; GFX908-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_udot2_shuffle10_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_alignbit_b32 v0, v0, v0, 16
-; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 op_sel:[1,0,0] op_sel_hi:[0,1,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %shuf.a = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
   %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %shuf.a, <2 x i16> %b, i32 %c, i1 false)
@@ -315,22 +306,19 @@ define i32 @v_udot2_shuffle10_b(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 ; GFX906-LABEL: v_udot2_shuffle10_b:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_alignbit_b32 v1, v1, v1, 16
-; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 op_sel:[0,1,0] op_sel_hi:[1,0,1]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX908-LABEL: v_udot2_shuffle10_b:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT:    v_alignbit_b32 v1, v1, v1, 16
-; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX908-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 op_sel:[0,1,0] op_sel_hi:[1,0,1]
 ; GFX908-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_udot2_shuffle10_b:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_alignbit_b32 v1, v1, v1, 16
-; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2
+; GFX10-NEXT:    v_dot2_u32_u16 v0, v0, v1, v2 op_sel:[0,1,0] op_sel_hi:[1,0,1]
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %shuf.b = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
   %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %shuf.b, i32 %c, i1 false)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll
index b14af9e043e09..7ad0404942feb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll
@@ -112,15 +112,13 @@ define i32 @v_udot4_fnegf32_a(float %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_udot4_fnegf32_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX906-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10PLUS-LABEL: v_udot4_fnegf32_a:
 ; GFX10PLUS:       ; %bb.0:
 ; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX10PLUS-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2
+; GFX10PLUS-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %neg.a = fneg float %a
   %cast.neg.a = bitcast float %neg.a to i32
@@ -132,15 +130,13 @@ define i32 @v_udot4_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_udot4_fnegv2f16_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX906-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10PLUS-LABEL: v_udot4_fnegv2f16_a:
 ; GFX10PLUS:       ; %bb.0:
 ; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX10PLUS-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2
+; GFX10PLUS-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %neg.a = fneg <2 x half> %a
   %cast.neg.a = bitcast <2 x half> %neg.a to i32
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll
index a664c8aa508ef..52763bbc24e40 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll
@@ -48,15 +48,13 @@ define i32 @v_udot8_fnegf32_a(float %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_udot8_fnegf32_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX906-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10PLUS-LABEL: v_udot8_fnegf32_a:
 ; GFX10PLUS:       ; %bb.0:
 ; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX10PLUS-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2
+; GFX10PLUS-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %neg.a = fneg float %a
   %cast.neg.a = bitcast float %neg.a to i32
@@ -68,15 +66,13 @@ define i32 @v_udot8_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) {
 ; GFX906-LABEL: v_udot8_fnegv2f16_a:
 ; GFX906:       ; %bb.0:
 ; GFX906-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX906-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2
+; GFX906-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX906-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10PLUS-LABEL: v_udot8_fnegv2f16_a:
 ; GFX10PLUS:       ; %bb.0:
 ; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; GFX10PLUS-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2
+; GFX10PLUS-NEXT:    v_dot8_u32_u4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0]
 ; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %neg.a = fneg <2 x half> %a
   %cast.neg.a = bitcast <2 x half> %neg.a to i32
diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
index 9b03a72fd826d..0577ba9b233be 100644
--- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
+++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -87,7 +87,7 @@ define amdgpu_kernel void @fadd_v2_v_v_splat(ptr addrspace(1) %a) {
 ; GCN-LABEL: {{^}}fadd_v2_v_lit_splat:
 ; GFX900-COUNT-2: v_add_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
 ; PACKED-SDAG:    v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 1.0 op_sel_hi:[1,0]{{$}}
-; PACKED-GISEL:    v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 1.0{{$}}
+; PACKED-GISEL:    v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 1.0 op_sel_hi:[1,0]{{$}}
 define amdgpu_kernel void @fadd_v2_v_lit_splat(ptr addrspace(1) %a) {
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -308,7 +308,7 @@ define amdgpu_kernel void @fmul_v2_v_v_splat(ptr addrspace(1) %a) {
 ; GCN-LABEL: {{^}}fmul_v2_v_lit_splat:
 ; GFX900-COUNT-2: v_mul_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
 ; PACKED-SDAG:    v_pk_mul_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4.0 op_sel_hi:[1,0]{{$}}
-; PACKED-GISEL:   v_pk_mul_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4.0{{$}}
+; PACKED-GISEL:   v_pk_mul_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4.0 op_sel_hi:[1,0]{{$}}
 define amdgpu_kernel void @fmul_v2_v_lit_splat(ptr addrspace(1) %a) {
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -432,7 +432,7 @@ define amdgpu_kernel void @fma_v2_v_v_splat(ptr addrspace(1) %a) {
 ; GCN-LABEL: {{^}}fma_v2_v_lit_splat:
 ; GFX900-COUNT-2: v_fma_f32 v{{[0-9]+}}, v{{[0-9]+}}, 4.0, 1.0
 ; PACKED-SDAG:    v_pk_fma_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4.0, 1.0 op_sel_hi:[1,0,0]{{$}}
-; PACKED-GISEL:   v_pk_fma_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4.0, 1.0{{$}}
+; PACKED-GISEL:   v_pk_fma_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4.0, 1.0 op_sel_hi:[1,0,0]{{$}}
 define amdgpu_kernel void @fma_v2_v_lit_splat(ptr addrspace(1) %a) {
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -556,8 +556,8 @@ bb:
 ; PACKED-SDAG: v_add_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, 0
 ; PACKED-SDAG: v_add_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
 
-; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], 0{{$}}
-; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 0{{$}}
+; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], 0 op_sel_hi:[1,0]{{$}}
+; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 0 op_sel_hi:[1,0]{{$}}
 define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) {
 bb:
   %i12 = fadd <2 x float> zeroinitializer, %arg
diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
index 3420596da2aac..c6349bcbcdbf1 100644
--- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
@@ -477,9 +477,8 @@ define amdgpu_ps <2 x half> @s_constained_fsub_v2f16_fpexcept_strict(<2 x half>
 ;
 ; GFX9-GISEL-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
 ; GFX9-GISEL:       ; %bb.0:
-; GFX9-GISEL-NEXT:    s_xor_b32 s0, s3, 0x80008000
-; GFX9-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-GISEL-NEXT:    v_pk_add_f16 v0, s2, v0
+; GFX9-GISEL-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-GISEL-NEXT:    v_pk_add_f16 v0, s2, v0 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX9-GISEL-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-SDAG-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
@@ -519,8 +518,7 @@ define amdgpu_ps <2 x half> @s_constained_fsub_v2f16_fpexcept_strict(<2 x half>
 ;
 ; GFX10-GISEL-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
 ; GFX10-GISEL:       ; %bb.0:
-; GFX10-GISEL-NEXT:    s_xor_b32 s0, s3, 0x80008000
-; GFX10-GISEL-NEXT:    v_pk_add_f16 v0, s2, s0
+; GFX10-GISEL-NEXT:    v_pk_add_f16 v0, s2, s3 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX10-GISEL-NEXT:    ; return to shader part epilog
 ;
 ; GFX10PLUS-SDAG-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
@@ -535,8 +533,7 @@ define amdgpu_ps <2 x half> @s_constained_fsub_v2f16_fpexcept_strict(<2 x half>
 ;
 ; GFX10PLUS-GISEL-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
 ; GFX10PLUS-GISEL:       ; %bb.0:
-; GFX10PLUS-GISEL-NEXT:    s_xor_b32 s0, s3, 0x80008000
-; GFX10PLUS-GISEL-NEXT:    v_pk_add_f16 v0, s2, s0
+; GFX10PLUS-GISEL-NEXT:    v_pk_add_f16 v0, s2, s3 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX10PLUS-GISEL-NEXT:    ; return to shader part epilog
   %val = call <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret <2 x half> %val
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index aad7a088551b2..50921879cd1f2 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -466,7 +466,7 @@ def have_cxx_shared_library():
         print("could not exec llvm-readobj")
         return False
 
-    readobj_out = readobj_cmd.stdout.read().decode("ascii")
+    readobj_out = readobj_cmd.stdout.read().decode("utf-8")
     readobj_cmd.wait()
 
     regex = re.compile(r"(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)")

>From 58464a3a24d90211f784f54f816095378a7569dc Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Fri, 7 Mar 2025 12:37:51 +0800
Subject: [PATCH 2/4] fix lit file

---
 llvm/test/lit.cfg.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 50921879cd1f2..aad7a088551b2 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -466,7 +466,7 @@ def have_cxx_shared_library():
         print("could not exec llvm-readobj")
         return False
 
-    readobj_out = readobj_cmd.stdout.read().decode("utf-8")
+    readobj_out = readobj_cmd.stdout.read().decode("ascii")
     readobj_cmd.wait()
 
     regex = re.compile(r"(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)")

>From daae1aeaefe49f5cbb14facf8c4535e431ab741a Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Mon, 10 Mar 2025 14:16:30 +0800
Subject: [PATCH 3/4] fix comments

---
 .../AMDGPU/AMDGPUInstructionSelector.cpp      | 98 +++++++++----------
 .../Target/AMDGPU/AMDGPUInstructionSelector.h |  4 +-
 2 files changed, 50 insertions(+), 52 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 0dc47b957bdac..00d538f55a3cf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -4292,14 +4292,15 @@ enum srcStatus {
   LAST_STAT = IS_LOWER_HALF_NEG
 };
 
-bool isTruncHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+static bool isTruncHalf(const MachineInstr *MI,
+                        const MachineRegisterInfo &MRI) {
   assert(MI->getOpcode() == AMDGPU::G_TRUNC);
   unsigned dstSize = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
   unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
   return dstSize * 2 == srcSize;
 }
 
-bool isLshrHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+static bool isLshrHalf(const MachineInstr *MI, const MachineRegisterInfo &MRI) {
   assert(MI->getOpcode() == AMDGPU::G_LSHR);
   Register ShiftSrc;
   std::optional<ValueAndVReg> ShiftAmt;
@@ -4312,7 +4313,7 @@ bool isLshrHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
   return false;
 }
 
-bool isShlHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+static bool isShlHalf(const MachineInstr *MI, const MachineRegisterInfo &MRI) {
   assert(MI->getOpcode() == AMDGPU::G_SHL);
   Register ShiftSrc;
   std::optional<ValueAndVReg> ShiftAmt;
@@ -4325,8 +4326,8 @@ bool isShlHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
   return false;
 }
 
-bool retOpStat(MachineOperand *Op, int stat,
-               std::pair<MachineOperand *, int> &curr) {
+static bool retOpStat(const MachineOperand *Op, int stat,
+                      std::pair<const MachineOperand *, int> &curr) {
   if ((Op->isReg() && !(Op->getReg().isPhysical())) || Op->isImm() ||
       Op->isCImm() || Op->isFPImm()) {
     curr = {Op, stat};
@@ -4335,15 +4336,14 @@ bool retOpStat(MachineOperand *Op, int stat,
   return false;
 }
 
-bool calcNextStatus(std::pair<MachineOperand *, int> &curr,
-                    const MachineRegisterInfo &MRI) {
+static bool calcNextStatus(std::pair<const MachineOperand *, int> &curr,
+                           const MachineRegisterInfo &MRI) {
   if (!curr.first->isReg()) {
     return false;
   }
-  MachineInstr *MI = nullptr;
+  const MachineInstr *MI = nullptr;
 
   if (!curr.first->isDef()) {
-    // MRI.getVRegDef falls into infinite loop if use define reg
     MI = MRI.getVRegDef(curr.first->getReg());
   } else {
     MI = curr.first->getParent();
@@ -4434,12 +4434,12 @@ bool calcNextStatus(std::pair<MachineOperand *, int> &curr,
   return false;
 }
 
-std::vector<std::pair<MachineOperand *, int>>
-getSrcStats(MachineOperand *Op, const MachineRegisterInfo &MRI,
+SmallVector<std::pair<const MachineOperand *, int>>
+getSrcStats(const MachineOperand *Op, const MachineRegisterInfo &MRI,
             bool onlyLastSameOrNeg = false, int maxDepth = 6) {
   int depth = 0;
-  std::pair<MachineOperand *, int> curr = {Op, IS_SAME};
-  std::vector<std::pair<MachineOperand *, int>> statList;
+  std::pair<const MachineOperand *, int> curr = {Op, IS_SAME};
+  SmallVector<std::pair<const MachineOperand *, int>> statList;
 
   while (true) {
     depth++;
@@ -4460,27 +4460,23 @@ getSrcStats(MachineOperand *Op, const MachineRegisterInfo &MRI,
   return statList;
 }
 
-bool isInlinableConstant(MachineOperand *Op, const SIInstrInfo &TII) {
-  bool a = TII.isInlineConstant(*Op);
-  switch (Op->getType()) {
-  case MachineOperand::MachineOperandType::MO_Immediate:
-    return TII.isInlineConstant(*Op);
-  case MachineOperand::MachineOperandType::MO_CImmediate:
-    return TII.isInlineConstant(Op->getCImm()->getValue());
-  case MachineOperand::MachineOperandType::MO_FPImmediate:
-    return TII.isInlineConstant(Op->getFPImm()->getValueAPF());
+static bool isInlinableConstant(const MachineOperand &Op,
+                                const SIInstrInfo &TII) {
+  if (Op.isFPImm()) {
+    return TII.isInlineConstant(Op.getFPImm()->getValueAPF());
   }
   return false;
 }
 
-bool isSameBitWidth(MachineOperand *Op1, MachineOperand *Op2,
-                    const MachineRegisterInfo &MRI) {
+static bool isSameBitWidth(const MachineOperand *Op1, const MachineOperand *Op2,
+                           const MachineRegisterInfo &MRI) {
   unsigned width1 = MRI.getType(Op1->getReg()).getSizeInBits();
   unsigned width2 = MRI.getType(Op2->getReg()).getSizeInBits();
   return width1 == width2;
 }
 
-bool isSameOperand(MachineOperand *Op1, MachineOperand *Op2) {
+static bool isSameOperand(const MachineOperand *Op1,
+                          const MachineOperand *Op2) {
   if (Op1->isReg()) {
     if (Op2->isReg()) {
       return Op1->getReg() == Op2->getReg();
@@ -4490,9 +4486,10 @@ bool isSameOperand(MachineOperand *Op1, MachineOperand *Op2) {
   return Op1->isIdenticalTo(*Op2);
 }
 
-bool validToPack(int HiStat, int LoStat, unsigned int &Mods,
-                 MachineOperand *newOp, MachineOperand *RootOp,
-                 const SIInstrInfo &TII, const MachineRegisterInfo &MRI) {
+static bool validToPack(int HiStat, int LoStat, unsigned int &Mods,
+                        const MachineOperand *newOp,
+                        const MachineOperand *RootOp, const SIInstrInfo &TII,
+                        const MachineRegisterInfo &MRI) {
   if (newOp->isReg()) {
     if (isSameBitWidth(newOp, RootOp, MRI)) {
       // IS_LOWER_HALF remain 0
@@ -4517,7 +4514,7 @@ bool validToPack(int HiStat, int LoStat, unsigned int &Mods,
   } else {
     if ((HiStat == IS_SAME || HiStat == IS_NEG) &&
         (LoStat == IS_SAME || LoStat == IS_NEG) &&
-        isInlinableConstant(newOp, TII)) {
+        isInlinableConstant(*newOp, TII)) {
       if (HiStat == IS_NEG) {
         Mods ^= SISrcMods::NEG_HI;
       }
@@ -4532,13 +4529,13 @@ bool validToPack(int HiStat, int LoStat, unsigned int &Mods,
   return false;
 }
 
-std::pair<MachineOperand *, unsigned>
-AMDGPUInstructionSelector::selectVOP3PModsImpl(MachineOperand *Op,
+std::pair<const MachineOperand *, unsigned>
+AMDGPUInstructionSelector::selectVOP3PModsImpl(const MachineOperand *Op,
                                                const MachineRegisterInfo &MRI,
                                                bool IsDOT) const {
   unsigned Mods = 0;
-  MachineOperand *RootOp = Op;
-  std::pair<MachineOperand *, int> stat = getSrcStats(Op, MRI, true)[0];
+  const MachineOperand *RootOp = Op;
+  std::pair<const MachineOperand *, int> stat = getSrcStats(Op, MRI, true)[0];
   if (!stat.first->isReg()) {
     Mods |= SISrcMods::OP_SEL_1;
     return {Op, Mods};
@@ -4550,8 +4547,8 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(MachineOperand *Op,
   MachineInstr *MI = MRI.getVRegDef(Op->getReg());
   if (MI->getOpcode() == AMDGPU::G_BUILD_VECTOR && MI->getNumOperands() == 3 &&
       (!IsDOT || !Subtarget->hasDOTOpSelHazard())) {
-    std::vector<std::pair<MachineOperand *, int>> statList_Hi;
-    std::vector<std::pair<MachineOperand *, int>> statList_Lo;
+    SmallVector<std::pair<const MachineOperand *, int>> statList_Hi;
+    SmallVector<std::pair<const MachineOperand *, int>> statList_Lo;
     statList_Hi = getSrcStats(&MI->getOperand(2), MRI);
     if (statList_Hi.size() != 0) {
       statList_Lo = getSrcStats(&MI->getOperand(1), MRI);
@@ -4575,30 +4572,29 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(MachineOperand *Op,
   return {Op, Mods};
 }
 
-int64_t getAllKindImm(MachineOperand *Op) {
+int64_t getAllKindImm(const MachineOperand *Op) {
   switch (Op->getType()) {
   case MachineOperand::MachineOperandType::MO_Immediate:
     return Op->getImm();
   case MachineOperand::MachineOperandType::MO_CImmediate:
     return Op->getCImm()->getSExtValue();
-    break;
   case MachineOperand::MachineOperandType::MO_FPImmediate:
     return Op->getFPImm()->getValueAPF().bitcastToAPInt().getSExtValue();
-    break;
   }
   llvm_unreachable("not an imm type");
 }
 
-bool checkRB(MachineOperand *Op, int RBNo, const AMDGPURegisterBankInfo &RBI,
-             const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) {
+bool checkRB(const MachineOperand *Op, int RBNo,
+             const AMDGPURegisterBankInfo &RBI, const MachineRegisterInfo &MRI,
+             const TargetRegisterInfo &TRI) {
   const RegisterBank *RB = RBI.getRegBank(Op->getReg(), MRI, TRI);
   return RB->getID() == RBNo;
 }
 
-MachineOperand *getVReg(MachineOperand *newOp, MachineOperand *RootOp,
-                        const AMDGPURegisterBankInfo &RBI,
-                        MachineRegisterInfo &MRI,
-                        const TargetRegisterInfo &TRI) {
+const MachineOperand *
+getVReg(const MachineOperand *newOp, const MachineOperand *RootOp,
+        const AMDGPURegisterBankInfo &RBI, MachineRegisterInfo &MRI,
+        const TargetRegisterInfo &TRI, const SIInstrInfo &TII) {
   // RootOp can only be VGPR or SGPR (some hand written cases such as
   // inst-select-ashr.v2s16.mir::ashr_v2s16_vs)
   if (checkRB(RootOp, AMDGPU::SGPRRegBankID, RBI, MRI, TRI) ||
@@ -4612,13 +4608,14 @@ MachineOperand *getVReg(MachineOperand *newOp, MachineOperand *RootOp,
     return RootOp;
   }
 
+  MachineBasicBlock *BB = MI->getParent();
   const TargetRegisterClass *DstRC =
       TRI.getConstrainedRegClassForOperand(*RootOp, MRI);
   Register dstReg = MRI.createVirtualRegister(DstRC);
 
-  MachineIRBuilder B(*RootOp->getParent());
   MachineInstrBuilder MIB =
-      B.buildInstr(AMDGPU::COPY).addDef(dstReg).addUse(newOp->getReg());
+      BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), dstReg)
+          .addReg(newOp->getReg());
 
   // only accept VGPR
   return &MIB->getOperand(0);
@@ -4629,14 +4626,15 @@ AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
   MachineRegisterInfo &MRI
     = Root.getParent()->getParent()->getParent()->getRegInfo();
 
-  std::pair<MachineOperand *, unsigned> res = selectVOP3PModsImpl(&Root, MRI);
+  std::pair<const MachineOperand *, unsigned> res =
+      selectVOP3PModsImpl(&Root, MRI);
   if (!(res.first->isReg())) {
     return {{
         [=](MachineInstrBuilder &MIB) { MIB.addImm(getAllKindImm(res.first)); },
         [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
     }};
   }
-  res.first = getVReg(res.first, &Root, RBI, MRI, TRI);
+  res.first = getVReg(res.first, &Root, RBI, MRI, TRI, TII);
   return {{
       [=](MachineInstrBuilder &MIB) { MIB.addReg(res.first->getReg()); },
       [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
@@ -4648,7 +4646,7 @@ AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
   MachineRegisterInfo &MRI
     = Root.getParent()->getParent()->getParent()->getRegInfo();
 
-  std::pair<MachineOperand *, unsigned> res =
+  std::pair<const MachineOperand *, unsigned> res =
       selectVOP3PModsImpl(&Root, MRI, true);
   if (!(res.first->isReg())) {
     return {{
@@ -4656,7 +4654,7 @@ AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
         [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
     }};
   }
-  res.first = getVReg(res.first, &Root, RBI, MRI, TRI);
+  res.first = getVReg(res.first, &Root, RBI, MRI, TRI, TII);
   return {{
       [=](MachineInstrBuilder &MIB) { MIB.addReg(res.first->getReg()); },
       [=](MachineInstrBuilder &MIB) { MIB.addImm(res.second); } // src_mods
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 2af4f55403acc..dd172edfdf216 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -187,8 +187,8 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
 
   ComplexRendererFns selectVOP3NoMods(MachineOperand &Root) const;
 
-  std::pair<MachineOperand *, unsigned>
-  selectVOP3PModsImpl(MachineOperand *Op, const MachineRegisterInfo &MRI,
+  std::pair<const MachineOperand *, unsigned>
+  selectVOP3PModsImpl(const MachineOperand *Op, const MachineRegisterInfo &MRI,
                       bool IsDOT = false) const;
 
   InstructionSelector::ComplexRendererFns

>From 2e587f5fbcc23f6574c4f6f7b86974f0c6352ca4 Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Wed, 12 Mar 2025 11:18:17 +0800
Subject: [PATCH 4/4] fix comments

---
 .../AMDGPU/AMDGPUInstructionSelector.cpp      | 122 ++++++++----------
 llvm/test/lit.cfg.py                          |   2 +-
 2 files changed, 58 insertions(+), 66 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 00d538f55a3cf..622b1bd3f5bf5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -4289,19 +4289,23 @@ enum srcStatus {
   IS_NEG,
   IS_UPPER_HALF_NEG,
   IS_LOWER_HALF_NEG,
-  LAST_STAT = IS_LOWER_HALF_NEG
+  INVALID
 };
 
 static bool isTruncHalf(const MachineInstr *MI,
                         const MachineRegisterInfo &MRI) {
-  assert(MI->getOpcode() == AMDGPU::G_TRUNC);
+  if (MI->getOpcode() != AMDGPU::G_TRUNC) {
+    return false;
+  }
   unsigned dstSize = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
   unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
   return dstSize * 2 == srcSize;
 }
 
 static bool isLshrHalf(const MachineInstr *MI, const MachineRegisterInfo &MRI) {
-  assert(MI->getOpcode() == AMDGPU::G_LSHR);
+  if (MI->getOpcode() != AMDGPU::G_LSHR) {
+    return false;
+  }
   Register ShiftSrc;
   std::optional<ValueAndVReg> ShiftAmt;
   if (mi_match(MI->getOperand(0).getReg(), MRI,
@@ -4314,7 +4318,9 @@ static bool isLshrHalf(const MachineInstr *MI, const MachineRegisterInfo &MRI) {
 }
 
 static bool isShlHalf(const MachineInstr *MI, const MachineRegisterInfo &MRI) {
-  assert(MI->getOpcode() == AMDGPU::G_SHL);
+  if (MI->getOpcode() != AMDGPU::G_SHL) {
+    return false;
+  }
   Register ShiftSrc;
   std::optional<ValueAndVReg> ShiftAmt;
   if (mi_match(MI->getOperand(0).getReg(), MRI,
@@ -4326,8 +4332,11 @@ static bool isShlHalf(const MachineInstr *MI, const MachineRegisterInfo &MRI) {
   return false;
 }
 
-static bool retOpStat(const MachineOperand *Op, int stat,
-                      std::pair<const MachineOperand *, int> &curr) {
+static bool retOpStat(const MachineOperand *Op, srcStatus stat,
+                      std::pair<const MachineOperand *, srcStatus> &curr) {
+  if (stat == INVALID) {
+    return false;
+  }
   if ((Op->isReg() && !(Op->getReg().isPhysical())) || Op->isImm() ||
       Op->isCImm() || Op->isFPImm()) {
     curr = {Op, stat};
@@ -4336,7 +4345,25 @@ static bool retOpStat(const MachineOperand *Op, int stat,
   return false;
 }
 
-static bool calcNextStatus(std::pair<const MachineOperand *, int> &curr,
+srcStatus getNegStatus(srcStatus S) {
+  switch (S) {
+  case IS_SAME:
+    return IS_NEG;
+  case IS_UPPER_HALF:
+    return IS_UPPER_HALF_NEG;
+  case IS_LOWER_HALF:
+    return IS_LOWER_HALF_NEG;
+  case IS_NEG:
+    return IS_SAME;
+  case IS_UPPER_HALF_NEG:
+    return IS_UPPER_HALF;
+  case IS_LOWER_HALF_NEG:
+    return IS_LOWER_HALF;
+  }
+  return INVALID;
+}
+
+static bool calcNextStatus(std::pair<const MachineOperand *, srcStatus> &curr,
                            const MachineRegisterInfo &MRI) {
   if (!curr.first->isReg()) {
     return false;
@@ -4363,92 +4390,56 @@ static bool calcNextStatus(std::pair<const MachineOperand *, int> &curr,
     return retOpStat(&MI->getOperand(1), curr.second, curr);
   case AMDGPU::G_FNEG:
     // XXXX + 3 = XXXX_NEG, (XXXX_NEG + 3) mod 3 = XXXX
-    return retOpStat(&MI->getOperand(1),
-                     (curr.second + ((LAST_STAT + 1) / 2)) % (LAST_STAT + 1),
-                     curr);
+    return retOpStat(&MI->getOperand(1), getNegStatus(curr.second), curr);
   }
 
   // Calc next stat from current stat
   switch (curr.second) {
   case IS_SAME:
-    switch (Opc) {
-    case AMDGPU::G_TRUNC: {
-      if (isTruncHalf(MI, MRI)) {
-        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
-      }
-      break;
-    }
+    if (isTruncHalf(MI, MRI)) {
+      return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
     }
     break;
   case IS_NEG:
-    switch (Opc) {
-    case AMDGPU::G_TRUNC: {
-      if (isTruncHalf(MI, MRI)) {
-        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
-      }
-      break;
-    }
+    if (isTruncHalf(MI, MRI)) {
+      return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
     }
     break;
   case IS_UPPER_HALF:
-    switch (Opc) {
-    case AMDGPU::G_SHL: {
-      if (isShlHalf(MI, MRI)) {
-        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
-      }
-      break;
-    }
+    if (isShlHalf(MI, MRI)) {
+      return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
     }
     break;
   case IS_LOWER_HALF:
-    switch (Opc) {
-    case AMDGPU::G_LSHR: {
-      if (isLshrHalf(MI, MRI)) {
-        return retOpStat(&MI->getOperand(1), IS_UPPER_HALF, curr);
-      }
-      break;
-    }
+    if (isLshrHalf(MI, MRI)) {
+      return retOpStat(&MI->getOperand(1), IS_UPPER_HALF, curr);
     }
     break;
   case IS_UPPER_HALF_NEG:
-    switch (Opc) {
-    case AMDGPU::G_SHL: {
-      if (isShlHalf(MI, MRI)) {
-        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
-      }
-      break;
-    }
+    if (isShlHalf(MI, MRI)) {
+      return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
     }
     break;
   case IS_LOWER_HALF_NEG:
-    switch (Opc) {
-    case AMDGPU::G_LSHR: {
-      if (isLshrHalf(MI, MRI)) {
-        return retOpStat(&MI->getOperand(1), IS_UPPER_HALF_NEG, curr);
-      }
-      break;
-    }
+    if (isLshrHalf(MI, MRI)) {
+      return retOpStat(&MI->getOperand(1), IS_UPPER_HALF_NEG, curr);
     }
     break;
   }
   return false;
 }
 
-SmallVector<std::pair<const MachineOperand *, int>>
+SmallVector<std::pair<const MachineOperand *, srcStatus>>
 getSrcStats(const MachineOperand *Op, const MachineRegisterInfo &MRI,
             bool onlyLastSameOrNeg = false, int maxDepth = 6) {
   int depth = 0;
-  std::pair<const MachineOperand *, int> curr = {Op, IS_SAME};
-  SmallVector<std::pair<const MachineOperand *, int>> statList;
+  std::pair<const MachineOperand *, srcStatus> curr = {Op, IS_SAME};
+  SmallVector<std::pair<const MachineOperand *, srcStatus>> statList;
 
-  while (true) {
+  while (depth <= maxDepth && calcNextStatus(curr, MRI)) {
     depth++;
-    if (depth > maxDepth) {
-      break;
-    }
-    bool ret = calcNextStatus(curr, MRI);
-    if (!ret || (onlyLastSameOrNeg &&
-                 (curr.second != IS_SAME && curr.second != IS_NEG))) {
+    if ((onlyLastSameOrNeg &&
+         (curr.second != IS_SAME && curr.second != IS_NEG))) {
       break;
     } else if (!onlyLastSameOrNeg) {
       statList.push_back(curr);
@@ -4535,7 +4526,8 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(const MachineOperand *Op,
                                                bool IsDOT) const {
   unsigned Mods = 0;
   const MachineOperand *RootOp = Op;
-  std::pair<const MachineOperand *, int> stat = getSrcStats(Op, MRI, true)[0];
+  std::pair<const MachineOperand *, srcStatus> stat =
+      getSrcStats(Op, MRI, true)[0];
   if (!stat.first->isReg()) {
     Mods |= SISrcMods::OP_SEL_1;
     return {Op, Mods};
@@ -4547,8 +4539,8 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(const MachineOperand *Op,
   MachineInstr *MI = MRI.getVRegDef(Op->getReg());
   if (MI->getOpcode() == AMDGPU::G_BUILD_VECTOR && MI->getNumOperands() == 3 &&
       (!IsDOT || !Subtarget->hasDOTOpSelHazard())) {
-    SmallVector<std::pair<const MachineOperand *, int>> statList_Hi;
-    SmallVector<std::pair<const MachineOperand *, int>> statList_Lo;
+    SmallVector<std::pair<const MachineOperand *, srcStatus>> statList_Hi;
+    SmallVector<std::pair<const MachineOperand *, srcStatus>> statList_Lo;
     statList_Hi = getSrcStats(&MI->getOperand(2), MRI);
     if (statList_Hi.size() != 0) {
       statList_Lo = getSrcStats(&MI->getOperand(1), MRI);
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index aad7a088551b2..50921879cd1f2 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -466,7 +466,7 @@ def have_cxx_shared_library():
         print("could not exec llvm-readobj")
         return False
 
-    readobj_out = readobj_cmd.stdout.read().decode("ascii")
+    readobj_out = readobj_cmd.stdout.read().decode("utf-8")
     readobj_cmd.wait()
 
     regex = re.compile(r"(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)")



More information about the llvm-commits mailing list