[llvm] [AMDGPU] Rework dot4 signedness checks (PR #68757)

Jeffrey Byrnes via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 15 10:35:02 PST 2023


https://github.com/jrbyrnes updated https://github.com/llvm/llvm-project/pull/68757

From 25c7d33a22f493bee0b70e38858be224623e4b61 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 10 Oct 2023 16:50:59 -0700
Subject: [PATCH] [AMDGPU] Rework dot4 signedness checks

Change-Id: Ifde199cb73fa35d55b6be2cf295a50dd78c30a13
---
 llvm/include/llvm/CodeGen/ByteProvider.h  |  16 -
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 170 ++++-----
 llvm/test/CodeGen/AMDGPU/idot4u.ll        | 428 ++++++++++++++++++++++
 3 files changed, 516 insertions(+), 98 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/ByteProvider.h b/llvm/include/llvm/CodeGen/ByteProvider.h
index adfa59f14b1dc98..3187b4e68c56f3a 100644
--- a/llvm/include/llvm/CodeGen/ByteProvider.h
+++ b/llvm/include/llvm/CodeGen/ByteProvider.h
@@ -32,11 +32,6 @@ template <typename ISelOp> class ByteProvider {
   ByteProvider(std::optional<ISelOp> Src, int64_t DestOffset, int64_t SrcOffset)
       : Src(Src), DestOffset(DestOffset), SrcOffset(SrcOffset) {}
 
-  ByteProvider(std::optional<ISelOp> Src, int64_t DestOffset, int64_t SrcOffset,
-               std::optional<bool> IsSigned)
-      : Src(Src), DestOffset(DestOffset), SrcOffset(SrcOffset),
-        IsSigned(IsSigned) {}
-
   // TODO -- use constraint in c++20
   // Does this type correspond with an operation in selection DAG
   template <typename T> class is_op {
@@ -66,9 +61,6 @@ template <typename ISelOp> class ByteProvider {
   // DestOffset
   int64_t SrcOffset = 0;
 
-  // Whether or not the path to this Src involved signed extensions
-  std::optional<bool> IsSigned;
-
   ByteProvider() = default;
 
   static ByteProvider getSrc(std::optional<ISelOp> Val, int64_t ByteOffset,
@@ -78,14 +70,6 @@ template <typename ISelOp> class ByteProvider {
     return ByteProvider(Val, ByteOffset, VectorOffset);
   }
 
-  static ByteProvider getSrc(std::optional<ISelOp> Val, int64_t ByteOffset,
-                             int64_t VectorOffset,
-                             std::optional<bool> IsSigned) {
-    static_assert(is_op<ISelOp>().value,
-                  "ByteProviders must contain an operation in selection DAG.");
-    return ByteProvider(Val, ByteOffset, VectorOffset, IsSigned);
-  }
-
   static ByteProvider getConstantZero() {
     return ByteProvider<ISelOp>(std::nullopt, 0, 0);
   }
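
For context, with the signed overloads removed a ByteProvider is just a
source value plus two byte offsets. A minimal standalone sketch of the
remaining shape (illustrative only; SDValue is stubbed so the snippet
compiles on its own, and the template machinery of the real header is
elided):

  #include <cstdint>
  #include <optional>

  struct SDValue {};  // stand-in for llvm::SDValue, for illustration only

  // Post-patch shape: no IsSigned member, no signedness-carrying overloads.
  struct ByteProviderSketch {
    std::optional<SDValue> Src;  // operation whose byte is referenced
    int64_t DestOffset = 0;      // byte of the destination being provided
    int64_t SrcOffset = 0;       // byte within Src that maps to DestOffset

    static ByteProviderSketch getSrc(SDValue V, int64_t Dst, int64_t SrcOff) {
      return {V, Dst, SrcOff};
    }
    static ByteProviderSketch getConstantZero() {
      return {std::nullopt, 0, 0};
    }
  };
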
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index cff7e4bc66218ca..e7c8ec319666f91 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10932,7 +10932,6 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
 // performed.
 static const std::optional<ByteProvider<SDValue>>
 calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
-                 std::optional<bool> IsSigned = std::nullopt,
                  unsigned Depth = 0) {
   // We may need to recursively traverse a series of SRLs
   if (Depth >= 6)
@@ -10944,16 +10943,12 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
 
   switch (Op->getOpcode()) {
   case ISD::TRUNCATE: {
-    return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, IsSigned,
-                            Depth + 1);
+    return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, Depth + 1);
   }
 
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND:
   case ISD::SIGN_EXTEND_INREG: {
-    IsSigned = IsSigned.value_or(false) ||
-               Op->getOpcode() == ISD::SIGN_EXTEND ||
-               Op->getOpcode() == ISD::SIGN_EXTEND_INREG;
     SDValue NarrowOp = Op->getOperand(0);
     auto NarrowVT = NarrowOp.getValueType();
     if (Op->getOpcode() == ISD::SIGN_EXTEND_INREG) {
@@ -10966,8 +10961,7 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
 
     if (SrcIndex >= NarrowByteWidth)
       return std::nullopt;
-    return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, IsSigned,
-                            Depth + 1);
+    return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, Depth + 1);
   }
 
   case ISD::SRA:
@@ -10983,24 +10977,11 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
 
     SrcIndex += BitShift / 8;
 
-    return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, IsSigned,
-                            Depth + 1);
+    return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, Depth + 1);
   }
 
   default: {
-    if (isa<AtomicSDNode>(Op) || Op->isMemIntrinsic()) {
-      // If this causes us to throw away signedness info, then fail.
-      if (IsSigned)
-        return std::nullopt;
-      return ByteProvider<SDValue>::getSrc(Op, DestByte, SrcIndex);
-    }
-
-    if (auto L = dyn_cast<LoadSDNode>(Op))
-      if (L->getExtensionType() != ISD::NON_EXTLOAD)
-        IsSigned =
-            IsSigned.value_or(false) || L->getExtensionType() == ISD::SEXTLOAD;
-
-    return ByteProvider<SDValue>::getSrc(Op, DestByte, SrcIndex, IsSigned);
+    return ByteProvider<SDValue>::getSrc(Op, DestByte, SrcIndex);
   }
   }
   llvm_unreachable("fully handled switch");
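
As a hedged aside on the SRL case above: requesting byte 0 of (srl X, 16)
bumps SrcIndex by 16/8 = 2 and recurses into X, so the provider ends up
pointing at byte 2 of X. A tiny self-checking sketch of that arithmetic
(illustrative only; nextSrcIndex is not a real helper):

  #include <cstdint>

  // Mirrors the SRL handling in calculateSrcByte: SrcIndex += BitShift / 8.
  constexpr uint64_t nextSrcIndex(uint64_t SrcIndex, uint64_t BitShift) {
    return SrcIndex + BitShift / 8;
  }
  static_assert(nextSrcIndex(0, 16) == 2,
                "byte 0 of (srl x, 16) comes from byte 2 of x");
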
@@ -11014,8 +10995,7 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
 // performed. \p StartingIndex is the originally requested byte of the Or
 static const std::optional<ByteProvider<SDValue>>
 calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
-                      unsigned StartingIndex = 0,
-                      std::optional<bool> IsSigned = std::nullopt) {
+                      unsigned StartingIndex = 0) {
   // Finding Src tree of RHS of or typically requires at least 1 additional
   // depth
   if (Depth > 6)
@@ -11030,11 +11010,11 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
   switch (Op.getOpcode()) {
   case ISD::OR: {
     auto RHS = calculateByteProvider(Op.getOperand(1), Index, Depth + 1,
-                                     StartingIndex, IsSigned);
+                                     StartingIndex);
     if (!RHS)
       return std::nullopt;
     auto LHS = calculateByteProvider(Op.getOperand(0), Index, Depth + 1,
-                                     StartingIndex, IsSigned);
+                                     StartingIndex);
     if (!LHS)
       return std::nullopt;
     // A well formed Or will have two ByteProviders for each byte, one of which
@@ -11065,7 +11045,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
       return ByteProvider<SDValue>::getConstantZero();
     }
 
-    return calculateSrcByte(Op->getOperand(0), StartingIndex, Index, IsSigned);
+    return calculateSrcByte(Op->getOperand(0), StartingIndex, Index);
   }
 
   case ISD::FSHR: {
@@ -11114,7 +11094,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
     // the SRL is Index + ByteShift
     return BytesProvided - ByteShift > Index
                ? calculateSrcByte(Op->getOperand(0), StartingIndex,
-                                  Index + ByteShift, IsSigned)
+                                  Index + ByteShift)
                : ByteProvider<SDValue>::getConstantZero();
   }
 
@@ -11135,7 +11115,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
     return Index < ByteShift
                ? ByteProvider<SDValue>::getConstantZero()
                : calculateByteProvider(Op.getOperand(0), Index - ByteShift,
-                                       Depth + 1, StartingIndex, IsSigned);
+                                       Depth + 1, StartingIndex);
   }
   case ISD::ANY_EXTEND:
   case ISD::SIGN_EXTEND:
@@ -11155,21 +11135,13 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
       return std::nullopt;
     uint64_t NarrowByteWidth = NarrowBitWidth / 8;
 
-    IsSigned =
-        Op->getOpcode() != ISD::ANY_EXTEND
-            ? std::optional<bool>(IsSigned.value_or(false) ||
-                                  Op->getOpcode() == ISD::SIGN_EXTEND ||
-                                  Op->getOpcode() == ISD::SIGN_EXTEND_INREG ||
-                                  Op->getOpcode() == ISD::AssertSext)
-            : IsSigned;
 
     if (Index >= NarrowByteWidth)
       return Op.getOpcode() == ISD::ZERO_EXTEND
                  ? std::optional<ByteProvider<SDValue>>(
                        ByteProvider<SDValue>::getConstantZero())
                  : std::nullopt;
-    return calculateByteProvider(NarrowOp, Index, Depth + 1, StartingIndex,
-                                 IsSigned);
+    return calculateByteProvider(NarrowOp, Index, Depth + 1, StartingIndex);
   }
 
   case ISD::TRUNCATE: {
@@ -11177,7 +11149,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
 
     if (NarrowByteWidth >= Index) {
       return calculateByteProvider(Op.getOperand(0), Index, Depth + 1,
-                                   StartingIndex, IsSigned);
+                                   StartingIndex);
     }
 
     return std::nullopt;
@@ -11185,7 +11157,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
 
   case ISD::CopyFromReg: {
     if (BitWidth / 8 > Index)
-      return calculateSrcByte(Op, StartingIndex, Index, IsSigned);
+      return calculateSrcByte(Op, StartingIndex, Index);
 
     return std::nullopt;
   }
@@ -11193,10 +11165,6 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
   case ISD::LOAD: {
     auto L = cast<LoadSDNode>(Op.getNode());
 
-    // Only set IsSigned if the load is extended.
-    if (L->getExtensionType() != ISD::NON_EXTLOAD)
-      IsSigned =
-          IsSigned.value_or(false) || L->getExtensionType() == ISD::SEXTLOAD;
     unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
     if (NarrowBitWidth % 8 != 0)
       return std::nullopt;
@@ -11213,7 +11181,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
     }
 
     if (NarrowByteWidth > Index) {
-      return calculateSrcByte(Op, StartingIndex, Index, IsSigned);
+      return calculateSrcByte(Op, StartingIndex, Index);
     }
 
     return std::nullopt;
@@ -11221,7 +11189,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
 
   case ISD::BSWAP:
     return calculateByteProvider(Op->getOperand(0), BitWidth / 8 - Index - 1,
-                                 Depth + 1, StartingIndex, IsSigned);
+                                 Depth + 1, StartingIndex);
 
   case ISD::EXTRACT_VECTOR_ELT: {
     auto IdxOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
@@ -11236,7 +11204,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
     }
 
     return calculateSrcByte(ScalarSize == 32 ? Op : Op.getOperand(0),
-                            StartingIndex, Index, IsSigned);
+                            StartingIndex, Index);
   }
 
   case AMDGPUISD::PERM: {
@@ -11252,10 +11220,9 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
     auto NextOp = Op.getOperand(IdxMask > 0x03 ? 0 : 1);
     auto NextIndex = IdxMask > 0x03 ? IdxMask % 4 : IdxMask;
 
-    return IdxMask != 0x0c
-               ? calculateSrcByte(NextOp, StartingIndex, NextIndex, IsSigned)
-               : ByteProvider<SDValue>(
-                     ByteProvider<SDValue>::getConstantZero());
+    return IdxMask != 0x0c ? calculateSrcByte(NextOp, StartingIndex, NextIndex)
+                           : ByteProvider<SDValue>(
+                                 ByteProvider<SDValue>::getConstantZero());
   }
 
   default: {
@@ -13056,32 +13023,67 @@ static bool isMul(const SDValue Op) {
           Opcode == AMDGPUISD::MUL_I24);
 }
 
-static std::optional<bool> checkSignedness(const SDValue &N,
-                                           ByteProvider<SDValue> &Src0,
-                                           ByteProvider<SDValue> &Src1) {
-  auto MulOpcode = N.getOpcode();
-  std::optional<bool> IterIsSigned;
-  // Both sides of the tree must have the same signedness semantics.
-  if ((Src0.IsSigned != Src1.IsSigned) ||
-      (Src0.IsSigned.value_or(false) != Src1.IsSigned.value_or(false)))
-    return IterIsSigned;
-  // If we have a MUL_U24 op with signed semantics, then fail.
-  if (Src0.IsSigned.value_or(false) && MulOpcode == AMDGPUISD::MUL_U24)
-    return IterIsSigned;
-  // If we have a MUL_I24 op with unsigned semantics, then fail.
-  if (!Src0.IsSigned.value_or(true) && MulOpcode == AMDGPUISD::MUL_I24)
-    return IterIsSigned;
-
-  bool TopLevelSignedness =
-      MulOpcode == AMDGPUISD::MUL_I24 ||
-      (MulOpcode == ISD::MUL && N.getNode()->getFlags().hasNoSignedWrap() &&
-       !N.getNode()->getFlags().hasNoUnsignedWrap());
-
-  // In cases where we are accumulating into an i8 (for v_dot4), the
-  // ByteProvider will not have signedness info since the MSBs are dont-cares.
-  // In this case, we simply use the TopLevelSignedness of the instruction.
-  IterIsSigned = Src0.IsSigned.value_or(TopLevelSignedness);
-  return IterIsSigned;
+static std::optional<bool>
+checkDot4MulSignedness(const SDValue &N, ByteProvider<SDValue> &Src0,
+                       ByteProvider<SDValue> &Src1, const SDValue &S0Op,
+                       const SDValue &S1Op, const SelectionDAG &DAG) {
+  // If both ops are i8s (pre legalize-dag), then the signedness semantics
+  // of the dot4 are irrelevant.
+  if (S0Op.getValueSizeInBits() == 8 && S1Op.getValueSizeInBits() == 8)
+    return false;
+
+  auto Known0 = DAG.computeKnownBits(S0Op, 0);
+  bool S0IsUnsigned = Known0.countMinLeadingZeros() > 0;
+  bool S0IsSigned = Known0.countMinLeadingOnes() > 0;
+  auto Known1 = DAG.computeKnownBits(S1Op, 0);
+  bool S1IsUnsigned = Known1.countMinLeadingZeros() > 0;
+  bool S1IsSigned = Known1.countMinLeadingOnes() > 0;
+
+  assert(!(S0IsUnsigned && S0IsSigned));
+  assert(!(S1IsUnsigned && S1IsSigned));
+
+  // There are 9 possible permutations of
+  // {S0IsUnsigned, S0IsSigned, S1IsUnsigned, S1IsSigned}
+
+  // In two permutations, the sign bits are known to be the same for both Ops,
+  // so simply return the signedness corresponding to the shared MSB.
+
+  if ((S0IsUnsigned && S1IsUnsigned) || (S0IsSigned && S1IsSigned))
+    return S0IsSigned;
+
+  // In another two permutations, the sign bits are known to be opposite. In
+  // this case return std::nullopt to indicate a bad match.
+
+  if ((S0IsUnsigned && S1IsSigned) || (S0IsSigned && S1IsUnsigned))
+    return std::nullopt;
+
+  // In the remaining five permutations, we don't know the value of the sign
+  // bit for at least one Op. Since we have a valid ByteProvider, we know that
+  // the upper bits must be extension bits. Thus, the only ways for the sign
+  // bit to be unknown are if it was sign extended from an unknown value or if
+  // it was any extended. In either case, it is correct to use the signed
+  // version of the dot4 signedness semantics.
+
+  // In two such permutations, we know the sign bit is set for one op and
+  // unknown for the other. It is okay to use the signed version of dot4.
+  if ((S0IsSigned && !(S1IsSigned || S1IsUnsigned)) ||
+      ((S1IsSigned && !(S0IsSigned || S0IsUnsigned))))
+    return true;
+
+  // In one such permutation, we don't know either of the sign bits. It is
+  // okay to use the signed version of dot4.
+  if ((!(S1IsSigned || S1IsUnsigned) && !(S0IsSigned || S0IsUnsigned)))
+    return true;
+
+  // In two such permutations, we know the sign bit is unset for one op and
+  // unknown for the other. Return std::nullopt to indicate a bad match.
+  if ((S0IsUnsigned && !(S1IsSigned || S1IsUnsigned)) ||
+      ((S1IsUnsigned && !(S0IsSigned || S0IsUnsigned))))
+    return std::nullopt;
+
+  llvm_unreachable("Fully covered condition");
 }
 
 SDValue SITargetLowering::performAddCombine(SDNode *N,
@@ -13124,8 +13126,10 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
       if (!Src1)
         break;
 
-      auto IterIsSigned =
-          checkSignedness(TempNode->getOperand(MulIdx), *Src0, *Src1);
+      auto IterIsSigned = checkDot4MulSignedness(
+          TempNode->getOperand(MulIdx), *Src0, *Src1,
+          TempNode->getOperand(MulIdx)->getOperand(0),
+          TempNode->getOperand(MulIdx)->getOperand(1), DAG);
       if (!IterIsSigned)
         break;
       if (!IsSigned)
@@ -13146,8 +13150,10 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
             handleMulOperand(TempNode->getOperand(AddIdx)->getOperand(1));
         if (!Src1)
           break;
-        auto IterIsSigned =
-            checkSignedness(TempNode->getOperand(AddIdx), *Src0, *Src1);
+        auto IterIsSigned = checkDot4MulSignedness(
+            TempNode->getOperand(AddIdx), *Src0, *Src1,
+            TempNode->getOperand(AddIdx)->getOperand(0),
+            TempNode->getOperand(AddIdx)->getOperand(1), DAG);
         if (!IterIsSigned)
           break;
         assert(IsSigned);
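
A note on the case analysis in checkDot4MulSignedness above: each operand is
classified from its known bits (countMinLeadingZeros() > 0 means the MSB is
known zero, so the value is unsigned; countMinLeadingOnes() > 0 means the MSB
is known one, so it is signed; otherwise it is unknown, e.g. a sign- or
any-extended unknown value, whereas a zext always yields known leading
zeros). A self-contained sketch of the same decision table (it mirrors the
code above but is illustrative only):

  #include <optional>

  enum class Sign { Unsigned, Signed, Unknown };

  // Illustrative mirror of checkDot4MulSignedness's nine-case analysis:
  // returns the signedness to use for dot4, or std::nullopt for a bad match.
  std::optional<bool> pickDot4Signedness(Sign S0, Sign S1) {
    if (S0 != Sign::Unknown && S1 != Sign::Unknown) {
      if (S0 != S1)
        return std::nullopt;      // MSBs known to be opposite: bad match
      return S0 == Sign::Signed;  // MSBs agree: use the common signedness
    }
    // At least one MSB is unknown, so its upper bits are sign/any extension
    // bits; signed dot4 is safe unless the known operand is unsigned.
    if (S0 == Sign::Unsigned || S1 == Sign::Unsigned)
      return std::nullopt;
    return true;
  }
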
diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll
index a82c5215f3b2c65..ba9ccb4c636f948 100644
--- a/llvm/test/CodeGen/AMDGPU/idot4u.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll
@@ -1735,6 +1735,268 @@ entry:
   ret void
 }
 
+
+define amdgpu_kernel void @notdot4_mixedtypes2(ptr addrspace(1) %src1,
+; GFX7-LABEL: notdot4_mixedtypes2:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NEXT:    s_mov_b32 s10, 0
+; GFX7-NEXT:    s_mov_b32 s11, s3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b64 s[8:9], s[4:5]
+; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX7-NEXT:    v_mov_b32_e32 v1, 0
+; GFX7-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX7-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT:    s_mov_b32 s2, -1
+; GFX7-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
+; GFX7-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX7-NEXT:    v_bfe_u32 v4, v2, 8, 8
+; GFX7-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NEXT:    v_bfe_i32 v7, v0, 8, 8
+; GFX7-NEXT:    v_and_b32_e32 v7, 0xffff, v7
+; GFX7-NEXT:    v_bfe_i32 v5, v2, 16, 8
+; GFX7-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT:    v_and_b32_e32 v6, 0xff, v0
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_mad_u32_u24 v1, v4, v7, v1
+; GFX7-NEXT:    v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT:    v_bfe_u32 v8, v0, 16, 8
+; GFX7-NEXT:    v_ashrrev_i32_e32 v0, 24, v0
+; GFX7-NEXT:    v_mad_u32_u24 v1, v3, v6, v1
+; GFX7-NEXT:    v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT:    v_mad_u32_u24 v1, v5, v8, v1
+; GFX7-NEXT:    v_mad_u32_u24 v0, v2, v0, v1
+; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX7-NEXT:    s_endpgm
+;
+; GFX8-LABEL: notdot4_mixedtypes2:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; GFX8-NEXT:    v_mov_b32_e32 v5, 0xff
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_load_dword v3, v[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s6, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_load_dword v2, v[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX8-NEXT:    s_waitcnt vmcnt(2)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v9, 8, v3
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff, v9
+; GFX8-NEXT:    v_lshrrev_b32_e32 v6, 16, v3
+; GFX8-NEXT:    v_bfe_i32 v7, v3, 0, 8
+; GFX8-NEXT:    v_bfe_i32 v6, v6, 0, 8
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 24, v3
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v10, 8, v2
+; GFX8-NEXT:    v_bfe_i32 v10, v10, 0, 8
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff, v2
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_mad_u16 v4, v9, v10, v4
+; GFX8-NEXT:    v_and_b32_sdwa v5, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 24, v2
+; GFX8-NEXT:    v_mad_u16 v4, v7, v8, v4
+; GFX8-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX8-NEXT:    v_mad_u16 v4, v6, v5, v4
+; GFX8-NEXT:    v_mad_u16 v2, v3, v2, v4
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX9-NODL-LABEL: notdot4_mixedtypes2:
+; GFX9-NODL:       ; %bb.0: ; %entry
+; GFX9-NODL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NODL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NODL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NODL-NEXT:    s_movk_i32 s0, 0xff
+; GFX9-NODL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NODL-NEXT:    global_load_dword v1, v0, s[4:5]
+; GFX9-NODL-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX9-NODL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NODL-NEXT:    global_load_ushort v3, v0, s[2:3]
+; GFX9-NODL-NEXT:    s_waitcnt vmcnt(2)
+; GFX9-NODL-NEXT:    v_lshrrev_b32_e32 v7, 8, v1
+; GFX9-NODL-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NODL-NEXT:    v_lshrrev_b32_e32 v8, 8, v2
+; GFX9-NODL-NEXT:    v_and_b32_e32 v7, 0xff, v7
+; GFX9-NODL-NEXT:    v_bfe_i32 v8, v8, 0, 8
+; GFX9-NODL-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
+; GFX9-NODL-NEXT:    v_bfe_i32 v5, v1, 0, 8
+; GFX9-NODL-NEXT:    v_and_b32_e32 v6, 0xff, v2
+; GFX9-NODL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NODL-NEXT:    v_mad_legacy_u16 v3, v7, v8, v3
+; GFX9-NODL-NEXT:    v_and_b32_sdwa v9, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NODL-NEXT:    v_lshrrev_b32_e32 v2, 24, v2
+; GFX9-NODL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX9-NODL-NEXT:    v_mad_legacy_u16 v3, v5, v6, v3
+; GFX9-NODL-NEXT:    v_lshrrev_b32_e32 v1, 24, v1
+; GFX9-NODL-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX9-NODL-NEXT:    v_mad_legacy_u16 v3, v4, v9, v3
+; GFX9-NODL-NEXT:    v_mad_legacy_u16 v1, v1, v2, v3
+; GFX9-NODL-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NODL-NEXT:    s_endpgm
+;
+; GFX9-DL-LABEL: notdot4_mixedtypes2:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-DL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-DL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-DL-NEXT:    s_movk_i32 s0, 0xff
+; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-DL-NEXT:    global_load_dword v1, v0, s[4:5]
+; GFX9-DL-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX9-DL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-DL-NEXT:    global_load_ushort v3, v0, s[2:3]
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(2)
+; GFX9-DL-NEXT:    v_lshrrev_b32_e32 v7, 8, v1
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-DL-NEXT:    v_lshrrev_b32_e32 v8, 8, v2
+; GFX9-DL-NEXT:    v_and_b32_e32 v7, 0xff, v7
+; GFX9-DL-NEXT:    v_bfe_i32 v8, v8, 0, 8
+; GFX9-DL-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
+; GFX9-DL-NEXT:    v_bfe_i32 v5, v1, 0, 8
+; GFX9-DL-NEXT:    v_and_b32_e32 v6, 0xff, v2
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-DL-NEXT:    v_mad_legacy_u16 v3, v7, v8, v3
+; GFX9-DL-NEXT:    v_and_b32_sdwa v9, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-DL-NEXT:    v_lshrrev_b32_e32 v2, 24, v2
+; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX9-DL-NEXT:    v_mad_legacy_u16 v3, v5, v6, v3
+; GFX9-DL-NEXT:    v_lshrrev_b32_e32 v1, 24, v1
+; GFX9-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX9-DL-NEXT:    v_mad_legacy_u16 v3, v4, v9, v3
+; GFX9-DL-NEXT:    v_mad_legacy_u16 v1, v1, v2, v3
+; GFX9-DL-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-DL-NEXT:    s_endpgm
+;
+; GFX10-DL-LABEL: notdot4_mixedtypes2:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_clause 0x1
+; GFX10-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-DL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX10-DL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-DL-NEXT:    v_mov_b32_e32 v8, 0xff
+; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-DL-NEXT:    s_clause 0x1
+; GFX10-DL-NEXT:    global_load_dword v1, v0, s[4:5]
+; GFX10-DL-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX10-DL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10-DL-NEXT:    global_load_ushort v3, v0, s[2:3]
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(2)
+; GFX10-DL-NEXT:    v_lshrrev_b32_e32 v4, 8, v1
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(1)
+; GFX10-DL-NEXT:    v_lshrrev_b32_e32 v5, 8, v2
+; GFX10-DL-NEXT:    v_lshrrev_b32_e32 v6, 16, v1
+; GFX10-DL-NEXT:    v_bfe_i32 v7, v1, 0, 8
+; GFX10-DL-NEXT:    v_and_b32_e32 v9, 0xff, v2
+; GFX10-DL-NEXT:    v_and_b32_e32 v4, 0xff, v4
+; GFX10-DL-NEXT:    v_bfe_i32 v5, v5, 0, 8
+; GFX10-DL-NEXT:    v_lshrrev_b32_e32 v1, 24, v1
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-DL-NEXT:    v_mad_u16 v3, v4, v5, v3
+; GFX10-DL-NEXT:    v_bfe_i32 v4, v6, 0, 8
+; GFX10-DL-NEXT:    v_and_b32_sdwa v5, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-DL-NEXT:    v_lshrrev_b32_e32 v2, 24, v2
+; GFX10-DL-NEXT:    v_mad_u16 v3, v7, v9, v3
+; GFX10-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX10-DL-NEXT:    v_mad_u16 v3, v4, v5, v3
+; GFX10-DL-NEXT:    v_mad_u16 v1, v1, v2, v3
+; GFX10-DL-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX10-DL-NEXT:    s_endpgm
+;
+; GFX11-DL-LABEL: notdot4_mixedtypes2:
+; GFX11-DL:       ; %bb.0: ; %entry
+; GFX11-DL-NEXT:    s_clause 0x1
+; GFX11-DL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
+; GFX11-DL-NEXT:    s_load_b64 s[0:1], s[0:1], 0x34
+; GFX11-DL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-DL-NEXT:    s_clause 0x1
+; GFX11-DL-NEXT:    global_load_b32 v1, v0, s[4:5]
+; GFX11-DL-NEXT:    global_load_b32 v0, v0, s[6:7]
+; GFX11-DL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX11-DL-NEXT:    s_waitcnt vmcnt(1)
+; GFX11-DL-NEXT:    v_lshrrev_b32_e32 v4, 8, v1
+; GFX11-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-DL-NEXT:    v_and_b32_e32 v9, 0xff, v0
+; GFX11-DL-NEXT:    global_load_u16 v3, v2, s[0:1]
+; GFX11-DL-NEXT:    v_lshrrev_b32_e32 v5, 8, v0
+; GFX11-DL-NEXT:    v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-DL-NEXT:    v_and_b32_e32 v4, 0xff, v4
+; GFX11-DL-NEXT:    v_lshrrev_b32_e32 v7, 16, v0
+; GFX11-DL-NEXT:    v_bfe_i32 v8, v1, 0, 8
+; GFX11-DL-NEXT:    v_bfe_i32 v5, v5, 0, 8
+; GFX11-DL-NEXT:    v_lshrrev_b32_e32 v0, 24, v0
+; GFX11-DL-NEXT:    v_lshrrev_b32_e32 v1, 24, v1
+; GFX11-DL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX11-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-DL-NEXT:    v_mad_u16 v3, v4, v5, v3
+; GFX11-DL-NEXT:    v_bfe_i32 v4, v6, 0, 8
+; GFX11-DL-NEXT:    v_and_b32_e32 v5, 0xff, v7
+; GFX11-DL-NEXT:    v_mad_u16 v3, v8, v9, v3
+; GFX11-DL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-DL-NEXT:    v_mad_u16 v3, v4, v5, v3
+; GFX11-DL-NEXT:    v_mad_u16 v0, v1, v0, v3
+; GFX11-DL-NEXT:    global_store_b16 v2, v0, s[0:1]
+; GFX11-DL-NEXT:    s_nop 0
+; GFX11-DL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-DL-NEXT:    s_endpgm
+                                              ptr addrspace(1) %src2,
+                                              ptr addrspace(1) nocapture %dst) {
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+  %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+  %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+  %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+
+  %v1e0 = extractelement <4 x i8> %vec1, i64 0
+  %cv1e0 = sext i8 %v1e0 to i16
+  %v2e0 = extractelement <4 x i8> %vec2, i64 0
+  %cv2e0 = zext i8 %v2e0 to i16
+  %mul1 = mul nuw nsw i16 %cv1e0, %cv2e0
+
+  %v1e1 = extractelement <4 x i8> %vec1, i64 1
+  %cv1e1 = zext i8 %v1e1 to i16
+  %v2e1 = extractelement <4 x i8> %vec2, i64 1
+  %cv2e1 = sext i8 %v2e1 to i16
+  %mul2 = mul nuw nsw i16 %cv1e1, %cv2e1
+
+  %v1e2 = extractelement <4 x i8> %vec1, i64 2
+  %cv1e2 = sext i8 %v1e2 to i16
+  %v2e2 = extractelement <4 x i8> %vec2, i64 2
+  %cv2e2 = zext i8 %v2e2 to i16
+  %mul3 = mul nuw nsw i16 %cv1e2, %cv2e2
+
+  %v1e3 = extractelement <4 x i8> %vec1, i64 3
+  %cv1e3 = zext i8 %v1e3 to i16
+  %v2e3 = extractelement <4 x i8> %vec2, i64 3
+  %cv2e3 = sext i8 %v2e3 to i16
+  %mul4 = mul nuw nsw i16 %cv1e3, %cv2e3
+
+  %acc = load i16, ptr addrspace(1) %dst, align 2
+  %add1 = add i16 %mul2, %acc
+  %add2 = add i16 %add1, %mul1
+  %add3 = add i16 %add2, %mul3
+  %add4 = add i16 %add3, %mul4
+
+  store i16 %add4, ptr addrspace(1) %dst, align 2
+  ret void
+}
+
 ; TODO: cleanup s_lshr_b32
 define amdgpu_kernel void @udot4_acc32_vecMul(ptr addrspace(1) %src1,
 ; GFX7-LABEL: udot4_acc32_vecMul:
@@ -4622,4 +4884,170 @@ entry:
   ret void
 }
 
+define amdgpu_kernel void @idot4_acc32_anyext(ptr addrspace(1) %src1,
+; GFX7-LABEL: idot4_acc32_anyext:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NEXT:    s_mov_b32 s10, 0
+; GFX7-NEXT:    s_mov_b32 s11, s3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b64 s[8:9], s[4:5]
+; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX7-NEXT:    v_mov_b32_e32 v1, 0
+; GFX7-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX7-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x0
+; GFX7-NEXT:    s_mov_b32 s2, -1
+; GFX7-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT:    v_bfe_u32 v2, v2, 8, 8
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_bfe_u32 v0, v0, 8, 8
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mad_u32_u24 v1, v1, v1, s4
+; GFX7-NEXT:    v_mad_u32_u24 v0, v2, v0, v1
+; GFX7-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT:    s_endpgm
+;
+; GFX8-LABEL: idot4_acc32_anyext:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_load_dword v3, v[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s6, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_load_dword v0, v[0:1]
+; GFX8-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT:    v_bfe_u32 v2, v3, 8, 8
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u32_u24 v1, v1, v1, s2
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_bfe_u32 v0, v0, 8, 8
+; GFX8-NEXT:    v_mad_u32_u24 v2, v2, v0, v1
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX9-NODL-LABEL: idot4_acc32_anyext:
+; GFX9-NODL:       ; %bb.0: ; %entry
+; GFX9-NODL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NODL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NODL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NODL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NODL-NEXT:    global_load_dword v1, v0, s[4:5]
+; GFX9-NODL-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX9-NODL-NEXT:    s_load_dword s0, s[2:3], 0x0
+; GFX9-NODL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NODL-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NODL-NEXT:    v_mul_u32_u24_sdwa v3, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NODL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NODL-NEXT:    v_mul_u32_u24_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX9-NODL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NODL-NEXT:    v_add3_u32 v1, v3, s0, v1
+; GFX9-NODL-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX9-NODL-NEXT:    s_endpgm
+;
+; GFX9-DL-LABEL: idot4_acc32_anyext:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-DL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-DL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-DL-NEXT:    s_mov_b32 s1, 0xc0c0500
+; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-DL-NEXT:    global_load_dword v1, v0, s[4:5]
+; GFX9-DL-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX9-DL-NEXT:    s_load_dword s0, s[2:3], 0x0
+; GFX9-DL-NEXT:    s_mov_b32 s4, 0xc0c0100
+; GFX9-DL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-DL-NEXT:    v_perm_b32 v2, v2, v1, s1
+; GFX9-DL-NEXT:    v_perm_b32 v1, v1, v1, s4
+; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-DL-NEXT:    v_dot4_u32_u8 v1, v1, v2, s0
+; GFX9-DL-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX9-DL-NEXT:    s_endpgm
+;
+; GFX10-DL-LABEL: idot4_acc32_anyext:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-DL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-DL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-DL-NEXT:    s_clause 0x1
+; GFX10-DL-NEXT:    global_load_dword v1, v0, s[4:5]
+; GFX10-DL-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX10-DL-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-DL-NEXT:    v_perm_b32 v0, v2, v1, 0xc0c0500
+; GFX10-DL-NEXT:    v_perm_b32 v1, v1, v1, 0xc0c0100
+; GFX10-DL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-DL-NEXT:    v_dot4_u32_u8 v0, v1, v0, s2
+; GFX10-DL-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX10-DL-NEXT:    s_endpgm
+;
+; GFX11-DL-LABEL: idot4_acc32_anyext:
+; GFX11-DL:       ; %bb.0: ; %entry
+; GFX11-DL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
+; GFX11-DL-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-DL-NEXT:    s_load_b64 s[0:1], s[0:1], 0x34
+; GFX11-DL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX11-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-DL-NEXT:    s_clause 0x1
+; GFX11-DL-NEXT:    global_load_b32 v1, v0, s[4:5]
+; GFX11-DL-NEXT:    global_load_b32 v0, v0, s[6:7]
+; GFX11-DL-NEXT:    s_load_b32 s2, s[0:1], 0x0
+; GFX11-DL-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-DL-NEXT:    v_perm_b32 v0, v0, v1, 0xc0c0500
+; GFX11-DL-NEXT:    v_perm_b32 v1, v1, v1, 0xc0c0100
+; GFX11-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-DL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-DL-NEXT:    v_dot4_u32_u8 v0, v1, v0, s2
+; GFX11-DL-NEXT:    global_store_b32 v2, v0, s[0:1]
+; GFX11-DL-NEXT:    s_nop 0
+; GFX11-DL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-DL-NEXT:    s_endpgm
+                                       ptr addrspace(1) %src2,
+                                       ptr addrspace(1) nocapture %dst) {
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+  %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+  %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+  %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+
+  %v1e0 = extractelement <4 x i8> %vec1, i64 0
+  %cv1e0t = sext i8 %v1e0 to i32
+  %cv1e0 = and i32 %cv1e0t, 255
+  %v2e0 = extractelement <4 x i8> %vec2, i64 0
+  %cv2e0t = sext i8 %v2e0 to i32
+  %cv2e0 = and i32 %cv1e0t, 255
+  %mul1 = mul nuw nsw i32 %cv1e0, %cv2e0
+
+  %v1e1 = extractelement <4 x i8> %vec1, i64 1
+  %cv1e1 = zext i8 %v1e1 to i32
+  %v2e1 = extractelement <4 x i8> %vec2, i64 1
+  %cv2e1t = sext i8 %v2e1 to i32
+  %cv2e1 = and i32 %cv2e1t, 255
+  %mul2 = mul nuw nsw i32 %cv1e1, %cv2e1
+
+  %acc = load i32, ptr addrspace(1) %dst, align 4
+  %add1 = add i32 %mul1, %acc
+  %add2 = add i32 %add1, %mul2
+  store i32 %add2, ptr addrspace(1) %dst, align 4
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x()


