[clang] [llvm] [AMDGPU] Add dot product patterns with saturating add (clamp) (PR #187945)

via cfe-commits cfe-commits at lists.llvm.org
Sat Mar 28 09:50:17 PDT 2026


https://github.com/addmisol updated https://github.com/llvm/llvm-project/pull/187945

>From c5ffb2e73bcf69513f94d8e7b89e8372d0d280b2 Mon Sep 17 00:00:00 2001
From: addmisol <218448340+addmisol at users.noreply.github.com>
Date: Fri, 6 Mar 2026 23:56:34 +0530
Subject: [PATCH 01/16] Create amdgpu-abi-struct-coerce.c

---
 .../test/CodeGen/amdgpu-abi-struct-coerce.c   | 71 +++++++++++++++++++
 1 file changed, 71 insertions(+)
 create mode 100644 clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c

diff --git a/clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c b/clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c
new file mode 100644
index 0000000000000..2399630ff797b
--- /dev/null
+++ b/clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c
@@ -0,0 +1,71 @@
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm -o - %s | FileCheck %s
+
+// Check that structs containing mixed float and int types are not coerced
+// to integer arrays. They should preserve the original struct type and
+// individual field types.
+
+typedef struct fp_int_pair {
+    float f;
+    int i;
+} fp_int_pair;
+
+// CHECK-LABEL: define{{.*}} %struct.fp_int_pair @return_fp_int_pair(float %x.coerce0, i32 %x.coerce1)
+// CHECK: ret %struct.fp_int_pair
+fp_int_pair return_fp_int_pair(fp_int_pair x) {
+    return x;
+}
+
+typedef struct int_fp_pair {
+    int i;
+    float f;
+} int_fp_pair;
+
+// CHECK-LABEL: define{{.*}} %struct.int_fp_pair @return_int_fp_pair(i32 %x.coerce0, float %x.coerce1)
+// CHECK: ret %struct.int_fp_pair
+int_fp_pair return_int_fp_pair(int_fp_pair x) {
+    return x;
+}
+
+typedef struct two_floats {
+    float a;
+    float b;
+} two_floats;
+
+// CHECK-LABEL: define{{.*}} %struct.two_floats @return_two_floats(float %x.coerce0, float %x.coerce1)
+// CHECK: ret %struct.two_floats
+two_floats return_two_floats(two_floats x) {
+    return x;
+}
+
+typedef struct two_ints {
+    int a;
+    int b;
+} two_ints;
+
+// CHECK-LABEL: define{{.*}} %struct.two_ints @return_two_ints(i32 %x.coerce0, i32 %x.coerce1)
+// CHECK: ret %struct.two_ints
+two_ints return_two_ints(two_ints x) {
+    return x;
+}
+
+// Structs <= 32 bits should still be coerced to i32 for return value
+typedef struct small_struct {
+    short a;
+    short b;
+} small_struct;
+
+// CHECK-LABEL: define{{.*}} i32 @return_small_struct(i16 %x.coerce0, i16 %x.coerce1)
+small_struct return_small_struct(small_struct x) {
+    return x;
+}
+
+// Structs <= 16 bits should still be coerced to i16 for return value
+typedef struct tiny_struct {
+    char a;
+    char b;
+} tiny_struct;
+
+// CHECK-LABEL: define{{.*}} i16 @return_tiny_struct(i8 %x.coerce0, i8 %x.coerce1)
+tiny_struct return_tiny_struct(tiny_struct x) {
+    return x;
+}

>From 68c200f848058ab22b3d25ce810f1639eac50556 Mon Sep 17 00:00:00 2001
From: addmisol <218448340+addmisol at users.noreply.github.com>
Date: Fri, 6 Mar 2026 23:57:11 +0530
Subject: [PATCH 02/16] Delete
 clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c

---
 .../test/CodeGen/amdgpu-abi-struct-coerce.c   | 71 -------------------
 1 file changed, 71 deletions(-)
 delete mode 100644 clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c

diff --git a/clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c b/clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c
deleted file mode 100644
index 2399630ff797b..0000000000000
--- a/clang/test/CodeGen/clang/test/CodeGen/amdgpu-abi-struct-coerce.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm -o - %s | FileCheck %s
-
-// Check that structs containing mixed float and int types are not coerced
-// to integer arrays. They should preserve the original struct type and
-// individual field types.
-
-typedef struct fp_int_pair {
-    float f;
-    int i;
-} fp_int_pair;
-
-// CHECK-LABEL: define{{.*}} %struct.fp_int_pair @return_fp_int_pair(float %x.coerce0, i32 %x.coerce1)
-// CHECK: ret %struct.fp_int_pair
-fp_int_pair return_fp_int_pair(fp_int_pair x) {
-    return x;
-}
-
-typedef struct int_fp_pair {
-    int i;
-    float f;
-} int_fp_pair;
-
-// CHECK-LABEL: define{{.*}} %struct.int_fp_pair @return_int_fp_pair(i32 %x.coerce0, float %x.coerce1)
-// CHECK: ret %struct.int_fp_pair
-int_fp_pair return_int_fp_pair(int_fp_pair x) {
-    return x;
-}
-
-typedef struct two_floats {
-    float a;
-    float b;
-} two_floats;
-
-// CHECK-LABEL: define{{.*}} %struct.two_floats @return_two_floats(float %x.coerce0, float %x.coerce1)
-// CHECK: ret %struct.two_floats
-two_floats return_two_floats(two_floats x) {
-    return x;
-}
-
-typedef struct two_ints {
-    int a;
-    int b;
-} two_ints;
-
-// CHECK-LABEL: define{{.*}} %struct.two_ints @return_two_ints(i32 %x.coerce0, i32 %x.coerce1)
-// CHECK: ret %struct.two_ints
-two_ints return_two_ints(two_ints x) {
-    return x;
-}
-
-// Structs <= 32 bits should still be coerced to i32 for return value
-typedef struct small_struct {
-    short a;
-    short b;
-} small_struct;
-
-// CHECK-LABEL: define{{.*}} i32 @return_small_struct(i16 %x.coerce0, i16 %x.coerce1)
-small_struct return_small_struct(small_struct x) {
-    return x;
-}
-
-// Structs <= 16 bits should still be coerced to i16 for return value
-typedef struct tiny_struct {
-    char a;
-    char b;
-} tiny_struct;
-
-// CHECK-LABEL: define{{.*}} i16 @return_tiny_struct(i8 %x.coerce0, i8 %x.coerce1)
-tiny_struct return_tiny_struct(tiny_struct x) {
-    return x;
-}

>From 4e970da37321d396a8bdb224c86003a20daa96f4 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sun, 22 Mar 2026 23:01:29 +0530
Subject: [PATCH 03/16] Update SIISelLowering.cpp

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 176 ++++++++++++++++++++++
 1 file changed, 176 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a2de39862b62d..1fb7c48e3dd82 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16896,6 +16896,179 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
   return SDValue();
 }
 
+// Try to fold a saturating add of a dot-product pattern into a dot
+// instruction with clamp. Matches patterns like:
+// uaddsat(a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3], c) -> v_dot4 clamp
+SDValue SITargetLowering::performSatAddCombine(SDNode *N,
+                                               DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  EVT VT = N->getValueType(0);
+  SDLoc SL(N);
+
+  // Only handle i32 saturating adds
+  if (VT != MVT::i32)
+    return SDValue();
+
+  bool IsSigned = N->getOpcode() == ISD::SADDSAT;
+
+  // Check if we have dot instructions
+  if (!Subtarget->hasDot7Insts() ||
+      (!Subtarget->hasDot1Insts() && !Subtarget->hasDot8Insts()))
+    return SDValue();
+
+  // Pattern: (u/s)addsat(sum_of_products, accumulator)
+  // where sum_of_products = add(add(add(mul0, mul1), mul2), mul3)
+  SDValue SumOp = N->getOperand(0);
+  SDValue Accum = N->getOperand(1);
+
+  // The sum operand should be an add of multiplications
+  if (SumOp.getOpcode() != ISD::ADD)
+    return SDValue();
+
+  SDValue LHS = SumOp.getOperand(0);
+  SDValue RHS = SumOp.getOperand(1);
+
+  // Walk the add tree looking for multiplications
+  if (!isMul(LHS) && !isMul(RHS))
+    return SDValue();
+
+  SDValue TempNode = SumOp;
+  std::optional<bool> MulIsSigned;
+  SmallVector<DotSrc, 4> Src0s;
+  SmallVector<DotSrc, 4> Src1s;
+  SmallVector<SDValue, 4> Src2s;
+
+  // Match the v_dot4 tree, while collecting src nodes.
+  int ChainLength = 0;
+  for (int I = 0; I < 4; I++) {
+    LHS = TempNode.getOperand(0);
+    RHS = TempNode.getOperand(1);
+    auto MulIdx = isMul(LHS) ? 0 : isMul(RHS) ? 1 : -1;
+    if (MulIdx == -1)
+      break;
+    auto Src0 = handleMulOperand(TempNode.getOperand(MulIdx).getOperand(0));
+    if (!Src0)
+      break;
+    auto Src1 = handleMulOperand(TempNode.getOperand(MulIdx).getOperand(1));
+    if (!Src1)
+      break;
+
+    auto IterIsSigned = checkDot4MulSignedness(
+        TempNode.getOperand(MulIdx), *Src0, *Src1,
+        TempNode.getOperand(MulIdx).getOperand(0),
+        TempNode.getOperand(MulIdx).getOperand(1), DAG);
+    if (!IterIsSigned)
+      break;
+    if (!MulIsSigned)
+      MulIsSigned = *IterIsSigned;
+    if (*IterIsSigned != *MulIsSigned)
+      break;
+    placeSources(*Src0, *Src1, Src0s, Src1s, I);
+    auto AddIdx = 1 - MulIdx;
+
+    // Allow the special case where add (add (mul24, 0), mul24) was
+    // simplified to add (mul24, mul24).
+    if (I == 2 && isMul(TempNode.getOperand(AddIdx))) {
+      Src2s.push_back(TempNode.getOperand(AddIdx));
+      auto Src0 =
+          handleMulOperand(TempNode.getOperand(AddIdx).getOperand(0));
+      if (!Src0)
+        break;
+      auto Src1 =
+          handleMulOperand(TempNode.getOperand(AddIdx).getOperand(1));
+      if (!Src1)
+        break;
+      auto IterIsSigned = checkDot4MulSignedness(
+          TempNode.getOperand(AddIdx), *Src0, *Src1,
+          TempNode.getOperand(AddIdx).getOperand(0),
+          TempNode.getOperand(AddIdx).getOperand(1), DAG);
+      if (!IterIsSigned)
+        break;
+      assert(MulIsSigned);
+      if (*IterIsSigned != *MulIsSigned)
+        break;
+      placeSources(*Src0, *Src1, Src0s, Src1s, I + 1);
+      Src2s.push_back(DAG.getConstant(0, SL, MVT::i32));
+      ChainLength = I + 2;
+      break;
+    }
+
+    TempNode = TempNode.getOperand(AddIdx);
+    Src2s.push_back(TempNode);
+    ChainLength = I + 1;
+    if (TempNode.getNumOperands() < 2)
+      break;
+  }
+
+  // Need at least 4 multiplications for dot4
+  if (ChainLength < 4)
+    return SDValue();
+
+  // Check signedness consistency: signed saturation requires signed muls
+  if (IsSigned != *MulIsSigned)
+    return SDValue();
+
+  SDValue Src0, Src1;
+
+  // If we are just using a single source for both, and have permuted the
+  // bytes consistently, we can just use the sources without permuting
+  // (commutation).
+  bool UseOriginalSrc = false;
+  if (Src0s.size() == 1 && Src1s.size() == 1 &&
+      Src0s.begin()->PermMask == Src1s.begin()->PermMask &&
+      Src0s.begin()->SrcOp.getValueSizeInBits() >= 32 &&
+      Src1s.begin()->SrcOp.getValueSizeInBits() >= 32) {
+    SmallVector<unsigned, 4> SrcBytes;
+    auto Src0Mask = Src0s.begin()->PermMask;
+    SrcBytes.push_back(Src0Mask & 0xFF000000);
+    bool UniqueEntries = true;
+    for (auto I = 1; I < 4; I++) {
+      auto NextByte = Src0Mask & (0xFF << ((3 - I) * 8));
+
+      if (is_contained(SrcBytes, NextByte)) {
+        UniqueEntries = false;
+        break;
+      }
+      SrcBytes.push_back(NextByte);
+    }
+
+    if (UniqueEntries) {
+      UseOriginalSrc = true;
+
+      auto *FirstElt = Src0s.begin();
+      auto FirstEltOp =
+          getDWordFromOffset(DAG, SL, FirstElt->SrcOp, FirstElt->DWordOffset);
+
+      auto *SecondElt = Src1s.begin();
+      auto SecondEltOp = getDWordFromOffset(DAG, SL, SecondElt->SrcOp,
+                                            SecondElt->DWordOffset);
+
+      Src0 = DAG.getBitcastedAnyExtOrTrunc(FirstEltOp, SL,
+                                           MVT::getIntegerVT(32));
+      Src1 = DAG.getBitcastedAnyExtOrTrunc(SecondEltOp, SL,
+                                           MVT::getIntegerVT(32));
+    }
+  }
+
+  if (!UseOriginalSrc) {
+    Src0 = resolveSources(DAG, SL, Src0s, false, true);
+    Src1 = resolveSources(DAG, SL, Src1s, false, true);
+  }
+
+  // Use the second operand of the saturating add as the accumulator
+  SDValue Src2 = DAG.getExtOrTrunc(IsSigned, Accum, SL, MVT::i32);
+
+  SDValue IID = DAG.getTargetConstant(IsSigned ? Intrinsic::amdgcn_sdot4
+                                                : Intrinsic::amdgcn_udot4,
+                                      SL, MVT::i64);
+
+  // Generate dot4 with clamp=1 for saturation
+  auto Dot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32, IID, Src0,
+                         Src1, Src2, DAG.getTargetConstant(1, SL, MVT::i1));
+
+  return DAG.getExtOrTrunc(IsSigned, Dot, SL, VT);
+}
+
 SDValue SITargetLowering::performPtrAddCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -17725,6 +17898,9 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
   switch (N->getOpcode()) {
   case ISD::ADD:
     return performAddCombine(N, DCI);
+  case ISD::UADDSAT:
+  case ISD::SADDSAT:
+    return performSatAddCombine(N, DCI);
   case ISD::PTRADD:
     return performPtrAddCombine(N, DCI);
   case ISD::SUB:

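For reference, a minimal C-level sketch of the shape this combine targets (a hypothetical example, not part of the patch; it assumes a clang recent enough to have __builtin_elementwise_add_sat, which lowers to ISD::UADDSAT):

// Hypothetical sketch, not from the PR: four u8 lanes per operand.
unsigned udot4_sat(const unsigned char a[4], const unsigned char b[4],
                   unsigned acc) {
  unsigned sum = (unsigned)a[0] * b[0] + (unsigned)a[1] * b[1] +
                 (unsigned)a[2] * b[2] + (unsigned)a[3] * b[3];
  // The saturating add lowers to ISD::UADDSAT; with this patch the whole
  // expression should select v_dot4_u32_u8 with clamp=1 on subtargets that
  // have the dot instructions, instead of a mul/add chain followed by a
  // separate saturating add.
  return __builtin_elementwise_add_sat(sum, acc);
}

The signed variant would use signed char operands and saddsat; per the signedness check in the combine, signed saturation only folds when the multiplies themselves are signed.
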
>From 3cddbf54cbdded16ac0d65ca145bb3216c847725 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sun, 22 Mar 2026 23:02:52 +0530
Subject: [PATCH 04/16] Update SIISelLowering.h

---
 llvm/lib/Target/AMDGPU/SIISelLowering.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index e37bd938dc35d..938860d234ca2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -239,6 +239,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
                                           DAGCombinerInfo &DCI) const;
 
   SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performSatAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performPtrAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;

>From c676e4a3ea14aa39dc92cf7b68d9789165d7643b Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sun, 22 Mar 2026 23:03:47 +0530
Subject: [PATCH 05/16] Update VOP3PInstructions.td

---
 llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 1245 +------------------
 1 file changed, 37 insertions(+), 1208 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 992c375069e77..3d6e06f03d900 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -589,6 +589,28 @@ class SDot2Pat<VOP_Pseudo Inst> : GCNPat <
   let Predicates = Inst.Predicates;
 }
 
+// Saturating unsigned dot2 pattern: uaddsat(a[0]*b[0] + a[1]*b[1], c)
+class UDot2SatPat<VOP_Pseudo Inst> : GCNPat <
+  (uaddsat (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
+                                             (srl i32:$src1, (i32 16))),
+                       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
+                                             (and i32:$src1, (i32 65535)))),
+           i32:$src2),
+  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))> {
+  let Predicates = Inst.Predicates;
+}
+
+// Saturating signed dot2 pattern: saddsat(a[0]*b[0] + a[1]*b[1], c)
+class SDot2SatPat<VOP_Pseudo Inst> : GCNPat <
+  (saddsat (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
+                                             (sra i32:$src1, (i32 16))),
+                       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
+                                             (sext_inreg i32:$src1, i16))),
+           i32:$src2),
+  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))> {
+  let Predicates = Inst.Predicates;
+}
+
 let IsDOT = 1 in {
 let OtherPredicates = [HasDot2Insts] in {
 defm V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
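For the dot2 case, a corresponding sketch (again hypothetical, not part of the patch), where each i32 operand packs two 16-bit lanes:

// Hypothetical sketch, not from the PR: two u16 lanes packed per i32.
unsigned udot2_sat(unsigned a, unsigned b, unsigned acc) {
  unsigned sum = (a >> 16) * (b >> 16) + (a & 0xFFFFu) * (b & 0xFFFFu);
  // The 16-bit multiplies are candidates for AMDGPU's mul_u24 combine,
  // after which UDot2SatPat can select v_dot2_u32_u16 with clamp=1 for
  // the saturating add.
  return __builtin_elementwise_add_sat(sum, acc);
}
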
@@ -710,6 +732,10 @@ defm V_DOT4_F32_BF8_BF8 : VOP3PDOTF8Inst<"v_dot4_f32_bf8_bf8", int_amdgcn_dot4_f
 def : UDot2Pat<V_DOT2_U32_U16>;
 def : SDot2Pat<V_DOT2_I32_I16>;
 
+// Saturating dot2 patterns (with clamp=1)
+def : UDot2SatPat<V_DOT2_U32_U16>;
+def : SDot2SatPat<V_DOT2_I32_I16>;
+
 foreach Type = ["U", "I"] in
   let Predicates = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).Predicates in
   def : GCNPat <
@@ -717,6 +743,17 @@ foreach Type = ["U", "I"] in
                       (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
     (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
 
+// Saturating dot4 patterns: (u/s)addsat(a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3], src2)
+// matched as (u/s)addsat(add(add(add(mul0, mul1), mul2), mul3), src2), selected with clamp=1
+foreach Type = ["U", "I"] in
+  let Predicates = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).Predicates in
+  def : GCNPat <
+    (!cast<SDPatternOperator>(!if(!eq(Type, "U"), "uaddsat", "saddsat"))
+      !cast<dag>(!foldl((!cast<PatFrag>("Mul"#Type#"_Elt0") i32:$src0, i32:$src1), [1, 2, 3], lhs, y,
+                        (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
+      i32:$src2),
+    (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))>;
+
 foreach Type = ["U", "I"] in
   let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
   def : GCNPat <
@@ -1804,1211 +1841,3 @@ def BF16_BF16_SWMMAC_w32  : VOP3PWMMA_Profile<[v8i16, v8i16, v16i16, v8i16], /*_
 def I32_IU8_SWMMAC_w32    : VOP3PWMMA_Profile<[v8i32, v2i32,  v4i32, v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi8, 16xi8
 def I32_IU4X32_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32,   i32,  v2i32, v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi4, 16xi4
 def I32_IU4X64_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32,  v4i32, v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/0,  /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 16xi4, 32xi4 **
-def F32_FP8BF8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v2i32,  v4i32, v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/1>; // 8xf8, 16xf8
-
-def F32_F16_SWMMAC_w64    : VOP3PWMMA_Profile<[v4f32, v4f16, v8f16, v4f32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
-def F32_BF16_SWMMAC_w64   : VOP3PWMMA_Profile<[v4f32, v4i16, v8i16, v4f32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
-def F16_F16_SWMMAC_w64    : VOP3PWMMA_Profile<[v4f16, v4f16, v8f16, v4f16], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
-def BF16_BF16_SWMMAC_w64  : VOP3PWMMA_Profile<[v4i16, v4i16, v8i16, v4i16], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
-def I32_IU8_SWMMAC_w64    : VOP3PWMMA_Profile<[v4i32,   i32, v2i32, v4i32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 4xi8, 8xi8
-def I32_IU4X32_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32,   i32,   i32, v4i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi4, 8xi4 ***
-def I32_IU4X64_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32,   i32, v2i32, v4i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi4, 16xi4
-def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32,   i32, v2i32, v4f32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/1>; // 4xf8, 8xf8
-
-// *   IU4X16_WMMA_w64 lanes 0-31 will have 8xi4, remaining lanes are ignored
-// **  IU4X64_SWMMAC_w32 index is i32, index_key is not used
-// *** IU4X32_SWMMAC_w64 lanes 0-31 will have 8xi4 remaining lanes are ignored
-//                       for matrix A, index is i16; Matrix B uses all lanes
-
-def F32_F32_WMMA_w32             : VOP3PWMMA_Profile<[v8f32, v2f32,    v2f32,    v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_BF16X32_WMMA_w32         : VOP3PWMMA_Profile<[v8f32, v16bf16,  v16bf16,  v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_F16X32_WMMA_w32          : VOP3PWMMA_Profile<[v8f32, v16f16,   v16f16,   v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F16_F16X32_WMMA_w32          : VOP3PWMMA_Profile<[v8f16, v16f16,   v16f16,   v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def BF16_BF16X32_WMMA_w32        : VOP3PWMMA_Profile<[v8bf16, v16bf16, v16bf16, v8bf16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def BF16F32_BF16_WMMA_w32        : VOP3PWMMA_Profile<[v8bf16, v16bf16, v16bf16,  v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_FP8BF8X64_WMMA_w32       : VOP3PWMMA_Profile<[v8f32, v8i32,    v8i32,    v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_FP8BF8X128_WMMA_w32      : VOP3PWMMA_Profile<[v8f32, v16i32,   v16i32,   v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F16_FP8BF8X64_WMMA_w32       : VOP3PWMMA_Profile<[v8f16, v8i32,    v8i32,    v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F16_FP8BF8X128_WMMA_w32      : VOP3PWMMA_Profile<[v8f16, v16i32,   v16i32,   v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_32X16X128_F4_WMMA_w32    : VOP3PWMMA_Profile<[v16f32, v16i32,  v8i32,   v16f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/0, /*_IsF4=*/1>;
-def I32_IU8X64_WMMA_w32          : VOP3PWMMA_Profile<[v8i32, v8i32,    v8i32,    v8i32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/1, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_32X16X128_F4_SCALE_w32   : VOP3PWMMA_Profile<[v16f32, v16i32,  v8i32,   v16f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0,  /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/1, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_32X16X128_F4_SCALE16_w32 : VOP3PWMMA_Profile<[v16f32, v16i32,  v8i32,   v16f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0,  /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/1, /*_Scale16=*/1, /*_HasMatrixReuse=*/1>;
-def F32_F16X64_SWMMAC_w32        : VOP3PWMMA_Profile<[v8f32, v16f16,   v32f16,   v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_BF16X64_SWMMAC_w32       : VOP3PWMMA_Profile<[v8f32, v16bf16,  v32bf16,  v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F16_F16X64_SWMMAC_w32        : VOP3PWMMA_Profile<[v8f16, v16f16,   v32f16,   v8f16], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def BF16_BF16X64_SWMMAC_w32      : VOP3PWMMA_Profile<[v8bf16, v16bf16, v32bf16, v8bf16], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F32_FP8BF8X128_SWMMAC_w32    : VOP3PWMMA_Profile<[v8f32, v8i32,    v16i32,   v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/32, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def F16_FP8BF8X128_SWMMAC_w32    : VOP3PWMMA_Profile<[v8f16, v8i32,    v16i32,   v8f16], /*_IsSWMMAC=*/1, /*_IndexType=*/32, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-def I32_IU8X128_SWMMAC_w32       : VOP3PWMMA_Profile<[v8i32, v8i32,    v16i32,   v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/32, /*_IsIU=*/1, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
-
-// Helper class to compute the destination vector type of WMMA_F8F6F4 instructions based on element type and dimensions.
-class getWMMAF8F6F4DstVTy<ValueType DstEltTy, int M, int N> {
-  // Size in bits = (M * N / 32) * element_size_in_bits
-  defvar Size = !mul(!div(!mul(M, N), 32), DstEltTy.Size);
-  ValueType ret = !cond(!eq(Size, 256)  : v8f32,
-                        !eq(Size, 1024) : v64f16);
-}
-
-// Helper class to compute the type of matrix A and B of WMMA_F8F6F4 instructions based on format and dimensions.
-class getWMMAF8F6F4ABVTy<string Fmt, int D1, int D2> {
-  defvar FmtBits = !cond(!eq(Fmt, "f8") : 8,
-                         !eq(Fmt, "f6") : 6,
-                         !eq(Fmt, "f4") : 4);
-  // TypeSize in bits = (D1 * D2 / 32) * format_bits
-  defvar TypeSize = !mul(!div(!mul(D1, D2), 32), FmtBits);
-  ValueType ret = !cond(!eq(TypeSize, 256)  : v8i32,
-                        !eq(TypeSize, 384)  : v12i32,
-                        !eq(TypeSize, 512)  : v16i32,
-                        !eq(TypeSize, 1024) : v32i32);
-}
-
-multiclass WMMA_F8F6F4_Profiles<ValueType DstEltTy, int M, int N, int K,
-                                bit HasMatrixScale, bit Scale16, bit HasMatrixReuse> {
-  defvar DstTy = getWMMAF8F6F4DstVTy<DstEltTy, M, N>.ret;
-  foreach ATy = ["f8", "f6", "f4"] in {
-    foreach BTy = ["f8", "f6", "f4"] in {
-      def _#ATy#_#BTy#_w32 : VOP3PWMMA_Profile<
-        [DstTy, getWMMAF8F6F4ABVTy<ATy, M, K>.ret, getWMMAF8F6F4ABVTy<BTy, K, N>.ret, DstTy],
-        0, 0, 0, 1, 1, 1, HasMatrixScale, Scale16, HasMatrixReuse>;
-    }
-  }
-}
-
-defm F32_16X16X128_F8F6F4         : WMMA_F8F6F4_Profiles<f32, /*M=*/16, /*N=*/16, /*K=*/128, /*HasMatrixScale=*/0, /*Scale16=*/0, /*HasMatrixReuse=*/0>;
-defm F32_16X16X128_F8F6F4_SCALE   : WMMA_F8F6F4_Profiles<f32, /*M=*/16, /*N=*/16, /*K=*/128, /*HasMatrixScale=*/1, /*Scale16=*/0, /*HasMatrixReuse=*/1>;
-defm F32_16X16X128_F8F6F4_SCALE16 : WMMA_F8F6F4_Profiles<f32, /*M=*/16, /*N=*/16, /*K=*/128, /*HasMatrixScale=*/1, /*Scale16=*/1, /*HasMatrixReuse=*/1>;
-
-class VOP_WMMA_LD_SCALE<ValueType vt, RegisterOperand RC> : VOP3P_Profile<VOPProfile<[untyped, vt, vt, untyped]>> {
-  let HasMatrixScale = 1;
-  let HasMatrixReuse = 1;
-  let HasNeg = 0;
-  let Src0RC64 = RC;
-  let Src1RC64 = RC;
-  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, MatrixAScale:$matrix_a_scale, MatrixBScale:$matrix_b_scale,
-                   MatrixAScaleFmt:$matrix_a_scale_fmt, MatrixBScaleFmt:$matrix_b_scale_fmt,
-                   MatrixAReuse:$matrix_a_reuse, MatrixBReuse:$matrix_b_reuse);
-  let AsmVOP3P = " $src0, $src1$matrix_a_scale$matrix_b_scale$matrix_a_scale_fmt$matrix_b_scale_fmt$matrix_a_reuse$matrix_b_reuse";
-}
-
-multiclass WMMAInst_SrcFormats_mc<string OpName, string Profile> {
-  foreach I = ["f8_f8", "f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
-    defm _#I#_w32 : WMMAInstGFX12<OpName # "_" # I # "_w32", !cast<VOP3PWMMA_Profile>(Profile # "_" # I # "_w32"), "_w32">;
-  }
-}
-
-let WaveSizePredicate = isWave32 in {
-let SubtargetPredicate = isGFX125xOnly in {
-defm V_WMMA_F32_16X16X4_F32_w32       : WMMAInstGFX12<"v_wmma_f32_16x16x4_f32",       F32_F32_WMMA_w32, "_w32">;
-
-let is_wmma_xdl = 1 in {
-defm V_WMMA_F32_16X16X32_BF16_w32     : WMMAInstGFX12<"v_wmma_f32_16x16x32_bf16",     F32_BF16X32_WMMA_w32, "_w32">;
-defm V_WMMA_BF16_16X16X32_BF16_w32    : WMMAInstGFX12<"v_wmma_bf16_16x16x32_bf16",    BF16_BF16X32_WMMA_w32, "_w32">;
-defm V_WMMA_BF16F32_16X16X32_BF16_w32 : WMMAInstGFX12<"v_wmma_bf16f32_16x16x32_bf16", BF16F32_BF16_WMMA_w32, "_w32", 1>;
-defm V_WMMA_F32_16X16X64_FP8_FP8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_fp8_fp8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X64_FP8_BF8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_fp8_bf8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X64_BF8_FP8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_bf8_fp8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X64_BF8_BF8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_bf8_bf8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X64_FP8_FP8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_fp8_fp8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X64_FP8_BF8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_fp8_bf8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X64_BF8_FP8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_bf8_fp8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X64_BF8_BF8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_bf8_bf8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
-defm V_WMMA_I32_16X16X64_IU8_w32      : WMMAInstGFX12<"v_wmma_i32_16x16x64_iu8",      I32_IU8X64_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X32_F16_w32      : WMMAInstGFX12<"v_wmma_f32_16x16x32_f16",      F32_F16X32_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X32_F16_w32      : WMMAInstGFX12<"v_wmma_f16_16x16x32_f16",      F16_F16X32_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X128_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_fp8_fp8", F16_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X128_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_fp8_bf8", F16_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X128_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_bf8_fp8", F16_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X128_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_bf8_bf8", F16_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X128_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_fp8_fp8", F32_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X128_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_fp8_bf8", F32_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X128_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_bf8_fp8", F32_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X128_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_bf8_bf8", F32_FP8BF8X128_WMMA_w32, "_w32">;
-defm V_WMMA_F32_32X16X128_F4_w32      : WMMAInstGFX12<"v_wmma_f32_32x16x128_f4",      F32_32X16X128_F4_WMMA_w32, "_w32">;
-
-defm V_SWMMAC_F32_16X16X64_BF16_w32     : SWMMACInstGFX12<"v_swmmac_f32_16x16x64_bf16",     F32_BF16X64_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_BF16_16X16X64_BF16_w32    : SWMMACInstGFX12<"v_swmmac_bf16_16x16x64_bf16",    BF16_BF16X64_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_BF16F32_16X16X64_BF16_w32 : SWMMACInstGFX12<"v_swmmac_bf16f32_16x16x64_bf16", F32_BF16X64_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X128_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_fp8_fp8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X128_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_fp8_bf8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X128_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_bf8_fp8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X128_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_bf8_bf8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F16_16X16X128_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_fp8_fp8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F16_16X16X128_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_fp8_bf8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F16_16X16X128_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_bf8_fp8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F16_16X16X128_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_bf8_bf8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_I32_16X16X128_IU8_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x128_iu8",     I32_IU8X128_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X64_F16_w32      : SWMMACInstGFX12<"v_swmmac_f32_16x16x64_f16",      F32_F16X64_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F16_16X16X64_F16_w32      : SWMMACInstGFX12<"v_swmmac_f16_16x16x64_f16",      F16_F16X64_SWMMAC_w32, "_w32">;
-
-defm V_WMMA_F32_16X16X128_F8F6F4         : WMMAInst_SrcFormats_mc<"v_wmma_f32_16x16x128_f8f6f4", "F32_16X16X128_F8F6F4">;
-defm V_WMMA_SCALE_F32_16X16X128_F8F6F4   : WMMAInst_SrcFormats_mc<"v_wmma_scale_f32_16x16x128_f8f6f4", "F32_16X16X128_F8F6F4_SCALE">;
-defm V_WMMA_SCALE16_F32_16X16X128_F8F6F4 : WMMAInst_SrcFormats_mc<"v_wmma_scale16_f32_16x16x128_f8f6f4", "F32_16X16X128_F8F6F4_SCALE16">;
-
-defm V_WMMA_SCALE_F32_32X16X128_F4_w32   : WMMAInstGFX12<"v_wmma_scale_f32_32x16x128_f4",   F32_32X16X128_F4_SCALE_w32, "_w32">;
-defm V_WMMA_SCALE16_F32_32X16X128_F4_w32 : WMMAInstGFX12<"v_wmma_scale16_f32_32x16x128_f4", F32_32X16X128_F4_SCALE16_w32, "_w32">;
-} // End is_wmma_xdl = 1.
-
-let isConvergent = 1 in {
-  defm V_WMMA_LD_SCALE_PAIRED_B32   : VOP3PInst<"v_wmma_ld_scale_paired_b32",   VOP_WMMA_LD_SCALE<i32, VCSrc_b32_Lo256>>;
-  defm V_WMMA_LD_SCALE16_PAIRED_B64 : VOP3PInst<"v_wmma_ld_scale16_paired_b64", VOP_WMMA_LD_SCALE<i64, VCSrc_b64_Lo256>>;
-}
-} // End SubtargetPredicate = isGFX125xOnly
-} // End WaveSizePredicate = isWave32
-
-let WaveSizePredicate = isWave32 in {
-defm V_WMMA_F32_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X16_BF16_w32    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w32, "_w32">;
-defm V_WMMA_F16_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w32, "_w32">;
-defm V_WMMA_BF16_16X16X16_BF16_w32   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w32, "_w32">;
-defm V_WMMA_I32_16X16X16_IU8_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w32, "_w32">;
-defm V_WMMA_I32_16X16X16_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
-defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
-defm V_WMMA_I32_16X16X32_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w32, "_w32">;
-
-defm V_SWMMAC_F32_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X32_BF16_w32    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F16_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_BF16_16X16X32_BF16_w32   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_I32_16X16X32_IU8_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_I32_16X16X32_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_I32_16X16X64_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
-defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
-}
-
-let WaveSizePredicate = isWave64 in {
-defm V_WMMA_F32_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w64, "_w64">;
-defm V_WMMA_F32_16X16X16_BF16_w64    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w64, "_w64">;
-defm V_WMMA_F16_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w64, "_w64">;
-defm V_WMMA_BF16_16X16X16_BF16_w64   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w64, "_w64">;
-defm V_WMMA_I32_16X16X16_IU8_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w64, "_w64">;
-defm V_WMMA_I32_16X16X16_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w64, "_w64">;
-defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
-defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
-defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
-defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
-defm V_WMMA_I32_16X16X32_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w64, "_w64">;
-
-defm V_SWMMAC_F32_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_F32_16X16X32_BF16_w64    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_F16_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_BF16_16X16X32_BF16_w64   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_I32_16X16X32_IU8_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_I32_16X16X32_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_I32_16X16X64_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
-defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
-}
-
-// IsGFX11OpselIntrinsic: f16_f16 and bf16_bf16 Intrinsics have imm operand that
-// controls opsel. Used by gfx11, removed in gfx12 (operand must be 0).
-multiclass WMMAPat<string Inst, SDPatternOperator node, VOP3PWMMA_Profile P, bit IsGFX11OpselIntrinsic = 0> {
-  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
-                (P.DstVT !setdagop(P.WmmaOutPat, !cast<Instruction>(Inst#"_twoaddr")))>;
-  let AddedComplexity = 4 in
-  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInlineInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
-                (P.DstVT !setdagop(P.WmmaInlineOutPat, !cast<Instruction>(Inst#"_threeaddr")))>;
-}
-
-class SWMMACPat<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
-  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
-          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>;
-
-class SWMMACPat_w64<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
-  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
-          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>{
-            let WaveSizePredicate = isWave64;
-          }
-
-let WaveSizePredicate = isWave32, SubtargetPredicate = isGFX11PlusNot12_50, OtherPredicates = [HasWMMA128bInsts] in {
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w32",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w32",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w32",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w32,1>;
-  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w32",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w32,1>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w32",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w32",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w32",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w32>;
-}
-
-let WaveSizePredicate = isWave32, SubtargetPredicate = HasSWMMACGfx1200Insts in {
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w32_twoaddr,     int_amdgcn_swmmac_f32_16x16x32_f16,     F32_F16_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w32_twoaddr,    int_amdgcn_swmmac_f32_16x16x32_bf16,    F32_BF16_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w32_twoaddr,     int_amdgcn_swmmac_f16_16x16x32_f16,     F16_F16_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w32_twoaddr,   int_amdgcn_swmmac_bf16_16x16x32_bf16,   BF16_BF16_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu8,     I32_IU8_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu4,     I32_IU4X32_SWMMAC_w32>;
-  def : GCNPat <(I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacInPat,  int_amdgcn_swmmac_i32_16x16x64_iu4)),
-                (I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacOutPat, V_SWMMAC_I32_16X16X64_IU4_w32_twoaddr))>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w32>;
-}
-
-let WaveSizePredicate = isWave64, SubtargetPredicate = isGFX11PlusNot12_50, OtherPredicates = [HasWMMA128bInsts] in {
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w64",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w64",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w64",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w64,1>;
-  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w64",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w64,1>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w64",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w64",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w64>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w64",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w64>;
-}
-
-let WaveSizePredicate = isWave64, SubtargetPredicate = HasSWMMACGfx1200Insts in {
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w64_twoaddr,     int_amdgcn_swmmac_f32_16x16x32_f16,     F32_F16_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w64_twoaddr,    int_amdgcn_swmmac_f32_16x16x32_bf16,    F32_BF16_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w64_twoaddr,     int_amdgcn_swmmac_f16_16x16x32_f16,     F16_F16_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w64_twoaddr,   int_amdgcn_swmmac_bf16_16x16x32_bf16,   BF16_BF16_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu8,     I32_IU8_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu4,     I32_IU4X32_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_I32_16X16X64_IU4_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x64_iu4,     I32_IU4X64_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w64>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w64>;
-}
-
-let WaveSizePredicate = isWave32 in {
-let SubtargetPredicate = isGFX125xOnly in {
-  defm : WMMAPat<"V_WMMA_F32_16X16X4_F32_w32",          int_amdgcn_wmma_f32_16x16x4_f32,          F32_F32_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X32_BF16_w32",        int_amdgcn_wmma_f32_16x16x32_bf16,        F32_BF16X32_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_BF16_16X16X32_BF16_w32",       int_amdgcn_wmma_bf16_16x16x32_bf16,       BF16_BF16X32_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_BF16F32_16X16X32_BF16_w32",    int_amdgcn_wmma_bf16f32_16x16x32_bf16,    BF16F32_BF16_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X64_FP8_FP8_w32",     int_amdgcn_wmma_f32_16x16x64_fp8_fp8,     F32_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X64_FP8_BF8_w32",     int_amdgcn_wmma_f32_16x16x64_fp8_bf8,     F32_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X64_BF8_FP8_w32",     int_amdgcn_wmma_f32_16x16x64_bf8_fp8,     F32_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X64_BF8_BF8_w32",     int_amdgcn_wmma_f32_16x16x64_bf8_bf8,     F32_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X64_FP8_FP8_w32",     int_amdgcn_wmma_f16_16x16x64_fp8_fp8,     F16_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X64_FP8_BF8_w32",     int_amdgcn_wmma_f16_16x16x64_fp8_bf8,     F16_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X64_BF8_FP8_w32",     int_amdgcn_wmma_f16_16x16x64_bf8_fp8,     F16_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X64_BF8_BF8_w32",     int_amdgcn_wmma_f16_16x16x64_bf8_bf8,     F16_FP8BF8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_I32_16X16X64_IU8_w32",         int_amdgcn_wmma_i32_16x16x64_iu8,         I32_IU8X64_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X32_F16_w32",         int_amdgcn_wmma_f32_16x16x32_f16,         F32_F16X32_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X32_F16_w32",         int_amdgcn_wmma_f16_16x16x32_f16,         F16_F16X32_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X128_FP8_FP8_w32",    int_amdgcn_wmma_f16_16x16x128_fp8_fp8,    F16_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X128_FP8_BF8_w32",    int_amdgcn_wmma_f16_16x16x128_fp8_bf8,    F16_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X128_BF8_FP8_w32",    int_amdgcn_wmma_f16_16x16x128_bf8_fp8,    F16_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F16_16X16X128_BF8_BF8_w32",    int_amdgcn_wmma_f16_16x16x128_bf8_bf8,    F16_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X128_FP8_FP8_w32",    int_amdgcn_wmma_f32_16x16x128_fp8_fp8,    F32_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X128_FP8_BF8_w32",    int_amdgcn_wmma_f32_16x16x128_fp8_bf8,    F32_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X128_BF8_FP8_w32",    int_amdgcn_wmma_f32_16x16x128_bf8_fp8,    F32_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_16X16X128_BF8_BF8_w32",    int_amdgcn_wmma_f32_16x16x128_bf8_bf8,    F32_FP8BF8X128_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_F32_32X16X128_F4_w32",         int_amdgcn_wmma_f32_32x16x128_f4,         F32_32X16X128_F4_WMMA_w32>;
-  defm : WMMAPat<"V_WMMA_SCALE_F32_32X16X128_F4_w32",   int_amdgcn_wmma_scale_f32_32x16x128_f4,   F32_32X16X128_F4_SCALE_w32>;
-  defm : WMMAPat<"V_WMMA_SCALE16_F32_32X16X128_F4_w32", int_amdgcn_wmma_scale16_f32_32x16x128_f4, F32_32X16X128_F4_SCALE16_w32>;
-
-  foreach I = ["f8_f8", "f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
-    defm : WMMAPat<"V_WMMA_F32_16X16X128_F8F6F4_" # I # "_w32",         int_amdgcn_wmma_f32_16x16x128_f8f6f4,         !cast<VOP3PWMMA_Profile>("F32_16X16X128_F8F6F4_" # I # "_w32")>;
-    defm : WMMAPat<"V_WMMA_SCALE_F32_16X16X128_F8F6F4_" # I # "_w32",   int_amdgcn_wmma_scale_f32_16x16x128_f8f6f4,   !cast<VOP3PWMMA_Profile>("F32_16X16X128_F8F6F4_SCALE_" # I # "_w32")>;
-    defm : WMMAPat<"V_WMMA_SCALE16_F32_16X16X128_F8F6F4_" # I # "_w32", int_amdgcn_wmma_scale16_f32_16x16x128_f8f6f4, !cast<VOP3PWMMA_Profile>("F32_16X16X128_F8F6F4_SCALE16_" # I # "_w32")>;
-  }
-
-  def : SWMMACPat<V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr,     int_amdgcn_swmmac_f32_16x16x64_bf16,     F32_BF16X64_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr,    int_amdgcn_swmmac_bf16_16x16x64_bf16,    BF16_BF16X64_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr, int_amdgcn_swmmac_bf16f32_16x16x64_bf16, F32_BF16X64_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_fp8_fp8, F32_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_fp8_bf8, F32_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_bf8_fp8, F32_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_bf8_bf8, F32_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_fp8_fp8, F16_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_fp8_bf8, F16_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_bf8_fp8, F16_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_bf8_bf8, F16_FP8BF8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x128_iu8,     I32_IU8X128_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F32_16X16X64_F16_w32_twoaddr,      int_amdgcn_swmmac_f32_16x16x64_f16,      F32_F16X64_SWMMAC_w32>;
-  def : SWMMACPat<V_SWMMAC_F16_16X16X64_F16_w32_twoaddr,      int_amdgcn_swmmac_f16_16x16x64_f16,      F16_F16X64_SWMMAC_w32>;
-} // End SubtargetPredicate = isGFX125xOnly
-} // End WaveSizePredicate = isWave32
-
-//===----------------------------------------------------------------------===//
-// Begin Real Encodings
-//===----------------------------------------------------------------------===//
-
-class VOP3P_DPP16<bits<8> op, VOP_DPP_Pseudo ps, int subtarget,
-                  string opName = ps.OpName>
-    : VOP3P_DPP<op, opName, ps.Pfl, 1>, SIMCInstr<ps.PseudoInstr, subtarget> {
-  let hasSideEffects = ps.hasSideEffects;
-  let Defs = ps.Defs;
-  let SchedRW = ps.SchedRW;
-  let Uses = ps.Uses;
-  let AssemblerPredicate = HasDPP16;
-  let SubtargetPredicate = ps.SubtargetPredicate;
-  let OtherPredicates = ps.OtherPredicates;
-  let IsPacked = ps.IsPacked;
-}
-
-class VOP3P_DPP8_Base<bits<8> op, VOP_Pseudo ps, string opName = ps.OpName>
-    : VOP3P_DPP8<op, opName, ps.Pfl> {
-  let hasSideEffects = ps.hasSideEffects;
-  let Defs = ps.Defs;
-  let SchedRW = ps.SchedRW;
-  let Uses = ps.Uses;
-  let SubtargetPredicate = ps.SubtargetPredicate;
-  let OtherPredicates = ps.OtherPredicates;
-  let IsPacked = ps.IsPacked;
-}
-
-//===----------------------------------------------------------------------===//
-// GFX11, GFX12
-//===----------------------------------------------------------------------===//
-
-multiclass VOP3P_Real_Base<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
-                      string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
-  def Gen.Suffix :
-    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
-    VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
-}
-
-class VOP3PeWmma<bits<8> op, VOPProfile P, VOP3PWMMA_Profile WMMAP>
-    : VOP3Pe_gfx11_gfx12<op, P>{
-
-  // opsel
-  let Inst{11} = !cond(WMMAP.HasMatrixFMT       : matrix_a_fmt{0},
-                       !eq(WMMAP.IndexType, 0)  : 0,
-                       !eq(WMMAP.IndexType, 8)  : index_key_8bit{0},
-                       !eq(WMMAP.IndexType, 16) : index_key_16bit{0},
-                       !eq(WMMAP.IndexType, 32) : index_key_32bit{0});
-  let Inst{12} = !if(WMMAP.HasMatrixFMT, matrix_a_fmt{1},
-                     !if(!eq(WMMAP.IndexType, 8), index_key_8bit{1}, 0));
-  let Inst{13} = !if (WMMAP.HasMatrixFMT, matrix_a_fmt{2},
-                      !if(WMMAP.HasMatrixReuse, matrix_a_reuse, 0));
-  // opsel_hi
-  let Inst{59} = !if (WMMAP.HasMatrixFMT, matrix_b_fmt{0}, 1);
-  let Inst{60} = !if (WMMAP.HasMatrixFMT, matrix_b_fmt{1}, 1);
-  let Inst{14} = !if (WMMAP.HasMatrixFMT, matrix_b_fmt{2},
-                      !if(WMMAP.HasMatrixReuse, matrix_b_reuse, 1));
-  // neg_lo
-  let Inst{61} = !if(WMMAP.NegLo01, src0_modifiers{0}, 0);
-  let Inst{62} = !if(WMMAP.NegLo01, src1_modifiers{0}, 0);
-  let Inst{63} = !if(WMMAP.NegLo2, src2_modifiers{0}, 0);
-  // neg_hi
-  let Inst{8}  = !if(WMMAP.NegHi01, src0_modifiers{1}, 0);
-  let Inst{9}  = !if(WMMAP.NegHi01, src1_modifiers{1}, 0);
-  let Inst{10} = !if(WMMAP.NegHi2, src2_modifiers{1}, 0);
-  // clamp
-  let Inst{15} = !if(WMMAP.HasClamp, clamp{0}, 0);
-}
-
-multiclass VOP3P_WMMA_Real_Base<GFXGen Gen, bits<8> op, VOP3PWMMA_Profile WMMAP,
-                                string backing_ps_name = NAME,
-                                string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
-  def Gen.Suffix :
-    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
-    VOP3PeWmma<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl, WMMAP>;
-}
-
-multiclass VOP3P_Real_WMMA_gfx1170 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
-  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX1170" in {
-    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX1170Gen, op, WMMAP>;
-  }
-}
-
-multiclass VOP3P_Real_WMMA_gfx1170w64 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
-  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX1170W64" in {
-    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX1170Gen, op, WMMAP>;
-  }
-}
-
-multiclass VOP3P_Real_WMMA_gfx12 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
-  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
-    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
-  }
-}
-
-multiclass VOP3P_Real_WMMA_gfx12w64 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
-  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX12W64" in {
-    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
-  }
-}
-
-multiclass VOP3P_Real_WMMA_gfx1170_gfx12 <bits<8> op, VOP3PWMMA_Profile WMMAP> :
-  VOP3P_Real_WMMA_gfx1170<op, WMMAP>,
-  VOP3P_Real_WMMA_gfx12<op, WMMAP>;
-
-multiclass VOP3P_Real_WMMA_gfx1170_gfx12w64 <bits<8> op, VOP3PWMMA_Profile WMMAP> :
-  VOP3P_Real_WMMA_gfx1170w64<op, WMMAP>,
-  VOP3P_Real_WMMA_gfx12w64<op, WMMAP>;
-
-multiclass VOP3P_Real_WMMA_gfx1250 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
-  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
-    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX1250Gen, op, WMMAP>;
-  }
-}
-
-multiclass VOP3P_Real_WMMA_F8F6F4<string Gen, bits<8> op, VOP3PWMMA_Profile WMMAP> {
-  defvar PS = !cast<VOP3P_Pseudo>(NAME # "_twoaddr");
-  defvar asmName = !substr(PS.Mnemonic, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
-  defvar psName = !substr(NAME, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
-  let AsmString = asmName # PS.AsmOperands in {
-    if !eq(Gen, "gfx1250") then {
-      defm NAME : VOP3P_Real_WMMA_gfx1250<op, WMMAP>,
-                  MFMA_F8F6F4_WithSizeTable_Helper<PS, psName # "_f8_f8_w32_twoaddr_" # Gen>;
-    }
-  }
-}
-
-multiclass VOP3P_Real_WMMA_SrcFormats<string Gen, bits<8> op, string WMMAP> {
-  defm _f8_f8_w32 : VOP3P_Real_WMMA_F8F6F4<Gen, op, !cast<VOP3PWMMA_Profile>(WMMAP # "_f8_f8_w32")>;
-  foreach I = ["f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
-    let isAsmParserOnly = true in { // Disable ambiguous disassembly.
-      defm _#I#_w32 : VOP3P_Real_WMMA_F8F6F4<Gen, op, !cast<VOP3PWMMA_Profile>(WMMAP # "_" # I # "_w32")>;
-    }
-  }
-}
-
-class VOP3PX2e <bits<8> op, bits<8> LdScaleOp, VOP3PWMMA_Profile P> : Enc128, VOP3Pe_Base {
-  bits<9> scale_src0;
-  bits<9> scale_src1;
-
-  // Inst{7-0} = unused
-  let Inst{10-8} = {0, matrix_b_scale_fmt{1-0}}; // neg_hi
-  let Inst{11} = matrix_a_scale{0}; // scale_op_sel(0)
-  let Inst{12} = 0;                 // scale_op_sel(1)
-  let Inst{13} = matrix_a_reuse;    // scale_op_sel(2)
-  let Inst{14} = matrix_b_reuse;    // scale_op_sel_hi(2)
-  let Inst{15} = 0; // scale_clamp
-  let Inst{31-24} = 0xcc; // Encoding
-  let Inst{23-16} = LdScaleOp;
-  let Inst{40-32} = scale_src0;
-  let Inst{49-41} = scale_src1;
-  let Inst{58-50} = ?; // scale src2
-  let Inst{59}    = matrix_b_scale{0}; // scale_op_sel_hi(0)
-  let Inst{60}    = 0;                 // scale_op_sel_hi(1)
-  let Inst{63-61} = {0, matrix_a_scale_fmt{1-0}}; // neg (lo)
-
-  // The high half of the encoding is the unscaled wmma op.
-  let Inst{71-64} = vdst;
-
-  let Inst{72} = !if(P.NegHi01, src0_modifiers{1}, 0); // neg_hi src0
-  let Inst{73} = !if(P.NegHi01, src1_modifiers{1}, 0); // neg_hi src1
-  let Inst{74} = !if(P.NegHi2, src2_modifiers{1}, 0); // neg_hi src2
-
-  let Inst{77-75} = !if(P.HasMatrixFMT, matrix_a_fmt{2-0}, 0); // op_sel
-
-  let Inst{78,124,123} = !if(P.HasMatrixFMT, matrix_b_fmt{2-0}, 7); // op_sel_hi
-  let Inst{79} = !if(P.HasClamp, clamp{0}, 0);
-
-  let Inst{87-80} = op;
-  let Inst{95-88} = 0xcc; //encoding
-  let Inst{104-96} = !if(P.HasSrc0, src0, ?);
-  let Inst{113-105} = !if(P.HasSrc1, src1, ?);
-  let Inst{122-114} = !if(P.HasSrc2, src2, ?);
-
-  // neg_lo
-  let Inst{125} = !if(P.NegLo01, src0_modifiers{0}, 0);
-  let Inst{126} = !if(P.NegLo01, src1_modifiers{0}, 0);
-  let Inst{127} = !if(P.NegLo2, src2_modifiers{0}, 0);
-}
-
-multiclass VOP3PX2_Real_ScaledWMMA_F4<string Gen, bits<8> op, bits<8> LdScaleOp, VOP3PWMMA_Profile WMMAP> {
-  defvar PS = !cast<VOP3P_Pseudo>(NAME # "_twoaddr");
-  if !eq(Gen, "gfx1250") then {
-    def _gfx1250 : VOP3P_Real_Gen<PS, GFX1250Gen, PS.Mnemonic>,
-                   VOP3PX2e <op, LdScaleOp, WMMAP> {
-      let PostEncoderMethod = "postEncodeVOP3<true, true, false>";
-    }
-  }
-}
-
-multiclass VOP3PX2_Real_ScaledWMMA<string Gen, bits<8> op, bits<8> LdScaleOp, VOP3PWMMA_Profile WMMAP> {
-  defvar PS = !cast<VOP3P_Pseudo>(NAME # "_twoaddr");
-  defvar asmName = !substr(PS.Mnemonic, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
-  defvar psName = !substr(NAME, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
-  if !eq(Gen, "gfx1250") then {
-    def _gfx1250 : VOP3P_Real_Gen<PS, GFX1250Gen, asmName>,
-                   VOP3PX2e <op, LdScaleOp, WMMAP>,
-                   MFMA_F8F6F4_WithSizeTable_Helper<PS, psName # "_f8_f8_w32_" # Gen> {
-      let AsmString = asmName # PS.AsmOperands;
-      let PostEncoderMethod = "postEncodeVOP3<true, true, false>";
-    }
-  }
-}
-
-multiclass VOP3PX2_Real_ScaledWMMA_SrcFormats<string Gen, bits<8> op, bits<8> LdScaleOp, string WMMAP> {
-  defm _f8_f8_w32 : VOP3PX2_Real_ScaledWMMA<Gen, op, LdScaleOp, !cast<VOP3PWMMA_Profile>(WMMAP # "_f8_f8_w32")>;
-  foreach I = ["f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
-    let isAsmParserOnly = true in { // Disable ambiguous disassembly.
-      defm _#I#_w32 : VOP3PX2_Real_ScaledWMMA<Gen, op, LdScaleOp, !cast<VOP3PWMMA_Profile>(WMMAP # "_" # I # "_w32")>;
-    }
-  }
-}
-
-defm V_WMMA_F32_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x040, F32_F16_WMMA_w32>;
-defm V_WMMA_F32_16X16X16_BF16_w32    : VOP3P_Real_WMMA_gfx1170_gfx12 <0x041, F32_BF16_WMMA_w32>;
-defm V_WMMA_F16_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x042, F16_F16_WMMA_w32>;
-defm V_WMMA_BF16_16X16X16_BF16_w32   : VOP3P_Real_WMMA_gfx1170_gfx12 <0x043, BF16_BF16_WMMA_w32>;
-defm V_WMMA_I32_16X16X16_IU8_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x044, I32_IU8_WMMA_w32>;
-defm V_WMMA_I32_16X16X16_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x045, I32_IU4X16_WMMA_w32>;
-defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x046, F32_FP8BF8_WMMA_w32>;
-defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x047, F32_FP8BF8_WMMA_w32>;
-defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x048, F32_FP8BF8_WMMA_w32>;
-defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x049, F32_FP8BF8_WMMA_w32>;
-defm V_WMMA_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x04a, I32_IU4X32_WMMA_w32>;
-
-defm V_WMMA_F32_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x040, F32_F16_WMMA_w64>;
-defm V_WMMA_F32_16X16X16_BF16_w64    : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x041, F32_BF16_WMMA_w64>;
-defm V_WMMA_F16_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x042, F16_F16_WMMA_w64>;
-defm V_WMMA_BF16_16X16X16_BF16_w64   : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x043, BF16_BF16_WMMA_w64>;
-defm V_WMMA_I32_16X16X16_IU8_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x044, I32_IU8_WMMA_w64>;
-defm V_WMMA_I32_16X16X16_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x045, I32_IU4X16_WMMA_w64>;
-defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x046, F32_FP8BF8_WMMA_w64>;
-defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x047, F32_FP8BF8_WMMA_w64>;
-defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x048, F32_FP8BF8_WMMA_w64>;
-defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x049, F32_FP8BF8_WMMA_w64>;
-defm V_WMMA_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x04a, I32_IU4X32_WMMA_w64>;
-
-defm V_SWMMAC_F32_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x050, F32_F16_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X32_BF16_w32    : VOP3P_Real_WMMA_gfx1170_gfx12 <0x051, F32_BF16_SWMMAC_w32>;
-defm V_SWMMAC_F16_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x052, F16_F16_SWMMAC_w32>;
-defm V_SWMMAC_BF16_16X16X32_BF16_w32   : VOP3P_Real_WMMA_gfx1170_gfx12 <0x053, BF16_BF16_SWMMAC_w32>;
-defm V_SWMMAC_I32_16X16X32_IU8_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x054, I32_IU8_SWMMAC_w32>;
-defm V_SWMMAC_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x055, I32_IU4X32_SWMMAC_w32>;
-defm V_SWMMAC_I32_16X16X64_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x056, I32_IU4X64_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x057, F32_FP8BF8_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x058, F32_FP8BF8_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x059, F32_FP8BF8_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x05a, F32_FP8BF8_SWMMAC_w32>;
-
-defm V_SWMMAC_F32_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x050, F32_F16_SWMMAC_w64>;
-defm V_SWMMAC_F32_16X16X32_BF16_w64    : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x051, F32_BF16_SWMMAC_w64>;
-defm V_SWMMAC_F16_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x052, F16_F16_SWMMAC_w64>;
-defm V_SWMMAC_BF16_16X16X32_BF16_w64   : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x053, BF16_BF16_SWMMAC_w64>;
-defm V_SWMMAC_I32_16X16X32_IU8_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x054, I32_IU8_SWMMAC_w64>;
-defm V_SWMMAC_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x055, I32_IU4X32_SWMMAC_w64>;
-defm V_SWMMAC_I32_16X16X64_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x056, I32_IU4X64_SWMMAC_w64>;
-defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x057, F32_FP8BF8_SWMMAC_w64>;
-defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x058, F32_FP8BF8_SWMMAC_w64>;
-defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x059, F32_FP8BF8_SWMMAC_w64>;
-defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x05a, F32_FP8BF8_SWMMAC_w64>;
-
-defm V_WMMA_F32_16X16X4_F32_w32       : VOP3P_Real_WMMA_gfx1250 <0x05d, F32_F32_WMMA_w32>;
-defm V_WMMA_F32_16X16X32_BF16_w32     : VOP3P_Real_WMMA_gfx1250 <0x062, F32_BF16X32_WMMA_w32>;
-defm V_WMMA_F32_16X16X32_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x060, F32_F16X32_WMMA_w32>;
-defm V_WMMA_F16_16X16X32_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x061, F16_F16X32_WMMA_w32>;
-defm V_WMMA_BF16_16X16X32_BF16_w32    : VOP3P_Real_WMMA_gfx1250 <0x063, BF16_BF16X32_WMMA_w32>;
-defm V_WMMA_BF16F32_16X16X32_BF16_w32 : VOP3P_Real_WMMA_gfx1250 <0x064, BF16F32_BF16_WMMA_w32>;
-defm V_WMMA_F32_16X16X64_FP8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06a, F32_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F32_16X16X64_FP8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06b, F32_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F32_16X16X64_BF8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06c, F32_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F32_16X16X64_BF8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06d, F32_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F16_16X16X64_FP8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06e, F16_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F16_16X16X64_FP8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06f, F16_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F16_16X16X64_BF8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x070, F16_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_F16_16X16X64_BF8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x071, F16_FP8BF8X64_WMMA_w32>;
-defm V_WMMA_I32_16X16X64_IU8_w32      : VOP3P_Real_WMMA_gfx1250 <0x072, I32_IU8X64_WMMA_w32>;
-defm V_WMMA_F32_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x080, F32_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F32_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x081, F32_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F32_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x082, F32_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F32_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x083, F32_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F16_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x084, F16_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F16_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x085, F16_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F16_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x086, F16_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F16_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x087, F16_FP8BF8X128_WMMA_w32>;
-defm V_WMMA_F32_32X16X128_F4_w32      : VOP3P_Real_WMMA_gfx1250 <0x088, F32_32X16X128_F4_WMMA_w32>;
-
-let WaveSizePredicate = isWave32, SubtargetPredicate = isGFX1250Plus, DecoderNamespace = "GFX1250" in {
-defm V_WMMA_F32_16X16X128_F8F6F4         : VOP3P_Real_WMMA_SrcFormats <"gfx1250", 0x033, "F32_16X16X128_F8F6F4">;
-defm V_WMMA_SCALE_F32_16X16X128_F8F6F4   : VOP3PX2_Real_ScaledWMMA_SrcFormats <"gfx1250", 0x033, 0x35, "F32_16X16X128_F8F6F4_SCALE">;
-defm V_WMMA_SCALE16_F32_16X16X128_F8F6F4 : VOP3PX2_Real_ScaledWMMA_SrcFormats <"gfx1250", 0x033, 0x3a, "F32_16X16X128_F8F6F4_SCALE16">;
-
-defm V_WMMA_SCALE_F32_32X16X128_F4_w32   : VOP3PX2_Real_ScaledWMMA_F4 <"gfx1250", 0x088, 0x35, F32_32X16X128_F4_SCALE_w32>;
-defm V_WMMA_SCALE16_F32_32X16X128_F4_w32 : VOP3PX2_Real_ScaledWMMA_F4 <"gfx1250", 0x088, 0x3a, F32_32X16X128_F4_SCALE16_w32>;
-} // End WaveSizePredicate = isWave32, SubtargetPredicate = isGFX1250Plus, DecoderNamespace = "GFX1250"
-
-defm V_SWMMAC_F32_16X16X64_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x065, F32_F16X64_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X64_BF16_w32     : VOP3P_Real_WMMA_gfx1250 <0x066, F32_BF16X64_SWMMAC_w32>;
-defm V_SWMMAC_F16_16X16X64_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x067, F16_F16X64_SWMMAC_w32>;
-defm V_SWMMAC_BF16_16X16X64_BF16_w32    : VOP3P_Real_WMMA_gfx1250 <0x068, BF16_BF16X64_SWMMAC_w32>;
-defm V_SWMMAC_BF16F32_16X16X64_BF16_w32 : VOP3P_Real_WMMA_gfx1250 <0x069, F32_BF16X64_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x073, F32_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x074, F32_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x075, F32_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F32_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x076, F32_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F16_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x077, F16_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F16_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x078, F16_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F16_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x079, F16_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_F16_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x07a, F16_FP8BF8X128_SWMMAC_w32>;
-defm V_SWMMAC_I32_16X16X128_IU8_w32     : VOP3P_Real_WMMA_gfx1250 <0x07b, I32_IU8X128_SWMMAC_w32>;
-
-multiclass VOP3P_Real_with_name<GFXGen Gen, bits<8> op,
-                          string backing_ps_name = NAME,
-                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
-  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
-  let AsmString = asmName # ps.AsmOperands in
-    def Gen.Suffix :
-      VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
-      VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
-
-  def : AMDGPUMnemonicAlias<ps.Mnemonic, asmName> {
-    let AssemblerPredicate = Gen.AssemblerPredicate;
-  }
-}
-
-multiclass VOP3P_Real_dpp<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
-                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
-  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
-  def _dpp#Gen.Suffix
-      : VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"),
-                    Gen.Subtarget> {
-    let AsmString = asmName #ps.Pfl.AsmVOP3DPP16;
-    let DecoderNamespace = Gen.DecoderNamespace;
-    let AssemblerPredicate = Gen.AssemblerPredicate;
-  }
-}
-
-multiclass VOP3P_Real_dpp8<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
-                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
-  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
-  def _dpp8#Gen.Suffix : VOP3P_DPP8_Base<op, ps> {
-    let AsmString = asmName #ps.Pfl.AsmVOP3DPP8;
-    let DecoderNamespace = Gen.DecoderNamespace;
-    let AssemblerPredicate = Gen.AssemblerPredicate;
-  }
-}
-
-multiclass VOP3P_Realtriple<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
-                            string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic>
-    : VOP3P_Real_Base<Gen, op, backing_ps_name, asmName>,
-      VOP3P_Real_dpp<Gen, op, backing_ps_name, asmName>,
-      VOP3P_Real_dpp8<Gen, op, backing_ps_name, asmName>;
-
-multiclass VOP3P_Realtriple_gfx11_gfx12<bits<8> op>
-  : VOP3P_Realtriple<GFX11Gen, op>, VOP3P_Realtriple<GFX12Gen, op>;
-
-defm V_DOT4_F32_FP8_BF8 : VOP3P_Realtriple_gfx11_gfx12<0x24>;
-defm V_DOT4_F32_BF8_FP8 : VOP3P_Realtriple_gfx11_gfx12<0x25>;
-defm V_DOT4_F32_FP8_FP8 : VOP3P_Realtriple_gfx11_gfx12<0x26>;
-defm V_DOT4_F32_BF8_BF8 : VOP3P_Realtriple_gfx11_gfx12<0x27>;
-
-//===----------------------------------------------------------------------===//
-// GFX12
-//===----------------------------------------------------------------------===//
-
-multiclass VOP3P_Real_gfx12<bits<8> op> : VOP3P_Real_Base<GFX12Gen, op>;
-
-multiclass VOP3P_Real_gfx1250<bits<8> op> : VOP3P_Real_Base<GFX1250Gen, op>;
-
-multiclass VOP3P_Real_with_name_gfx12<bits<8> op,
-                          string backing_ps_name = NAME,
-                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
-  VOP3P_Real_with_name<GFX12Gen, op, backing_ps_name, asmName>;
-
-defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1b, "V_PK_MIN_F16", "v_pk_min_num_f16">;
-defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1c, "V_PK_MAX_F16", "v_pk_max_num_f16">;
-
-defm V_PK_FMA_F32 : VOP3P_Real_gfx12<0x1f>;
-defm V_PK_MUL_F32 : VOP3P_Real_gfx12<0x28>;
-defm V_PK_ADD_F32 : VOP3P_Real_gfx12<0x29>;
-
-defm V_PK_ADD_MAX_I16  : VOP3P_Real_gfx1250<0x14>;
-defm V_PK_ADD_MAX_U16  : VOP3P_Real_gfx1250<0x15>;
-defm V_PK_ADD_MIN_I16  : VOP3P_Real_gfx1250<0x2d>;
-defm V_PK_ADD_MIN_U16  : VOP3P_Real_gfx1250<0x2e>;
-defm V_PK_MAX3_I16     : VOP3P_Real_gfx1250<0x2f>;
-defm V_PK_MAX3_U16     : VOP3P_Real_gfx1250<0x30>;
-defm V_PK_MIN3_I16     : VOP3P_Real_gfx1250<0x31>;
-defm V_PK_MIN3_U16     : VOP3P_Real_gfx1250<0x32>;
-defm V_PK_FMA_BF16     : VOP3P_Real_gfx1250<0x11>;
-defm V_PK_ADD_BF16     : VOP3P_Real_gfx1250<0x23>;
-defm V_PK_MUL_BF16     : VOP3P_Real_gfx1250<0x2a>;
-defm V_PK_MIN_NUM_BF16 : VOP3P_Real_gfx1250<0x2b>;
-defm V_PK_MAX_NUM_BF16 : VOP3P_Real_gfx1250<0x2c>;
-defm V_PK_MINIMUM3_F16 : VOP3P_Real_gfx1250<0x36>;
-defm V_PK_MAXIMUM3_F16 : VOP3P_Real_gfx1250<0x37>;
-defm V_PK_MIN3_NUM_F16 : VOP3P_Real_gfx1250<0x38>;
-defm V_PK_MAX3_NUM_F16 : VOP3P_Real_gfx1250<0x39>;
-
-defm V_FMA_MIX_F32_BF16 : VOP3P_Realtriple<GFX1250Gen, 0x3d>;
-defm V_FMA_MIXLO_BF16   : VOP3P_Realtriple<GFX1250Gen, 0x3e>;
-defm V_FMA_MIXHI_BF16   : VOP3P_Realtriple<GFX1250Gen, 0x3f>;
-
-let PostEncoderMethod = "postEncodeVOP3<true, true, false>" in {
-  defm V_WMMA_LD_SCALE_PAIRED_B32   : VOP3P_Real_gfx1250<0x35>;
-  defm V_WMMA_LD_SCALE16_PAIRED_B64 : VOP3P_Real_gfx1250<0x3a>;
-}
-
-let AssemblerPredicate = isGFX1250Plus in
-def : AMDGPUMnemonicAlias<"v_fma_mix_f32_f16",  "v_fma_mix_f32">;
-
-//===----------------------------------------------------------------------===//
-// GFX1170
-//===----------------------------------------------------------------------===//
-
-multiclass VOP3P_Real_gfx11_gfx12<bits<8> op> :
-   VOP3P_Real_Base<GFX11Gen, op>, VOP3P_Real_Base<GFX12Gen, op>;
-
-multiclass VOP3P_Real_with_name_gfx1170<bits<8> op,
-                          string backing_ps_name = NAME,
-                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
-  VOP3P_Real_with_name<GFX1170Gen, op, backing_ps_name, asmName>;
-
-defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx1170<0x11, "V_PK_MAX_F16", "v_pk_max_num_f16">;
-defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx1170<0x12, "V_PK_MIN_F16", "v_pk_min_num_f16">;
-
-defm V_PK_MINIMUM_F16 : VOP3P_Real_gfx11_gfx12<0x1d>;
-defm V_PK_MAXIMUM_F16 : VOP3P_Real_gfx11_gfx12<0x1e>;
-
-//===----------------------------------------------------------------------===//
-// GFX11
-//===----------------------------------------------------------------------===//
-
-defm V_DOT4_I32_IU8  : VOP3P_Real_gfx11_gfx12<0x16>;
-defm V_DOT8_I32_IU4  : VOP3P_Real_gfx11_gfx12<0x18>;
-defm V_DOT2_F32_BF16 : VOP3P_Realtriple_gfx11_gfx12<0x1a>;
-
-let AssemblerPredicate = isGFX11Plus in {
-  def : AMDGPUMnemonicAlias<"v_dot4_i32_i8", "v_dot4_i32_iu8">;
-  def : AMDGPUMnemonicAlias<"v_dot8_i32_i4", "v_dot8_i32_iu4">;
-}
-
-multiclass VOP3P_Real_WMMA <bits<8> op> {
-  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX11" in {
-    defm _twoaddr_w32 : VOP3P_Real_Base <GFX11Gen, op>;
-  }
-  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX11W64" in {
-    defm _twoaddr_w64 : VOP3P_Real_Base <GFX11Gen, op>;
-  }
-}
-
-defm V_WMMA_F32_16X16X16_F16   : VOP3P_Real_WMMA <0x040>;
-defm V_WMMA_F32_16X16X16_BF16  : VOP3P_Real_WMMA <0x041>;
-defm V_WMMA_F16_16X16X16_F16   : VOP3P_Real_WMMA <0x042>;
-defm V_WMMA_BF16_16X16X16_BF16 : VOP3P_Real_WMMA <0x043>;
-defm V_WMMA_I32_16X16X16_IU8   : VOP3P_Real_WMMA <0x044>;
-defm V_WMMA_I32_16X16X16_IU4   : VOP3P_Real_WMMA <0x045>;
-
-//===----------------------------------------------------------------------===//
-// GFX8 (VI)
-//===----------------------------------------------------------------------===//
-
-multiclass VOP3P_Real_vi<bits<7> op> {
-  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
-            VOP3Pe_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
-    let AssemblerPredicate = HasVOP3PInsts;
-    let DecoderNamespace = "GFX8";
-    let VOP3P = 1;
-  }
-}
-
-multiclass VOP3P_Real_MAI<bits<7> op> {
-  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
-            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
-    let AssemblerPredicate = HasMAIInsts;
-    let DecoderNamespace = "GFX8";
-    let Inst{14} = ?; // op_sel_hi(2)
-    let Inst{59} = ?; // op_sel_hi(0)
-    let Inst{60} = ?; // op_sel_hi(1)
-  }
-}
-
-let Constraints = "" in {
-multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
-  let SubtargetPredicate = isGFX90AOnly,
-      AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
-  def _gfx90a_acd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX90A>,
-             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, 1>;
-
-  def _gfx90a_vcd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64"), SIEncodingFamily.GFX90A>,
-             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64").Pfl, 0>;
-  } // End AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A"
-}
-}
-
-multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string Op,
-                                          VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(Op # "_e64"),
-                                          VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(Op # "_vgprcd" # "_e64"),
-                                          VOPProfile Pfl_ACD = PS_ACD.Pfl,
-                                          VOPProfile Pfl_VCD = PS_VCD.Pfl> {
-  if !ne(NameFrom, NameTo) then {
-    let SubtargetPredicate = PS_ACD.SubtargetPredicate,
-        OtherPredicates = PS_ACD.OtherPredicates in {
-      def : InstAlias <NameTo # " " # PS_ACD.AsmOperands,
-                     (!cast<VOP3P_Real>(Op # "_gfx940_acd") Pfl_ACD.DstRC:$vdst,
-                         Pfl_ACD.Src0RC64:$src0, Pfl_ACD.Src1RC64:$src1, Pfl_ACD.Src2RC64:$src2,
-                         CBSZ:$cbsz, ABID:$abid, blgp:$blgp)>, PredicateControl;
-      def : InstAlias <NameTo # " " # PS_VCD.AsmOperands,
-                     (!cast<VOP3P_Real>(Op # "_gfx940_vcd") Pfl_VCD.DstRC:$vdst,
-                         Pfl_VCD.Src0RC64:$src0, Pfl_VCD.Src1RC64:$src1, Pfl_VCD.Src2RC64:$src2,
-                         CBSZ:$cbsz, ABID:$abid, blgp:$blgp)>, PredicateControl;
-    }
-  }
-}
-
-multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
-                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
-                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
-  let AssemblerPredicate = isGFX940Plus,
-      DecoderNamespace = "GFX940",
-      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
-  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
-                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>;
-
-  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
-                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>;
-  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"
-
-  let SubtargetPredicate = PS_ACD.SubtargetPredicate,
-      OtherPredicates = PS_ACD.OtherPredicates,
-      AssemblerPredicate = isGFX940Plus
-      in {
-    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, PS_ACD.Mnemonic, NAME>;
-
-    if !ne(!subst("_1k", "", PS_ACD.Mnemonic), PS_ACD.Mnemonic) then
-    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, !subst("_1k", "", PS_ACD.Mnemonic), NAME>;
-  }
-}
-
-multiclass VOP3P_Real_MFMA_F8F6F4_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
-                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
-                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
-
-  defvar F8F8Name = !substr(NAME, 0, !sub(!size(NAME), !size("_fN_fM")))#"_f8_f8";
-
-  let AssemblerPredicate = isGFX940Plus,
-      DecoderNamespace = "GFX940",
-      AsmString = Name # PS_ACD.AsmOperands,
-      Constraints = "" in {
-  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
-                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>,
-                    MFMA_F8F6F4_WithSizeTable_Helper<PS_ACD, F8F8Name#"_gfx940_acd">;
-
-  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
-                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>,
-                    MFMA_F8F6F4_WithSizeTable_Helper<PS_VCD, F8F8Name#"_gfx940_vcd">;
-  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"
-}
-
-multiclass VOP3P_Real_MFMA_gfx950<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
-                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
-                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
-    let SubtargetPredicate = HasGFX950Insts,
-        AssemblerPredicate = HasGFX950Insts in {
-    defm "" : VOP3P_Real_MFMA_gfx940<op, Name, PS_ACD, PS_VCD>;
-  }
-}
-
-
-multiclass VOP3P_Real_MFMA_F8F6F4_gfx950_mc<bits<7> op, string Name> {
-  defm _f8_f8 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-
-  let isAsmParserOnly = true in { // Disable ambiguous disassembly.
-    defm _f8_f6 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f6_f8 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f8_f4 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f4_f8 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f6_f6 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f6_f4 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f4_f6 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-    defm _f4_f4 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
-  }
-}
-
-multiclass VOP3PX_Real_ScaledMFMA<bits<7> op> {
-  defvar PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64");
-  defvar PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64");
-  defvar Name = PS_ACD.Mnemonic;
-  defvar F8F8Name = !substr(NAME, 0, !sub(!size(NAME), !size("_fN_fM")))#"_f8_f8";
-  let SubtargetPredicate = HasGFX950Insts,
-      DecoderNamespace = "GFX940",
-      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
-   def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
-                     VOP3PXe <op, PS_ACD.Pfl, /*acc_cd=*/1>,
-                     MFMA_F8F6F4_WithSizeTable_Helper<PS_ACD, F8F8Name#"_gfx940_acd">;
-
-   def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
-                     VOP3PXe <op, PS_VCD.Pfl, /*acc_cd=*/0>,
-                     MFMA_F8F6F4_WithSizeTable_Helper<PS_VCD, F8F8Name#"_gfx940_vcd">;
-  }
-}
-
-multiclass VOP3PX_Real_ScaledMFMA_F8F6F4_mc<bits<7> op> {
-  defm _f8_f8 : VOP3PX_Real_ScaledMFMA<op>;
-
-  let isAsmParserOnly = 1 in { // Disable ambiguous disassembly.
-  defm _f8_f6 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f6_f8 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f8_f4 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f4_f8 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f6_f6 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f6_f4 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f4_f6 : VOP3PX_Real_ScaledMFMA<op>;
-  defm _f4_f4 : VOP3PX_Real_ScaledMFMA<op>;
-  }
-}
-
-multiclass VOP3P_Real_MFMA_vi<bits<7> op> {
-  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
-            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
-    let SubtargetPredicate = isGFX8GFX9NotGFX90A;
-    let AssemblerPredicate = HasMAIInsts;
-    let DecoderNamespace = "GFX8";
-    let Constraints = "";
-  }
-}
-
-multiclass VOP3P_Real_MFMA_vi_gfx90a<bits<7> op> :
-  VOP3P_Real_MFMA_gfx90a <op>,
-  VOP3P_Real_MFMA_vi <op>;
-
-multiclass VOP3P_Real_MFMA<bits<7> op, string GFX940Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> :
-  VOP3P_Real_MFMA_vi_gfx90a <op>,
-  VOP3P_Real_MFMA_gfx940 <op, GFX940Name>;
-
-multiclass VOP3P_Real_SMFMAC<bits<7> op, string alias> {
-  def _gfx940 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
-                VOP3Pe_SMFMAC <op> {
-    let AssemblerPredicate = isGFX940Plus;
-    let DecoderNamespace = "GFX8";
-  }
-  def : AMDGPUMnemonicAlias<alias, !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> {
-    let AssemblerPredicate = isGFX940Plus;
-  }
-}
-
-defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>;
-defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>;
-defm V_PK_ADD_I16 : VOP3P_Real_vi <0x02>;
-defm V_PK_SUB_I16 : VOP3P_Real_vi <0x03>;
-defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>;
-defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>;
-defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>;
-defm V_PK_MAX_I16 : VOP3P_Real_vi <0x07>;
-defm V_PK_MIN_I16 : VOP3P_Real_vi <0x08>;
-defm V_PK_MAD_U16 : VOP3P_Real_vi <0x09>;
-
-defm V_PK_ADD_U16 : VOP3P_Real_vi <0x0a>;
-defm V_PK_SUB_U16 : VOP3P_Real_vi <0x0b>;
-defm V_PK_MAX_U16 : VOP3P_Real_vi <0x0c>;
-defm V_PK_MIN_U16 : VOP3P_Real_vi <0x0d>;
-defm V_PK_FMA_F16 : VOP3P_Real_vi <0x0e>;
-defm V_PK_ADD_F16 : VOP3P_Real_vi <0x0f>;
-defm V_PK_MUL_F16 : VOP3P_Real_vi <0x10>;
-defm V_PK_MIN_F16 : VOP3P_Real_vi <0x11>;
-defm V_PK_MAX_F16 : VOP3P_Real_vi <0x12>;
-
-defm V_DOT2_F32_BF16 : VOP3P_Real_vi<0x1a>;
-defm V_PK_MINIMUM3_F16 : VOP3P_Real_vi <0x1b>;
-defm V_PK_MAXIMUM3_F16 : VOP3P_Real_vi <0x1c>;
-
-defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x20>;
-defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>;
-defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>;
-
-let OtherPredicates = [HasFmaMixInsts],
-    DecoderNamespace = "GFX9_DL" in {
-// The mad_mix instructions were renamed and their behaviors changed,
-// but the opcode stayed the same so we need to put these in a
-// different DecoderNamespace to avoid the ambiguity.
-defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x20>;
-defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
-defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
-}
-
-defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
-defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;
-
-defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
-defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x29>;
-defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x2b>;
-
-defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x28>;
-defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x2a>;
-
-defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x58>;
-defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
-defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MFMA <0x40, "v_mfma_f32_32x32x1_2b_f32">;
-defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MFMA <0x41, "v_mfma_f32_16x16x1_4b_f32">;
-defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MFMA <0x42, "v_mfma_f32_4x4x1_16b_f32">;
-defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MFMA <0x44, "v_mfma_f32_32x32x2_f32">;
-defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MFMA <0x45, "v_mfma_f32_16x16x4_f32">;
-defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MFMA <0x48, "v_mfma_f32_32x32x4_2b_f16">;
-defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MFMA <0x49, "v_mfma_f32_16x16x4_4b_f16">;
-defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MFMA <0x4a, "v_mfma_f32_4x4x4_16b_f16">;
-defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MFMA <0x4c, "v_mfma_f32_32x32x8_f16">;
-defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d, "v_mfma_f32_16x16x16_f16">;
-defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MFMA <0x50, "v_mfma_i32_32x32x4_2b_i8">;
-defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MFMA <0x51, "v_mfma_i32_16x16x4_4b_i8">;
-defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MFMA <0x52, "v_mfma_i32_4x4x4_16b_i8">;
-
-defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MFMA_vi_gfx90a <0x55>;
-defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MFMA_vi_gfx90a <0x54>;
-defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x68>;
-defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x69>;
-defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MFMA_vi_gfx90a <0x6b>;
-defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6c>;
-defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6d>;
-
-defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x63>;
-defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x64>;
-defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx90a <0x65>;
-defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x66>;
-defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx90a <0x67>;
-defm V_MFMA_F64_16X16X4F64      : VOP3P_Real_MFMA_gfx90a <0x6e>;
-defm V_MFMA_F64_4X4X4F64        : VOP3P_Real_MFMA_gfx90a <0x6f>;
-
-defm V_MFMA_F32_16X16X32_F16     : VOP3P_Real_MFMA_gfx950 <0x54, "v_mfma_f32_16x16x32_f16">;
-defm V_MFMA_F32_32X32X16_F16     : VOP3P_Real_MFMA_gfx950 <0x55, "v_mfma_f32_32x32x16_f16">;
-defm V_MFMA_F32_16X16X32_BF16    : VOP3P_Real_MFMA_gfx950 <0x35, "v_mfma_f32_16x16x32_bf16">;
-defm V_MFMA_I32_16X16X64_I8      : VOP3P_Real_MFMA_gfx950 <0x36, "v_mfma_i32_16x16x64_i8">;
-defm V_MFMA_F32_32X32X16_BF16    : VOP3P_Real_MFMA_gfx950 <0x37, "v_mfma_f32_32x32x16_bf16">;
-defm V_MFMA_I32_32X32X32_I8      : VOP3P_Real_MFMA_gfx950 <0x38, "v_mfma_i32_32x32x32_i8">;
-
-defm V_MFMA_LD_SCALE_B32 : VOP3P_Real_vi <0x2c>;
-defm V_MFMA_F32_16X16X128_F8F6F4 : VOP3P_Real_MFMA_F8F6F4_gfx950_mc <0x2d, "v_mfma_f32_16x16x128_f8f6f4">;
-defm V_MFMA_SCALE_F32_16X16X128_F8F6F4 : VOP3PX_Real_ScaledMFMA_F8F6F4_mc <0x2d>;
-defm V_MFMA_F32_32X32X64_F8F6F4  : VOP3P_Real_MFMA_F8F6F4_gfx950_mc <0x2e, "v_mfma_f32_32x32x64_f8f6f4">;
-defm V_MFMA_SCALE_F32_32X32X64_F8F6F4 : VOP3PX_Real_ScaledMFMA_F8F6F4_mc <0x2e>;
-
-defm V_MFMA_I32_32X32X16I8       : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
-defm V_MFMA_I32_16X16X32I8       : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
-defm V_MFMA_F32_16X16X8XF32      : VOP3P_Real_MFMA_gfx940 <0x3e, "v_mfma_f32_16x16x8_xf32">;
-defm V_MFMA_F32_32X32X4XF32      : VOP3P_Real_MFMA_gfx940 <0x3f, "v_mfma_f32_32x32x4_xf32">;
-
-defm V_MFMA_F32_16X16X32_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x70>;
-defm V_MFMA_F32_16X16X32_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x71>;
-defm V_MFMA_F32_16X16X32_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x72>;
-defm V_MFMA_F32_16X16X32_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x73>;
-defm V_MFMA_F32_32X32X16_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x74>;
-defm V_MFMA_F32_32X32X16_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x75>;
-defm V_MFMA_F32_32X32X16_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x76>;
-defm V_MFMA_F32_32X32X16_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x77>;
-
-defm V_MFMA_F32_32X32X4BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x5d, "v_mfma_f32_32x32x4_2b_bf16">;
-defm V_MFMA_F32_16X16X4BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x5e, "v_mfma_f32_16x16x4_4b_bf16">;
-defm V_MFMA_F32_4X4X4BF16_1K     : VOP3P_Real_MFMA_gfx940 <0x5f, "v_mfma_f32_4x4x4_16b_bf16">;
-defm V_MFMA_F32_32X32X8BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x60, "v_mfma_f32_32x32x8_bf16">;
-defm V_MFMA_F32_16X16X16BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x61, "v_mfma_f32_16x16x16_bf16">;
-
-defm V_MFMA_F64_16X16X4F64       : VOP3P_Real_MFMA_gfx940 <0x6e, "v_mfma_f64_16x16x4_f64">;
-defm V_MFMA_F64_4X4X4F64         : VOP3P_Real_MFMA_gfx940 <0x6f, "v_mfma_f64_4x4x4_4b_f64">;
-
-defm V_SMFMAC_F32_16X16X32_F16     : VOP3P_Real_SMFMAC <0x62, "v_smfmac_f32_16x16x32f16">;
-defm V_SMFMAC_F32_32X32X16_F16     : VOP3P_Real_SMFMAC <0x64, "v_smfmac_f32_32x32x16f16">;
-defm V_SMFMAC_F32_16X16X32_BF16    : VOP3P_Real_SMFMAC <0x66, "v_smfmac_f32_16x16x32bf16">;
-defm V_SMFMAC_F32_32X32X16_BF16    : VOP3P_Real_SMFMAC <0x68, "v_smfmac_f32_32x32x16bf16">;
-defm V_SMFMAC_I32_16X16X64_I8      : VOP3P_Real_SMFMAC <0x6a, "v_smfmac_i32_16x16x64i8">;
-defm V_SMFMAC_I32_32X32X32_I8      : VOP3P_Real_SMFMAC <0x6c, "v_smfmac_i32_32x32x32i8">;
-defm V_SMFMAC_F32_16X16X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x78, "v_smfmac_f32_16x16x64bf8bf8">;
-defm V_SMFMAC_F32_16X16X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x79, "v_smfmac_f32_16x16x64bf8fp8">;
-defm V_SMFMAC_F32_16X16X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x7a, "v_smfmac_f32_16x16x64fp8bf8">;
-defm V_SMFMAC_F32_16X16X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x7b, "v_smfmac_f32_16x16x64fp8fp8">;
-defm V_SMFMAC_F32_32X32X32_BF8_BF8 : VOP3P_Real_SMFMAC <0x7c, "v_smfmac_f32_32x32x32bf8bf8">;
-defm V_SMFMAC_F32_32X32X32_BF8_FP8 : VOP3P_Real_SMFMAC <0x7d, "v_smfmac_f32_32x32x32bf8fp8">;
-defm V_SMFMAC_F32_32X32X32_FP8_BF8 : VOP3P_Real_SMFMAC <0x7e, "v_smfmac_f32_32x32x32fp8bf8">;
-defm V_SMFMAC_F32_32X32X32_FP8_FP8 : VOP3P_Real_SMFMAC <0x7f, "v_smfmac_f32_32x32x32fp8fp8">;
-
-defm V_SMFMAC_F32_16X16X64_F16     : VOP3P_Real_SMFMAC <0x5a, "v_smfmac_f32_16x16x64f16">;
-defm V_SMFMAC_F32_32X32X32_F16     : VOP3P_Real_SMFMAC <0x5b, "v_smfmac_f32_32x32x32f16">;
-defm V_SMFMAC_F32_16X16X64_BF16    : VOP3P_Real_SMFMAC <0x39, "v_smfmac_f32_16x16x64bf16">;
-defm V_SMFMAC_F32_32X32X32_BF16    : VOP3P_Real_SMFMAC <0x46, "v_smfmac_f32_32x32x32bf16">;
-defm V_SMFMAC_I32_16X16X128_I8     : VOP3P_Real_SMFMAC <0x3a, "v_smfmac_i32_16x16x128i8">;
-defm V_SMFMAC_I32_32X32X64_I8      : VOP3P_Real_SMFMAC <0x47, "v_smfmac_i32_32x32x64i8">;
-
-defm V_SMFMAC_F32_16X16X128_BF8_BF8 : VOP3P_Real_SMFMAC <0x3b, "v_smfmac_f32_16x16x128bf8bf8">;
-defm V_SMFMAC_F32_16X16X128_BF8_FP8 : VOP3P_Real_SMFMAC <0x3c, "v_smfmac_f32_16x16x128bf8fp8">;
-defm V_SMFMAC_F32_16X16X128_FP8_BF8 : VOP3P_Real_SMFMAC <0x3d, "v_smfmac_f32_16x16x128fp8bf8">;
-defm V_SMFMAC_F32_16X16X128_FP8_FP8 : VOP3P_Real_SMFMAC <0x43, "v_smfmac_f32_16x16x128fp8fp8">;
-defm V_SMFMAC_F32_32X32X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x4b, "v_smfmac_f32_32x32x64bf8bf8">;
-defm V_SMFMAC_F32_32X32X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x4e, "v_smfmac_f32_32x32x64bf8fp8">;
-defm V_SMFMAC_F32_32X32X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x4f, "v_smfmac_f32_32x32x64fp8bf8">;
-defm V_SMFMAC_F32_32X32X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x53, "v_smfmac_f32_32x32x64fp8fp8">;
-
-defm V_PK_FMA_F32 : VOP3P_Real_vi <0x30>;
-defm V_PK_MUL_F32 : VOP3P_Real_vi <0x31>;
-defm V_PK_ADD_F32 : VOP3P_Real_vi <0x32>;
-defm V_PK_MOV_B32 : VOP3P_Real_vi <0x33>;
-
-//===----------------------------------------------------------------------===//
-// GFX10.
-//===----------------------------------------------------------------------===//
-
-let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1 in {
-  multiclass VOP3P_Real_gfx10<bits<8> op> {
-    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
-                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
-  }
-} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1
-
-multiclass VOP3P_Real_gfx10_gfx11<bits<8> op> :
-  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Gen, op>;
-
-multiclass VOP3P_Real_gfx10_gfx11_not_gfx1170<bits<8> op> :
-  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Not11_70Gen, op>;
-
-multiclass VOP3P_Real_gfx10_gfx11_gfx12<bits<8> op> :
-  VOP3P_Real_gfx10_gfx11<op>, VOP3P_Real_Base<GFX12Gen, op>;
-
-multiclass VOP3P_Real_gfx10_gfx11_gfx12_Triple<bits<8> op> :
-  VOP3P_Real_gfx10<op>, VOP3P_Realtriple<GFX11Gen, op>,
-  VOP3P_Realtriple<GFX12Gen, op>;
-
-defm V_PK_MAD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x00>;
-defm V_PK_MUL_LO_U16  : VOP3P_Real_gfx10_gfx11_gfx12<0x01>;
-defm V_PK_ADD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x02>;
-defm V_PK_SUB_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x03>;
-defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x04>;
-defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x05>;
-defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x06>;
-defm V_PK_MAX_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x07>;
-defm V_PK_MIN_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x08>;
-defm V_PK_MAD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x09>;
-defm V_PK_ADD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0a>;
-defm V_PK_SUB_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0b>;
-defm V_PK_MAX_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0c>;
-defm V_PK_MIN_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0d>;
-defm V_PK_FMA_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0e>;
-defm V_PK_ADD_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0f>;
-defm V_PK_MUL_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x10>;
-defm V_PK_MIN_F16     : VOP3P_Real_gfx10_gfx11_not_gfx1170<0x11>;
-defm V_PK_MAX_F16     : VOP3P_Real_gfx10_gfx11_not_gfx1170<0x12>;
-defm V_FMA_MIX_F32    : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x20>;
-defm V_FMA_MIXLO_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x21>;
-defm V_FMA_MIXHI_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x22>;
-
-defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
-defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;
-
-defm V_DOT2_F32_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x13>;
-defm V_DOT4_U32_U8  : VOP3P_Real_gfx10_gfx11_gfx12<0x17>;
-defm V_DOT8_U32_U4  : VOP3P_Real_gfx10_gfx11_gfx12<0x19>;
-
-defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x16>;
-defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x18>;

>From 550b25ae214b273e8f0d9da81ff320cb91b1ade6 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sun, 22 Mar 2026 23:07:08 +0530
Subject: [PATCH 06/16] Create idot2-sat.ll

---
 llvm/test/CodeGen/AMDGPU/idot2-sat.ll | 69 +++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/idot2-sat.ll

diff --git a/llvm/test/CodeGen/AMDGPU/idot2-sat.ll b/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
new file mode 100644
index 0000000000000..9c6921000e0b0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx906 < %s | FileCheck --check-prefixes=GFX9-DL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1011 < %s | FileCheck --check-prefixes=GFX10-DL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck --check-prefixes=GFX950 %s
+
+; Test dot2 patterns with saturating add (clamp)
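+; The mul/add/llvm.*add.sat chains below should select to a single
+; v_dot2_u32_u16 / v_dot2_i32_i16 with the clamp modifier applied, as the
+; CHECK lines in each function verify.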
+
+; Unsigned dot2 with saturation: uaddsat(a[0]*b[0] + a[1]*b[1], c)
+define i32 @udot2_sat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
+; GFX9-DL-LABEL: udot2_sat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_dot2_u32_u16 v0, v1, v0, v2 clamp
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot2_sat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_dot2_u32_u16 v0, v1, v0, v2 clamp
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot2_sat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_dot2_u32_u16 v0, v1, v0, v2 clamp
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %conv.i = zext <2 x i16> %a to <2 x i32>
+  %conv6.i = zext <2 x i16> %b to <2 x i32>
+  %mul.i = mul nuw <2 x i32> %conv6.i, %conv.i
+  %0 = extractelement <2 x i32> %mul.i, i64 0
+  %1 = extractelement <2 x i32> %mul.i, i64 1
+  %add.i = add i32 %0, %1
+  %cond.i.i = tail call i32 @llvm.uadd.sat.i32(i32 %add.i, i32 %c)
+  ret i32 %cond.i.i
+}
+
+; Signed dot2 with saturation: saddsat(a[0]*b[0] + a[1]*b[1], c)
+define i32 @sdot2_sat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
+; GFX9-DL-LABEL: sdot2_sat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_dot2_i32_i16 v0, v1, v0, v2 clamp
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot2_sat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_dot2_i32_i16 v0, v1, v0, v2 clamp
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot2_sat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_dot2_i32_i16 v0, v1, v0, v2 clamp
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %conv.i = sext <2 x i16> %a to <2 x i32>
+  %conv6.i = sext <2 x i16> %b to <2 x i32>
+  %mul.i = mul nsw <2 x i32> %conv6.i, %conv.i
+  %0 = extractelement <2 x i32> %mul.i, i64 0
+  %1 = extractelement <2 x i32> %mul.i, i64 1
+  %add.i = add nsw i32 %0, %1
+  %cond1.i.i = tail call i32 @llvm.sadd.sat.i32(i32 %add.i, i32 %c)
+  ret i32 %cond1.i.i
+}
+
+declare i32 @llvm.sadd.sat.i32(i32, i32)
+declare i32 @llvm.uadd.sat.i32(i32, i32)

>From e7e728efe2db0e4f9b15c5b1381fb99bb1d8d4d3 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sun, 22 Mar 2026 23:12:53 +0530
Subject: [PATCH 07/16] Update VOP3PInstructions.td

---
 llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 1208 +++++++++++++++++++
 1 file changed, 1208 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 3d6e06f03d900..2360dd279446c 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -1841,3 +1841,1211 @@ def BF16_BF16_SWMMAC_w32  : VOP3PWMMA_Profile<[v8i16, v8i16, v16i16, v8i16], /*_
 def I32_IU8_SWMMAC_w32    : VOP3PWMMA_Profile<[v8i32, v2i32,  v4i32, v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi8, 16xi8
 def I32_IU4X32_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32,   i32,  v2i32, v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi4, 16xi4
 def I32_IU4X64_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32,  v4i32, v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/0,  /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 16xi4, 32xi4 **
+def F32_FP8BF8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v2i32,  v4i32, v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/1>; // 8xf8, 16xf8
+
+def F32_F16_SWMMAC_w64    : VOP3PWMMA_Profile<[v4f32, v4f16, v8f16, v4f32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
+def F32_BF16_SWMMAC_w64   : VOP3PWMMA_Profile<[v4f32, v4i16, v8i16, v4f32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
+def F16_F16_SWMMAC_w64    : VOP3PWMMA_Profile<[v4f16, v4f16, v8f16, v4f16], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
+def BF16_BF16_SWMMAC_w64  : VOP3PWMMA_Profile<[v4i16, v4i16, v8i16, v4i16], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/0>;
+def I32_IU8_SWMMAC_w64    : VOP3PWMMA_Profile<[v4i32,   i32, v2i32, v4i32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 4xi8, 8xi8
+def I32_IU4X32_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32,   i32,   i32, v4i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi4, 8xi4 ***
+def I32_IU4X64_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32,   i32, v2i32, v4i32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/1, /*_IsFP8BF8=*/0>; // 8xi4, 16xi4
+def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32,   i32, v2i32, v4f32], /*_IsSWMMAC=*/1, /*_IndexType=*/8,  /*_IsIU=*/0, /*_IsFP8BF8=*/1>; // 4xf8, 8xf8
+
+// *   IU4X16_WMMA_w64 lanes 0-31 will have 8xi4, remaining lanes are ignored
+// **  IU4X64_SWMMAC_w32 index is i32, index_key is not used
+// *** IU4X32_SWMMAC_w64 for matrix A, lanes 0-31 will have 8xi4 and the
+//                       remaining lanes are ignored; index is i16; matrix B
+//                       uses all lanes
+
+def F32_F32_WMMA_w32             : VOP3PWMMA_Profile<[v8f32, v2f32,    v2f32,    v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_BF16X32_WMMA_w32         : VOP3PWMMA_Profile<[v8f32, v16bf16,  v16bf16,  v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_F16X32_WMMA_w32          : VOP3PWMMA_Profile<[v8f32, v16f16,   v16f16,   v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F16_F16X32_WMMA_w32          : VOP3PWMMA_Profile<[v8f16, v16f16,   v16f16,   v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def BF16_BF16X32_WMMA_w32        : VOP3PWMMA_Profile<[v8bf16, v16bf16, v16bf16, v8bf16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def BF16F32_BF16_WMMA_w32        : VOP3PWMMA_Profile<[v8bf16, v16bf16, v16bf16,  v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_FP8BF8X64_WMMA_w32       : VOP3PWMMA_Profile<[v8f32, v8i32,    v8i32,    v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_FP8BF8X128_WMMA_w32      : VOP3PWMMA_Profile<[v8f32, v16i32,   v16i32,   v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F16_FP8BF8X64_WMMA_w32       : VOP3PWMMA_Profile<[v8f16, v8i32,    v8i32,    v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F16_FP8BF8X128_WMMA_w32      : VOP3PWMMA_Profile<[v8f16, v16i32,   v16i32,   v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_32X16X128_F4_WMMA_w32    : VOP3PWMMA_Profile<[v16f32, v16i32,  v8i32,   v16f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/0, /*_IsF4=*/1>;
+def I32_IU8X64_WMMA_w32          : VOP3PWMMA_Profile<[v8i32, v8i32,    v8i32,    v8i32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/1, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_32X16X128_F4_SCALE_w32   : VOP3PWMMA_Profile<[v16f32, v16i32,  v8i32,   v16f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0,  /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/1, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_32X16X128_F4_SCALE16_w32 : VOP3PWMMA_Profile<[v16f32, v16i32,  v8i32,   v16f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0,  /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/1, /*_Scale16=*/1, /*_HasMatrixReuse=*/1>;
+def F32_F16X64_SWMMAC_w32        : VOP3PWMMA_Profile<[v8f32, v16f16,   v32f16,   v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_BF16X64_SWMMAC_w32       : VOP3PWMMA_Profile<[v8f32, v16bf16,  v32bf16,  v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F16_F16X64_SWMMAC_w32        : VOP3PWMMA_Profile<[v8f16, v16f16,   v32f16,   v8f16], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def BF16_BF16X64_SWMMAC_w32      : VOP3PWMMA_Profile<[v8bf16, v16bf16, v32bf16, v8bf16], /*_IsSWMMAC=*/1, /*_IndexType=*/16, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F32_FP8BF8X128_SWMMAC_w32    : VOP3PWMMA_Profile<[v8f32, v8i32,    v16i32,   v8f32], /*_IsSWMMAC=*/1, /*_IndexType=*/32, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def F16_FP8BF8X128_SWMMAC_w32    : VOP3PWMMA_Profile<[v8f16, v8i32,    v16i32,   v8f16], /*_IsSWMMAC=*/1, /*_IndexType=*/32, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+def I32_IU8X128_SWMMAC_w32       : VOP3PWMMA_Profile<[v8i32, v8i32,    v16i32,   v8i32], /*_IsSWMMAC=*/1, /*_IndexType=*/32, /*_IsIU=*/1, /*_IsFP8BF8=*/0,
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+
+// Helper class to compute the destination vector type of WMMA_F8F6F4 instructions based on element type and dimensions.
+class getWMMAF8F6F4DstVTy<ValueType DstEltTy, int M, int N> {
+  // Size in bits = (M * N / 32) * element_size_in_bits
+  defvar Size = !mul(!div(!mul(M, N), 32), DstEltTy.Size);
+  ValueType ret = !cond(!eq(Size, 256)  : v8f32,
+                        !eq(Size, 1024) : v64f16);
+}
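+// For example, with DstEltTy = f32 (32 bits) and M = N = 16:
+// Size = (16 * 16 / 32) * 32 = 256, so ret is v8f32 (8 x 32 bits).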
+
+// Helper class to compute the type of matrix A and B of WMMA_F8F6F4 instructions based on format and dimensions.
+class getWMMAF8F6F4ABVTy<string Fmt, int D1, int D2> {
+  defvar FmtBits = !cond(!eq(Fmt, "f8") : 8,
+                         !eq(Fmt, "f6") : 6,
+                         !eq(Fmt, "f4") : 4);
+  // TypeSize in bits = (D1 * D2 / 32) * format_bits
+  defvar TypeSize = !mul(!div(!mul(D1, D2), 32), FmtBits);
+  ValueType ret = !cond(!eq(TypeSize, 256)  : v8i32,
+                        !eq(TypeSize, 384)  : v12i32,
+                        !eq(TypeSize, 512)  : v16i32,
+                        !eq(TypeSize, 1024) : v32i32);
+}
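+// For example, a 16x128 operand in "f6" format gives
+// TypeSize = (16 * 128 / 32) * 6 = 384, so ret is v12i32; the same shape in
+// "f8" gives 512 bits (v16i32) and in "f4" gives 256 bits (v8i32).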
+
+multiclass WMMA_F8F6F4_Profiles<ValueType DstEltTy, int M, int N, int K,
+                                bit HasMatrixScale, bit Scale16, bit HasMatrixReuse> {
+  defvar DstTy = getWMMAF8F6F4DstVTy<DstEltTy, M, N>.ret;
+  foreach ATy = ["f8", "f6", "f4"] in {
+    foreach BTy = ["f8", "f6", "f4"] in {
+      def _#ATy#_#BTy#_w32 : VOP3PWMMA_Profile<
+        [DstTy, getWMMAF8F6F4ABVTy<ATy, M, K>.ret, getWMMAF8F6F4ABVTy<BTy, K, N>.ret, DstTy],
+        /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
+        /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/1, HasMatrixScale, Scale16, HasMatrixReuse>;
+    }
+  }
+}
+
+defm F32_16X16X128_F8F6F4         : WMMA_F8F6F4_Profiles<f32, /*M=*/16, /*N=*/16, /*K=*/128, /*HasMatrixScale=*/0, /*Scale16=*/0, /*HasMatrixReuse=*/0>;
+defm F32_16X16X128_F8F6F4_SCALE   : WMMA_F8F6F4_Profiles<f32, /*M=*/16, /*N=*/16, /*K=*/128, /*HasMatrixScale=*/1, /*Scale16=*/0, /*HasMatrixReuse=*/1>;
+defm F32_16X16X128_F8F6F4_SCALE16 : WMMA_F8F6F4_Profiles<f32, /*M=*/16, /*N=*/16, /*K=*/128, /*HasMatrixScale=*/1, /*Scale16=*/1, /*HasMatrixReuse=*/1>;
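+// Each defm above expands to nine w32 profiles, one per (A, B) format pair,
+// e.g. F32_16X16X128_F8F6F4_f8_f8_w32 through F32_16X16X128_F8F6F4_f4_f4_w32;
+// WMMAInst_SrcFormats_mc below retrieves them by the same name concatenation.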
+
+class VOP_WMMA_LD_SCALE<ValueType vt, RegisterOperand RC> : VOP3P_Profile<VOPProfile<[untyped, vt, vt, untyped]>> {
+  let HasMatrixScale = 1;
+  let HasMatrixReuse = 1;
+  let HasNeg = 0;
+  let Src0RC64 = RC;
+  let Src1RC64 = RC;
+  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, MatrixAScale:$matrix_a_scale, MatrixBScale:$matrix_b_scale,
+                   MatrixAScaleFmt:$matrix_a_scale_fmt, MatrixBScaleFmt:$matrix_b_scale_fmt,
+                   MatrixAReuse:$matrix_a_reuse, MatrixBReuse:$matrix_b_reuse);
+  let AsmVOP3P = " $src0, $src1$matrix_a_scale$matrix_b_scale$matrix_a_scale_fmt$matrix_b_scale_fmt$matrix_a_reuse$matrix_b_reuse";
+}
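+// A hypothetical instantiation such as VOP_WMMA_LD_SCALE<i32, VSrc_b32>
+// would give both scale sources the same 32-bit register class and expose
+// the scale/format/reuse operands via the AsmVOP3P string above.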
+
+multiclass WMMAInst_SrcFormats_mc<string OpName, string Profile> {
+  foreach I = ["f8_f8", "f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
+    defm _#I#_w32 : WMMAInstGFX12<OpName # "_" # I # "_w32", !cast<VOP3PWMMA_Profile>(Profile # "_" # I # "_w32"), "_w32">;
+  }
+}
+
+let WaveSizePredicate = isWave32 in {
+let SubtargetPredicate = isGFX125xOnly in {
+defm V_WMMA_F32_16X16X4_F32_w32       : WMMAInstGFX12<"v_wmma_f32_16x16x4_f32",       F32_F32_WMMA_w32, "_w32">;
+
+let is_wmma_xdl = 1 in {
+defm V_WMMA_F32_16X16X32_BF16_w32     : WMMAInstGFX12<"v_wmma_f32_16x16x32_bf16",     F32_BF16X32_WMMA_w32, "_w32">;
+defm V_WMMA_BF16_16X16X32_BF16_w32    : WMMAInstGFX12<"v_wmma_bf16_16x16x32_bf16",    BF16_BF16X32_WMMA_w32, "_w32">;
+defm V_WMMA_BF16F32_16X16X32_BF16_w32 : WMMAInstGFX12<"v_wmma_bf16f32_16x16x32_bf16", BF16F32_BF16_WMMA_w32, "_w32", 1>;
+defm V_WMMA_F32_16X16X64_FP8_FP8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_fp8_fp8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X64_FP8_BF8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_fp8_bf8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X64_BF8_FP8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_bf8_fp8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X64_BF8_BF8_w32  : WMMAInstGFX12<"v_wmma_f32_16x16x64_bf8_bf8",  F32_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X64_FP8_FP8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_fp8_fp8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X64_FP8_BF8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_fp8_bf8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X64_BF8_FP8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_bf8_fp8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X64_BF8_BF8_w32  : WMMAInstGFX12<"v_wmma_f16_16x16x64_bf8_bf8",  F16_FP8BF8X64_WMMA_w32, "_w32">;
+defm V_WMMA_I32_16X16X64_IU8_w32      : WMMAInstGFX12<"v_wmma_i32_16x16x64_iu8",      I32_IU8X64_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X32_F16_w32      : WMMAInstGFX12<"v_wmma_f32_16x16x32_f16",      F32_F16X32_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X32_F16_w32      : WMMAInstGFX12<"v_wmma_f16_16x16x32_f16",      F16_F16X32_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X128_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_fp8_fp8", F16_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X128_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_fp8_bf8", F16_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X128_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_bf8_fp8", F16_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X128_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f16_16x16x128_bf8_bf8", F16_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X128_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_fp8_fp8", F32_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X128_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_fp8_bf8", F32_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X128_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_bf8_fp8", F32_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X128_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x128_bf8_bf8", F32_FP8BF8X128_WMMA_w32, "_w32">;
+defm V_WMMA_F32_32X16X128_F4_w32      : WMMAInstGFX12<"v_wmma_f32_32x16x128_f4",      F32_32X16X128_F4_WMMA_w32, "_w32">;
+
+defm V_SWMMAC_F32_16X16X64_BF16_w32     : SWMMACInstGFX12<"v_swmmac_f32_16x16x64_bf16",     F32_BF16X64_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_BF16_16X16X64_BF16_w32    : SWMMACInstGFX12<"v_swmmac_bf16_16x16x64_bf16",    BF16_BF16X64_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_BF16F32_16X16X64_BF16_w32 : SWMMACInstGFX12<"v_swmmac_bf16f32_16x16x64_bf16", F32_BF16X64_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X128_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_fp8_fp8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X128_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_fp8_bf8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X128_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_bf8_fp8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X128_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x128_bf8_bf8", F32_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F16_16X16X128_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_fp8_fp8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F16_16X16X128_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_fp8_bf8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F16_16X16X128_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_bf8_fp8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F16_16X16X128_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f16_16x16x128_bf8_bf8", F16_FP8BF8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_I32_16X16X128_IU8_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x128_iu8",     I32_IU8X128_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X64_F16_w32      : SWMMACInstGFX12<"v_swmmac_f32_16x16x64_f16",      F32_F16X64_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F16_16X16X64_F16_w32      : SWMMACInstGFX12<"v_swmmac_f16_16x16x64_f16",      F16_F16X64_SWMMAC_w32, "_w32">;
+
+defm V_WMMA_F32_16X16X128_F8F6F4         : WMMAInst_SrcFormats_mc<"v_wmma_f32_16x16x128_f8f6f4", "F32_16X16X128_F8F6F4">;
+defm V_WMMA_SCALE_F32_16X16X128_F8F6F4   : WMMAInst_SrcFormats_mc<"v_wmma_scale_f32_16x16x128_f8f6f4", "F32_16X16X128_F8F6F4_SCALE">;
+defm V_WMMA_SCALE16_F32_16X16X128_F8F6F4 : WMMAInst_SrcFormats_mc<"v_wmma_scale16_f32_16x16x128_f8f6f4", "F32_16X16X128_F8F6F4_SCALE16">;
+
+defm V_WMMA_SCALE_F32_32X16X128_F4_w32   : WMMAInstGFX12<"v_wmma_scale_f32_32x16x128_f4",   F32_32X16X128_F4_SCALE_w32, "_w32">;
+defm V_WMMA_SCALE16_F32_32X16X128_F4_w32 : WMMAInstGFX12<"v_wmma_scale16_f32_32x16x128_f4", F32_32X16X128_F4_SCALE16_w32, "_w32">;
+} // End is_wmma_xdl = 1.
+
+let isConvergent = 1 in {
+  defm V_WMMA_LD_SCALE_PAIRED_B32   : VOP3PInst<"v_wmma_ld_scale_paired_b32",   VOP_WMMA_LD_SCALE<i32, VCSrc_b32_Lo256>>;
+  defm V_WMMA_LD_SCALE16_PAIRED_B64 : VOP3PInst<"v_wmma_ld_scale16_paired_b64", VOP_WMMA_LD_SCALE<i64, VCSrc_b64_Lo256>>;
+}
+} // End SubtargetPredicate = isGFX125xOnly
+} // End WaveSizePredicate = isWave32
+
+let WaveSizePredicate = isWave32 in {
+defm V_WMMA_F32_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X16_BF16_w32    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w32, "_w32">;
+defm V_WMMA_F16_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w32, "_w32">;
+defm V_WMMA_BF16_16X16X16_BF16_w32   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w32, "_w32">;
+defm V_WMMA_I32_16X16X16_IU8_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w32, "_w32">;
+defm V_WMMA_I32_16X16X16_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
+defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
+defm V_WMMA_I32_16X16X32_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w32, "_w32">;
+
+defm V_SWMMAC_F32_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X32_BF16_w32    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F16_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_BF16_16X16X32_BF16_w32   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_I32_16X16X32_IU8_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_I32_16X16X32_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_I32_16X16X64_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
+defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
+}
+
+let WaveSizePredicate = isWave64 in {
+defm V_WMMA_F32_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w64, "_w64">;
+defm V_WMMA_F32_16X16X16_BF16_w64    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w64, "_w64">;
+defm V_WMMA_F16_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w64, "_w64">;
+defm V_WMMA_BF16_16X16X16_BF16_w64   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w64, "_w64">;
+defm V_WMMA_I32_16X16X16_IU8_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w64, "_w64">;
+defm V_WMMA_I32_16X16X16_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w64, "_w64">;
+defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
+defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
+defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
+defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
+defm V_WMMA_I32_16X16X32_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w64, "_w64">;
+
+defm V_SWMMAC_F32_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_F32_16X16X32_BF16_w64    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_F16_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_BF16_16X16X32_BF16_w64   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_I32_16X16X32_IU8_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_I32_16X16X32_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_I32_16X16X64_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
+defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
+}
+
+// IsGFX11OpselIntrinsic: the f16_f16 and bf16_bf16 intrinsics have an imm
+// operand that controls opsel. It is used by gfx11 and was removed in gfx12
+// (the operand must be 0).
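+// When set, a literal 0 is appended to the intrinsic pattern below, so the
+// pattern only matches calls with opsel disabled.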
+multiclass WMMAPat<string Inst, SDPatternOperator node, VOP3PWMMA_Profile P, bit IsGFX11OpselIntrinsic = 0> {
+  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
+                (P.DstVT !setdagop(P.WmmaOutPat, !cast<Instruction>(Inst#"_twoaddr")))>;
+  let AddedComplexity = 4 in
+  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInlineInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
+                (P.DstVT !setdagop(P.WmmaInlineOutPat, !cast<Instruction>(Inst#"_threeaddr")))>;
+}
+
+class SWMMACPat<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
+  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
+          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>;
+
+class SWMMACPat_w64<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
+  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
+          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))> {
+  let WaveSizePredicate = isWave64;
+}
+
+let WaveSizePredicate = isWave32, SubtargetPredicate = isGFX11PlusNot12_50, OtherPredicates = [HasWMMA128bInsts] in {
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w32",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w32",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w32",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w32,1>;
+  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w32",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w32,1>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w32",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w32",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w32",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w32>;
+}
+
+let WaveSizePredicate = isWave32, SubtargetPredicate = HasSWMMACGfx1200Insts in {
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w32_twoaddr,     int_amdgcn_swmmac_f32_16x16x32_f16,     F32_F16_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w32_twoaddr,    int_amdgcn_swmmac_f32_16x16x32_bf16,    F32_BF16_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w32_twoaddr,     int_amdgcn_swmmac_f16_16x16x32_f16,     F16_F16_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w32_twoaddr,   int_amdgcn_swmmac_bf16_16x16x32_bf16,   BF16_BF16_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu8,     I32_IU8_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu4,     I32_IU4X32_SWMMAC_w32>;
+  def : GCNPat <(I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacInPat,  int_amdgcn_swmmac_i32_16x16x64_iu4)),
+                (I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacOutPat, V_SWMMAC_I32_16X16X64_IU4_w32_twoaddr))>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w32>;
+}
+
+let WaveSizePredicate = isWave64, SubtargetPredicate = isGFX11PlusNot12_50, OtherPredicates = [HasWMMA128bInsts] in {
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w64",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w64",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w64",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w64,1>;
+  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w64",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w64,1>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w64",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w64",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w64>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w64",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w64>;
+}
+
+let WaveSizePredicate = isWave64, SubtargetPredicate = HasSWMMACGfx1200Insts in {
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w64_twoaddr,     int_amdgcn_swmmac_f32_16x16x32_f16,     F32_F16_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w64_twoaddr,    int_amdgcn_swmmac_f32_16x16x32_bf16,    F32_BF16_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w64_twoaddr,     int_amdgcn_swmmac_f16_16x16x32_f16,     F16_F16_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w64_twoaddr,   int_amdgcn_swmmac_bf16_16x16x32_bf16,   BF16_BF16_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu8,     I32_IU8_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu4,     I32_IU4X32_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_I32_16X16X64_IU4_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x64_iu4,     I32_IU4X64_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w64>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w64>;
+}
+
+let WaveSizePredicate = isWave32 in {
+let SubtargetPredicate = isGFX125xOnly in {
+  defm : WMMAPat<"V_WMMA_F32_16X16X4_F32_w32",          int_amdgcn_wmma_f32_16x16x4_f32,          F32_F32_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X32_BF16_w32",        int_amdgcn_wmma_f32_16x16x32_bf16,        F32_BF16X32_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_BF16_16X16X32_BF16_w32",       int_amdgcn_wmma_bf16_16x16x32_bf16,       BF16_BF16X32_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_BF16F32_16X16X32_BF16_w32",    int_amdgcn_wmma_bf16f32_16x16x32_bf16,    BF16F32_BF16_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X64_FP8_FP8_w32",     int_amdgcn_wmma_f32_16x16x64_fp8_fp8,     F32_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X64_FP8_BF8_w32",     int_amdgcn_wmma_f32_16x16x64_fp8_bf8,     F32_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X64_BF8_FP8_w32",     int_amdgcn_wmma_f32_16x16x64_bf8_fp8,     F32_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X64_BF8_BF8_w32",     int_amdgcn_wmma_f32_16x16x64_bf8_bf8,     F32_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X64_FP8_FP8_w32",     int_amdgcn_wmma_f16_16x16x64_fp8_fp8,     F16_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X64_FP8_BF8_w32",     int_amdgcn_wmma_f16_16x16x64_fp8_bf8,     F16_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X64_BF8_FP8_w32",     int_amdgcn_wmma_f16_16x16x64_bf8_fp8,     F16_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X64_BF8_BF8_w32",     int_amdgcn_wmma_f16_16x16x64_bf8_bf8,     F16_FP8BF8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_I32_16X16X64_IU8_w32",         int_amdgcn_wmma_i32_16x16x64_iu8,         I32_IU8X64_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X32_F16_w32",         int_amdgcn_wmma_f32_16x16x32_f16,         F32_F16X32_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X32_F16_w32",         int_amdgcn_wmma_f16_16x16x32_f16,         F16_F16X32_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X128_FP8_FP8_w32",    int_amdgcn_wmma_f16_16x16x128_fp8_fp8,    F16_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X128_FP8_BF8_w32",    int_amdgcn_wmma_f16_16x16x128_fp8_bf8,    F16_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X128_BF8_FP8_w32",    int_amdgcn_wmma_f16_16x16x128_bf8_fp8,    F16_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F16_16X16X128_BF8_BF8_w32",    int_amdgcn_wmma_f16_16x16x128_bf8_bf8,    F16_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X128_FP8_FP8_w32",    int_amdgcn_wmma_f32_16x16x128_fp8_fp8,    F32_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X128_FP8_BF8_w32",    int_amdgcn_wmma_f32_16x16x128_fp8_bf8,    F32_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X128_BF8_FP8_w32",    int_amdgcn_wmma_f32_16x16x128_bf8_fp8,    F32_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_16X16X128_BF8_BF8_w32",    int_amdgcn_wmma_f32_16x16x128_bf8_bf8,    F32_FP8BF8X128_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_F32_32X16X128_F4_w32",         int_amdgcn_wmma_f32_32x16x128_f4,         F32_32X16X128_F4_WMMA_w32>;
+  defm : WMMAPat<"V_WMMA_SCALE_F32_32X16X128_F4_w32",   int_amdgcn_wmma_scale_f32_32x16x128_f4,   F32_32X16X128_F4_SCALE_w32>;
+  defm : WMMAPat<"V_WMMA_SCALE16_F32_32X16X128_F4_w32", int_amdgcn_wmma_scale16_f32_32x16x128_f4, F32_32X16X128_F4_SCALE16_w32>;
+
+  foreach I = ["f8_f8", "f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
+    defm : WMMAPat<"V_WMMA_F32_16X16X128_F8F6F4_" # I # "_w32",         int_amdgcn_wmma_f32_16x16x128_f8f6f4,         !cast<VOP3PWMMA_Profile>("F32_16X16X128_F8F6F4_" # I # "_w32")>;
+    defm : WMMAPat<"V_WMMA_SCALE_F32_16X16X128_F8F6F4_" # I # "_w32",   int_amdgcn_wmma_scale_f32_16x16x128_f8f6f4,   !cast<VOP3PWMMA_Profile>("F32_16X16X128_F8F6F4_SCALE_" # I # "_w32")>;
+    defm : WMMAPat<"V_WMMA_SCALE16_F32_16X16X128_F8F6F4_" # I # "_w32", int_amdgcn_wmma_scale16_f32_16x16x128_f8f6f4, !cast<VOP3PWMMA_Profile>("F32_16X16X128_F8F6F4_SCALE16_" # I # "_w32")>;
+  }
+
+  def : SWMMACPat<V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr,     int_amdgcn_swmmac_f32_16x16x64_bf16,     F32_BF16X64_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr,    int_amdgcn_swmmac_bf16_16x16x64_bf16,    BF16_BF16X64_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr, int_amdgcn_swmmac_bf16f32_16x16x64_bf16, F32_BF16X64_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_fp8_fp8, F32_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_fp8_bf8, F32_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_bf8_fp8, F32_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x128_bf8_bf8, F32_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_fp8_fp8, F16_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_fp8_bf8, F16_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_bf8_fp8, F16_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f16_16x16x128_bf8_bf8, F16_FP8BF8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x128_iu8,     I32_IU8X128_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F32_16X16X64_F16_w32_twoaddr,      int_amdgcn_swmmac_f32_16x16x64_f16,      F32_F16X64_SWMMAC_w32>;
+  def : SWMMACPat<V_SWMMAC_F16_16X16X64_F16_w32_twoaddr,      int_amdgcn_swmmac_f16_16x16x64_f16,      F16_F16X64_SWMMAC_w32>;
+} // End SubtargetPredicate = isGFX125xOnly
+} // End WaveSizePredicate = isWave32
+
+//===----------------------------------------------------------------------===//
+// Begin Real Encodings
+//===----------------------------------------------------------------------===//
+
+class VOP3P_DPP16<bits<8> op, VOP_DPP_Pseudo ps, int subtarget,
+                  string opName = ps.OpName>
+    : VOP3P_DPP<op, opName, ps.Pfl, 1>, SIMCInstr<ps.PseudoInstr, subtarget> {
+  let hasSideEffects = ps.hasSideEffects;
+  let Defs = ps.Defs;
+  let SchedRW = ps.SchedRW;
+  let Uses = ps.Uses;
+  let AssemblerPredicate = HasDPP16;
+  let SubtargetPredicate = ps.SubtargetPredicate;
+  let OtherPredicates = ps.OtherPredicates;
+  let IsPacked = ps.IsPacked;
+}
+
+class VOP3P_DPP8_Base<bits<8> op, VOP_Pseudo ps, string opName = ps.OpName>
+    : VOP3P_DPP8<op, opName, ps.Pfl> {
+  let hasSideEffects = ps.hasSideEffects;
+  let Defs = ps.Defs;
+  let SchedRW = ps.SchedRW;
+  let Uses = ps.Uses;
+  let SubtargetPredicate = ps.SubtargetPredicate;
+  let OtherPredicates = ps.OtherPredicates;
+  let IsPacked = ps.IsPacked;
+}
+
+//===----------------------------------------------------------------------===//
+// GFX11, GFX12
+//===----------------------------------------------------------------------===//
+
+multiclass VOP3P_Real_Base<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
+                      string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
+  def Gen.Suffix :
+    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
+    VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
+}
+
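+// For WMMA the opsel/opsel_hi bits are repurposed: depending on the profile
+// they carry the matrix A/B format, the sparse index key, or the matrix
+// reuse flags.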
+class VOP3PeWmma<bits<8> op, VOPProfile P, VOP3PWMMA_Profile WMMAP>
+    : VOP3Pe_gfx11_gfx12<op, P> {
+  // opsel
+  let Inst{11} = !cond(WMMAP.HasMatrixFMT       : matrix_a_fmt{0},
+                       !eq(WMMAP.IndexType, 0)  : 0,
+                       !eq(WMMAP.IndexType, 8)  : index_key_8bit{0},
+                       !eq(WMMAP.IndexType, 16) : index_key_16bit{0},
+                       !eq(WMMAP.IndexType, 32) : index_key_32bit{0});
+  let Inst{12} = !if(WMMAP.HasMatrixFMT, matrix_a_fmt{1},
+                     !if(!eq(WMMAP.IndexType, 8), index_key_8bit{1}, 0));
+  let Inst{13} = !if(WMMAP.HasMatrixFMT, matrix_a_fmt{2},
+                     !if(WMMAP.HasMatrixReuse, matrix_a_reuse, 0));
+  // opsel_hi
+  let Inst{59} = !if(WMMAP.HasMatrixFMT, matrix_b_fmt{0}, 1);
+  let Inst{60} = !if(WMMAP.HasMatrixFMT, matrix_b_fmt{1}, 1);
+  let Inst{14} = !if(WMMAP.HasMatrixFMT, matrix_b_fmt{2},
+                     !if(WMMAP.HasMatrixReuse, matrix_b_reuse, 1));
+  // neg_lo
+  let Inst{61} = !if(WMMAP.NegLo01, src0_modifiers{0}, 0);
+  let Inst{62} = !if(WMMAP.NegLo01, src1_modifiers{0}, 0);
+  let Inst{63} = !if(WMMAP.NegLo2, src2_modifiers{0}, 0);
+  // neg_hi
+  let Inst{8}  = !if(WMMAP.NegHi01, src0_modifiers{1}, 0);
+  let Inst{9}  = !if(WMMAP.NegHi01, src1_modifiers{1}, 0);
+  let Inst{10} = !if(WMMAP.NegHi2, src2_modifiers{1}, 0);
+  // clamp
+  let Inst{15} = !if(WMMAP.HasClamp, clamp{0}, 0);
+}
+
+multiclass VOP3P_WMMA_Real_Base<GFXGen Gen, bits<8> op, VOP3PWMMA_Profile WMMAP,
+                                string backing_ps_name = NAME,
+                                string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
+  def Gen.Suffix :
+    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
+    VOP3PeWmma<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl, WMMAP>;
+}
+
+multiclass VOP3P_Real_WMMA_gfx1170 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
+  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX1170" in {
+    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX1170Gen, op, WMMAP>;
+  }
+}
+
+multiclass VOP3P_Real_WMMA_gfx1170w64 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
+  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX1170W64" in {
+    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX1170Gen, op, WMMAP>;
+  }
+}
+
+multiclass VOP3P_Real_WMMA_gfx12 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
+  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
+    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
+  }
+}
+
+multiclass VOP3P_Real_WMMA_gfx12w64 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
+  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX12W64" in {
+    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
+  }
+}
+
+multiclass VOP3P_Real_WMMA_gfx1170_gfx12 <bits<8> op, VOP3PWMMA_Profile WMMAP> :
+  VOP3P_Real_WMMA_gfx1170<op, WMMAP>,
+  VOP3P_Real_WMMA_gfx12<op, WMMAP>;
+
+multiclass VOP3P_Real_WMMA_gfx1170_gfx12w64 <bits<8> op, VOP3PWMMA_Profile WMMAP> :
+  VOP3P_Real_WMMA_gfx1170w64<op, WMMAP>,
+  VOP3P_Real_WMMA_gfx12w64<op, WMMAP>;
+
+multiclass VOP3P_Real_WMMA_gfx1250 <bits<8> op, VOP3PWMMA_Profile WMMAP> {
+  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
+    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX1250Gen, op, WMMAP>;
+  }
+}
+
+multiclass VOP3P_Real_WMMA_F8F6F4<string Gen, bits<8> op, VOP3PWMMA_Profile WMMAP> {
+  defvar PS = !cast<VOP3P_Pseudo>(NAME # "_twoaddr");
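+  // Strip the per-format suffix (all suffixes have the same length as
+  // "_f8_f8_w32") so every format variant shares the base asm name.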
+  defvar asmName = !substr(PS.Mnemonic, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
+  defvar psName = !substr(NAME, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
+  let AsmString = asmName # PS.AsmOperands in {
+    if !eq(Gen, "gfx1250") then {
+      defm NAME : VOP3P_Real_WMMA_gfx1250<op, WMMAP>,
+                  MFMA_F8F6F4_WithSizeTable_Helper<PS, psName # "_f8_f8_w32_twoaddr_" # Gen>;
+    }
+  }
+}
+
+multiclass VOP3P_Real_WMMA_SrcFormats<string Gen, bits<8> op, string WMMAP> {
+  defm _f8_f8_w32 : VOP3P_Real_WMMA_F8F6F4<Gen, op, !cast<VOP3PWMMA_Profile>(WMMAP # "_f8_f8_w32")>;
+  foreach I = ["f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
+    let isAsmParserOnly = true in { // Disable ambiguous disassembly.
+      defm _#I#_w32 : VOP3P_Real_WMMA_F8F6F4<Gen, op, !cast<VOP3PWMMA_Profile>(WMMAP # "_" # I # "_w32")>;
+    }
+  }
+}
+
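+// VOP3PX2e is a 128-bit paired encoding: the low 64 bits carry the
+// ld_scale operation and the high 64 bits carry the WMMA operation itself.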
+class VOP3PX2e <bits<8> op, bits<8> LdScaleOp, VOP3PWMMA_Profile P> : Enc128, VOP3Pe_Base {
+  bits<9> scale_src0;
+  bits<9> scale_src1;
+
+  // Inst{7-0} = unused
+  let Inst{10-8} = {0, matrix_b_scale_fmt{1-0}}; // neg_hi
+  let Inst{11} = matrix_a_scale{0}; // scale_op_sel(0)
+  let Inst{12} = 0;                 // scale_op_sel(1)
+  let Inst{13} = matrix_a_reuse;    // scale_op_sel(2)
+  let Inst{14} = matrix_b_reuse;    // scale_op_sel_hi(2)
+  let Inst{15} = 0; // scale_clamp
+  let Inst{31-24} = 0xcc; // Encoding
+  let Inst{23-16} = LdScaleOp;
+  let Inst{40-32} = scale_src0;
+  let Inst{49-41} = scale_src1;
+  let Inst{58-50} = ?; // scale src2
+  let Inst{59}    = matrix_b_scale{0}; // scale_op_sel_hi(0)
+  let Inst{60}    = 0;                 // scale_op_sel_hi(1)
+  let Inst{63-61} = {0, matrix_a_scale_fmt{1-0}}; // neg (lo)
+
+  // The high half of the encoding is the unscaled wmma op.
+  let Inst{71-64} = vdst;
+
+  let Inst{72} = !if(P.NegHi01, src0_modifiers{1}, 0); // neg_hi src0
+  let Inst{73} = !if(P.NegHi01, src1_modifiers{1}, 0); // neg_hi src1
+  let Inst{74} = !if(P.NegHi2, src2_modifiers{1}, 0); // neg_hi src2
+
+  let Inst{77-75} = !if(P.HasMatrixFMT, matrix_a_fmt{2-0}, 0); // op_sel
+
+  let Inst{78,124,123} = !if(P.HasMatrixFMT, matrix_b_fmt{2-0}, 7); // op_sel_hi
+  let Inst{79} = !if(P.HasClamp, clamp{0}, 0);
+
+  let Inst{87-80} = op;
+  let Inst{95-88} = 0xcc; // Encoding
+  let Inst{104-96} = !if(P.HasSrc0, src0, ?);
+  let Inst{113-105} = !if(P.HasSrc1, src1, ?);
+  let Inst{122-114} = !if(P.HasSrc2, src2, ?);
+
+  // neg_lo
+  let Inst{125} = !if(P.NegLo01, src0_modifiers{0}, 0);
+  let Inst{126} = !if(P.NegLo01, src1_modifiers{0}, 0);
+  let Inst{127} = !if(P.NegLo2, src2_modifiers{0}, 0);
+}
+
+multiclass VOP3PX2_Real_ScaledWMMA_F4<string Gen, bits<8> op, bits<8> LdScaleOp, VOP3PWMMA_Profile WMMAP> {
+  defvar PS = !cast<VOP3P_Pseudo>(NAME # "_twoaddr");
+  if !eq(Gen, "gfx1250") then {
+    def _gfx1250 : VOP3P_Real_Gen<PS, GFX1250Gen, PS.Mnemonic>,
+                   VOP3PX2e <op, LdScaleOp, WMMAP> {
+      let PostEncoderMethod = "postEncodeVOP3<true, true, false>";
+    }
+  }
+}
+
+multiclass VOP3PX2_Real_ScaledWMMA<string Gen, bits<8> op, bits<8> LdScaleOp, VOP3PWMMA_Profile WMMAP> {
+  defvar PS = !cast<VOP3P_Pseudo>(NAME # "_twoaddr");
+  defvar asmName = !substr(PS.Mnemonic, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
+  defvar psName = !substr(NAME, 0, !sub(!size(PS.Mnemonic), !size("_f8_f8_w32")));
+  if !eq(Gen, "gfx1250") then {
+    def _gfx1250 : VOP3P_Real_Gen<PS, GFX1250Gen, asmName>,
+                   VOP3PX2e <op, LdScaleOp, WMMAP>,
+                   MFMA_F8F6F4_WithSizeTable_Helper<PS, psName # "_f8_f8_w32_" # Gen> {
+      let AsmString = asmName # PS.AsmOperands;
+      let PostEncoderMethod = "postEncodeVOP3<true, true, false>";
+    }
+  }
+}
+
+multiclass VOP3PX2_Real_ScaledWMMA_SrcFormats<string Gen, bits<8> op, bits<8> LdScaleOp, string WMMAP> {
+  defm _f8_f8_w32 : VOP3PX2_Real_ScaledWMMA<Gen, op, LdScaleOp, !cast<VOP3PWMMA_Profile>(WMMAP # "_f8_f8_w32")>;
+  foreach I = ["f8_f6", "f8_f4", "f6_f8", "f6_f6", "f6_f4", "f4_f8", "f4_f6", "f4_f4"] in {
+    let isAsmParserOnly = true in { // Disable ambiguous disassembly.
+      defm _#I#_w32 : VOP3PX2_Real_ScaledWMMA<Gen, op, LdScaleOp, !cast<VOP3PWMMA_Profile>(WMMAP # "_" # I # "_w32")>;
+    }
+  }
+}
+
+defm V_WMMA_F32_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x040, F32_F16_WMMA_w32>;
+defm V_WMMA_F32_16X16X16_BF16_w32    : VOP3P_Real_WMMA_gfx1170_gfx12 <0x041, F32_BF16_WMMA_w32>;
+defm V_WMMA_F16_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x042, F16_F16_WMMA_w32>;
+defm V_WMMA_BF16_16X16X16_BF16_w32   : VOP3P_Real_WMMA_gfx1170_gfx12 <0x043, BF16_BF16_WMMA_w32>;
+defm V_WMMA_I32_16X16X16_IU8_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x044, I32_IU8_WMMA_w32>;
+defm V_WMMA_I32_16X16X16_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x045, I32_IU4X16_WMMA_w32>;
+defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x046, F32_FP8BF8_WMMA_w32>;
+defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x047, F32_FP8BF8_WMMA_w32>;
+defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x048, F32_FP8BF8_WMMA_w32>;
+defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x049, F32_FP8BF8_WMMA_w32>;
+defm V_WMMA_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x04a, I32_IU4X32_WMMA_w32>;
+
+defm V_WMMA_F32_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x040, F32_F16_WMMA_w64>;
+defm V_WMMA_F32_16X16X16_BF16_w64    : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x041, F32_BF16_WMMA_w64>;
+defm V_WMMA_F16_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x042, F16_F16_WMMA_w64>;
+defm V_WMMA_BF16_16X16X16_BF16_w64   : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x043, BF16_BF16_WMMA_w64>;
+defm V_WMMA_I32_16X16X16_IU8_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x044, I32_IU8_WMMA_w64>;
+defm V_WMMA_I32_16X16X16_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x045, I32_IU4X16_WMMA_w64>;
+defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x046, F32_FP8BF8_WMMA_w64>;
+defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x047, F32_FP8BF8_WMMA_w64>;
+defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x048, F32_FP8BF8_WMMA_w64>;
+defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x049, F32_FP8BF8_WMMA_w64>;
+defm V_WMMA_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x04a, I32_IU4X32_WMMA_w64>;
+
+defm V_SWMMAC_F32_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x050, F32_F16_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X32_BF16_w32    : VOP3P_Real_WMMA_gfx1170_gfx12 <0x051, F32_BF16_SWMMAC_w32>;
+defm V_SWMMAC_F16_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x052, F16_F16_SWMMAC_w32>;
+defm V_SWMMAC_BF16_16X16X32_BF16_w32   : VOP3P_Real_WMMA_gfx1170_gfx12 <0x053, BF16_BF16_SWMMAC_w32>;
+defm V_SWMMAC_I32_16X16X32_IU8_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x054, I32_IU8_SWMMAC_w32>;
+defm V_SWMMAC_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x055, I32_IU4X32_SWMMAC_w32>;
+defm V_SWMMAC_I32_16X16X64_IU4_w32     : VOP3P_Real_WMMA_gfx1170_gfx12 <0x056, I32_IU4X64_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x057, F32_FP8BF8_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x058, F32_FP8BF8_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x059, F32_FP8BF8_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1170_gfx12 <0x05a, F32_FP8BF8_SWMMAC_w32>;
+
+defm V_SWMMAC_F32_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x050, F32_F16_SWMMAC_w64>;
+defm V_SWMMAC_F32_16X16X32_BF16_w64    : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x051, F32_BF16_SWMMAC_w64>;
+defm V_SWMMAC_F16_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x052, F16_F16_SWMMAC_w64>;
+defm V_SWMMAC_BF16_16X16X32_BF16_w64   : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x053, BF16_BF16_SWMMAC_w64>;
+defm V_SWMMAC_I32_16X16X32_IU8_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x054, I32_IU8_SWMMAC_w64>;
+defm V_SWMMAC_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x055, I32_IU4X32_SWMMAC_w64>;
+defm V_SWMMAC_I32_16X16X64_IU4_w64     : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x056, I32_IU4X64_SWMMAC_w64>;
+defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x057, F32_FP8BF8_SWMMAC_w64>;
+defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x058, F32_FP8BF8_SWMMAC_w64>;
+defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x059, F32_FP8BF8_SWMMAC_w64>;
+defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx1170_gfx12w64 <0x05a, F32_FP8BF8_SWMMAC_w64>;
+
+defm V_WMMA_F32_16X16X4_F32_w32       : VOP3P_Real_WMMA_gfx1250 <0x05d, F32_F32_WMMA_w32>;
+defm V_WMMA_F32_16X16X32_BF16_w32     : VOP3P_Real_WMMA_gfx1250 <0x062, F32_BF16X32_WMMA_w32>;
+defm V_WMMA_F32_16X16X32_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x060, F32_F16X32_WMMA_w32>;
+defm V_WMMA_F16_16X16X32_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x061, F16_F16X32_WMMA_w32>;
+defm V_WMMA_BF16_16X16X32_BF16_w32    : VOP3P_Real_WMMA_gfx1250 <0x063, BF16_BF16X32_WMMA_w32>;
+defm V_WMMA_BF16F32_16X16X32_BF16_w32 : VOP3P_Real_WMMA_gfx1250 <0x064, BF16F32_BF16_WMMA_w32>;
+defm V_WMMA_F32_16X16X64_FP8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06a, F32_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F32_16X16X64_FP8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06b, F32_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F32_16X16X64_BF8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06c, F32_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F32_16X16X64_BF8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06d, F32_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F16_16X16X64_FP8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06e, F16_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F16_16X16X64_FP8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x06f, F16_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F16_16X16X64_BF8_FP8_w32  : VOP3P_Real_WMMA_gfx1250 <0x070, F16_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_F16_16X16X64_BF8_BF8_w32  : VOP3P_Real_WMMA_gfx1250 <0x071, F16_FP8BF8X64_WMMA_w32>;
+defm V_WMMA_I32_16X16X64_IU8_w32      : VOP3P_Real_WMMA_gfx1250 <0x072, I32_IU8X64_WMMA_w32>;
+defm V_WMMA_F32_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x080, F32_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F32_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x081, F32_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F32_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x082, F32_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F32_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x083, F32_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F16_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x084, F16_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F16_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x085, F16_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F16_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x086, F16_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F16_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x087, F16_FP8BF8X128_WMMA_w32>;
+defm V_WMMA_F32_32X16X128_F4_w32      : VOP3P_Real_WMMA_gfx1250 <0x088, F32_32X16X128_F4_WMMA_w32>;
+
+let WaveSizePredicate = isWave32, SubtargetPredicate = isGFX1250Plus, DecoderNamespace = "GFX1250" in {
+defm V_WMMA_F32_16X16X128_F8F6F4         : VOP3P_Real_WMMA_SrcFormats <"gfx1250", 0x033, "F32_16X16X128_F8F6F4">;
+defm V_WMMA_SCALE_F32_16X16X128_F8F6F4   : VOP3PX2_Real_ScaledWMMA_SrcFormats <"gfx1250", 0x033, 0x35, "F32_16X16X128_F8F6F4_SCALE">;
+defm V_WMMA_SCALE16_F32_16X16X128_F8F6F4 : VOP3PX2_Real_ScaledWMMA_SrcFormats <"gfx1250", 0x033, 0x3a, "F32_16X16X128_F8F6F4_SCALE16">;
+
+defm V_WMMA_SCALE_F32_32X16X128_F4_w32   : VOP3PX2_Real_ScaledWMMA_F4 <"gfx1250", 0x088, 0x35, F32_32X16X128_F4_SCALE_w32>;
+defm V_WMMA_SCALE16_F32_32X16X128_F4_w32 : VOP3PX2_Real_ScaledWMMA_F4 <"gfx1250", 0x088, 0x3a, F32_32X16X128_F4_SCALE16_w32>;
+} // End WaveSizePredicate = isWave32, SubtargetPredicate = isGFX1250Plus, DecoderNamespace = "GFX1250"
+
+defm V_SWMMAC_F32_16X16X64_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x065, F32_F16X64_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X64_BF16_w32     : VOP3P_Real_WMMA_gfx1250 <0x066, F32_BF16X64_SWMMAC_w32>;
+defm V_SWMMAC_F16_16X16X64_F16_w32      : VOP3P_Real_WMMA_gfx1250 <0x067, F16_F16X64_SWMMAC_w32>;
+defm V_SWMMAC_BF16_16X16X64_BF16_w32    : VOP3P_Real_WMMA_gfx1250 <0x068, BF16_BF16X64_SWMMAC_w32>;
+defm V_SWMMAC_BF16F32_16X16X64_BF16_w32 : VOP3P_Real_WMMA_gfx1250 <0x069, F32_BF16X64_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x073, F32_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x074, F32_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x075, F32_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F32_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x076, F32_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F16_16X16X128_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x077, F16_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F16_16X16X128_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x078, F16_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F16_16X16X128_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx1250 <0x079, F16_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_F16_16X16X128_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx1250 <0x07a, F16_FP8BF8X128_SWMMAC_w32>;
+defm V_SWMMAC_I32_16X16X128_IU8_w32     : VOP3P_Real_WMMA_gfx1250 <0x07b, I32_IU8X128_SWMMAC_w32>;
+
+multiclass VOP3P_Real_with_name<GFXGen Gen, bits<8> op,
+                          string backing_ps_name = NAME,
+                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
+  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
+  let AsmString = asmName # ps.AsmOperands in
+    def Gen.Suffix :
+      VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
+      VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
+
+  def : AMDGPUMnemonicAlias<ps.Mnemonic, asmName> {
+    let AssemblerPredicate = Gen.AssemblerPredicate;
+  }
+}
+
+multiclass VOP3P_Real_dpp<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
+                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
+  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
+  def _dpp#Gen.Suffix
+      : VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"),
+                    Gen.Subtarget> {
+    let AsmString = asmName #ps.Pfl.AsmVOP3DPP16;
+    let DecoderNamespace = Gen.DecoderNamespace;
+    let AssemblerPredicate = Gen.AssemblerPredicate;
+  }
+}
+
+multiclass VOP3P_Real_dpp8<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
+                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
+  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
+  def _dpp8#Gen.Suffix : VOP3P_DPP8_Base<op, ps> {
+    let AsmString = asmName #ps.Pfl.AsmVOP3DPP8;
+    let DecoderNamespace = Gen.DecoderNamespace;
+    let AssemblerPredicate = Gen.AssemblerPredicate;
+  }
+}
+
+multiclass VOP3P_Realtriple<GFXGen Gen, bits<8> op, string backing_ps_name = NAME,
+                            string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic>
+    : VOP3P_Real_Base<Gen, op, backing_ps_name, asmName>,
+      VOP3P_Real_dpp<Gen, op, backing_ps_name, asmName>,
+      VOP3P_Real_dpp8<Gen, op, backing_ps_name, asmName>;
+
+multiclass VOP3P_Realtriple_gfx11_gfx12<bits<8> op>
+  : VOP3P_Realtriple<GFX11Gen, op>, VOP3P_Realtriple<GFX12Gen, op>;
+
+defm V_DOT4_F32_FP8_BF8 : VOP3P_Realtriple_gfx11_gfx12<0x24>;
+defm V_DOT4_F32_BF8_FP8 : VOP3P_Realtriple_gfx11_gfx12<0x25>;
+defm V_DOT4_F32_FP8_FP8 : VOP3P_Realtriple_gfx11_gfx12<0x26>;
+defm V_DOT4_F32_BF8_BF8 : VOP3P_Realtriple_gfx11_gfx12<0x27>;
+
+//===----------------------------------------------------------------------===//
+// GFX12
+//===----------------------------------------------------------------------===//
+
+multiclass VOP3P_Real_gfx12<bits<8> op> : VOP3P_Real_Base<GFX12Gen, op>;
+
+multiclass VOP3P_Real_gfx1250<bits<8> op> : VOP3P_Real_Base<GFX1250Gen, op>;
+
+multiclass VOP3P_Real_with_name_gfx12<bits<8> op,
+                          string backing_ps_name = NAME,
+                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
+  VOP3P_Real_with_name<GFX12Gen, op, backing_ps_name, asmName>;
+
+defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1b, "V_PK_MIN_F16", "v_pk_min_num_f16">;
+defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1c, "V_PK_MAX_F16", "v_pk_max_num_f16">;
+
+defm V_PK_FMA_F32 : VOP3P_Real_gfx12<0x1f>;
+defm V_PK_MUL_F32 : VOP3P_Real_gfx12<0x28>;
+defm V_PK_ADD_F32 : VOP3P_Real_gfx12<0x29>;
+
+defm V_PK_ADD_MAX_I16  : VOP3P_Real_gfx1250<0x14>;
+defm V_PK_ADD_MAX_U16  : VOP3P_Real_gfx1250<0x15>;
+defm V_PK_ADD_MIN_I16  : VOP3P_Real_gfx1250<0x2d>;
+defm V_PK_ADD_MIN_U16  : VOP3P_Real_gfx1250<0x2e>;
+defm V_PK_MAX3_I16     : VOP3P_Real_gfx1250<0x2f>;
+defm V_PK_MAX3_U16     : VOP3P_Real_gfx1250<0x30>;
+defm V_PK_MIN3_I16     : VOP3P_Real_gfx1250<0x31>;
+defm V_PK_MIN3_U16     : VOP3P_Real_gfx1250<0x32>;
+defm V_PK_FMA_BF16     : VOP3P_Real_gfx1250<0x11>;
+defm V_PK_ADD_BF16     : VOP3P_Real_gfx1250<0x23>;
+defm V_PK_MUL_BF16     : VOP3P_Real_gfx1250<0x2a>;
+defm V_PK_MIN_NUM_BF16 : VOP3P_Real_gfx1250<0x2b>;
+defm V_PK_MAX_NUM_BF16 : VOP3P_Real_gfx1250<0x2c>;
+defm V_PK_MINIMUM3_F16 : VOP3P_Real_gfx1250<0x36>;
+defm V_PK_MAXIMUM3_F16 : VOP3P_Real_gfx1250<0x37>;
+defm V_PK_MIN3_NUM_F16 : VOP3P_Real_gfx1250<0x38>;
+defm V_PK_MAX3_NUM_F16 : VOP3P_Real_gfx1250<0x39>;
+
+defm V_FMA_MIX_F32_BF16 : VOP3P_Realtriple<GFX1250Gen, 0x3d>;
+defm V_FMA_MIXLO_BF16   : VOP3P_Realtriple<GFX1250Gen, 0x3e>;
+defm V_FMA_MIXHI_BF16   : VOP3P_Realtriple<GFX1250Gen, 0x3f>;
+
+let PostEncoderMethod = "postEncodeVOP3<true, true, false>" in {
+  defm V_WMMA_LD_SCALE_PAIRED_B32   : VOP3P_Real_gfx1250<0x35>;
+  defm V_WMMA_LD_SCALE16_PAIRED_B64 : VOP3P_Real_gfx1250<0x3a>;
+}
+
+let AssemblerPredicate = isGFX1250Plus in
+def : AMDGPUMnemonicAlias<"v_fma_mix_f32_f16",  "v_fma_mix_f32">;
+
+//===----------------------------------------------------------------------===//
+// GFX1170
+//===----------------------------------------------------------------------===//
+
+multiclass VOP3P_Real_gfx11_gfx12<bits<8> op> :
+   VOP3P_Real_Base<GFX11Gen, op>, VOP3P_Real_Base<GFX12Gen, op>;
+
+multiclass VOP3P_Real_with_name_gfx1170<bits<8> op,
+                          string backing_ps_name = NAME,
+                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
+  VOP3P_Real_with_name<GFX1170Gen, op, backing_ps_name, asmName>;
+
+defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx1170<0x11, "V_PK_MAX_F16", "v_pk_max_num_f16">;
+defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx1170<0x12, "V_PK_MIN_F16", "v_pk_min_num_f16">;
+
+defm V_PK_MINIMUM_F16 : VOP3P_Real_gfx11_gfx12<0x1d>;
+defm V_PK_MAXIMUM_F16 : VOP3P_Real_gfx11_gfx12<0x1e>;
+
+//===----------------------------------------------------------------------===//
+// GFX11
+//===----------------------------------------------------------------------===//
+
+defm V_DOT4_I32_IU8  : VOP3P_Real_gfx11_gfx12<0x16>;
+defm V_DOT8_I32_IU4  : VOP3P_Real_gfx11_gfx12<0x18>;
+defm V_DOT2_F32_BF16 : VOP3P_Realtriple_gfx11_gfx12<0x1a>;
+
+let AssemblerPredicate = isGFX11Plus in {
+  def : AMDGPUMnemonicAlias<"v_dot4_i32_i8", "v_dot4_i32_iu8">;
+  def : AMDGPUMnemonicAlias<"v_dot8_i32_i4", "v_dot8_i32_iu4">;
+}
+
+multiclass VOP3P_Real_WMMA <bits<8> op> {
+  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX11" in {
+    defm _twoaddr_w32 : VOP3P_Real_Base <GFX11Gen, op>;
+  }
+  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX11W64" in {
+    defm _twoaddr_w64 : VOP3P_Real_Base <GFX11Gen, op>;
+  }
+}
+
+defm V_WMMA_F32_16X16X16_F16   : VOP3P_Real_WMMA <0x040>;
+defm V_WMMA_F32_16X16X16_BF16  : VOP3P_Real_WMMA <0x041>;
+defm V_WMMA_F16_16X16X16_F16   : VOP3P_Real_WMMA <0x042>;
+defm V_WMMA_BF16_16X16X16_BF16 : VOP3P_Real_WMMA <0x043>;
+defm V_WMMA_I32_16X16X16_IU8   : VOP3P_Real_WMMA <0x044>;
+defm V_WMMA_I32_16X16X16_IU4   : VOP3P_Real_WMMA <0x045>;
+
+//===----------------------------------------------------------------------===//
+// GFX8 (VI)
+//===----------------------------------------------------------------------===//
+
+multiclass VOP3P_Real_vi<bits<7> op> {
+  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
+            VOP3Pe_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
+    let AssemblerPredicate = HasVOP3PInsts;
+    let DecoderNamespace = "GFX8";
+    let VOP3P = 1;
+  }
+}
+
+multiclass VOP3P_Real_MAI<bits<7> op> {
+  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
+            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
+    let AssemblerPredicate = HasMAIInsts;
+    let DecoderNamespace = "GFX8";
+    let Inst{14} = ?; // op_sel_hi(2)
+    let Inst{59} = ?; // op_sel_hi(0)
+    let Inst{60} = ?; // op_sel_hi(1)
+  }
+}
+
+let Constraints = "" in {
+multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
+  let SubtargetPredicate = isGFX90AOnly,
+      AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
+  def _gfx90a_acd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX90A>,
+             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, 1>;
+
+  def _gfx90a_vcd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64"), SIEncodingFamily.GFX90A>,
+             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64").Pfl, 0>;
+  } // End AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A"
+}
+}
+
+multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string Op,
+                                          VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(Op # "_e64"),
+                                          VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(Op # "_vgprcd" # "_e64"),
+                                          VOPProfile Pfl_ACD = PS_ACD.Pfl,
+                                          VOPProfile Pfl_VCD = PS_VCD.Pfl> {
+  if !ne(NameFrom, NameTo) then {
+    let SubtargetPredicate = PS_ACD.SubtargetPredicate,
+        OtherPredicates = PS_ACD.OtherPredicates in {
+      def : InstAlias <NameTo # " " # PS_ACD.AsmOperands,
+                     (!cast<VOP3P_Real>(Op # "_gfx940_acd") Pfl_ACD.DstRC:$vdst,
+                         Pfl_ACD.Src0RC64:$src0, Pfl_ACD.Src1RC64:$src1, Pfl_ACD.Src2RC64:$src2,
+                         CBSZ:$cbsz, ABID:$abid, blgp:$blgp)>, PredicateControl;
+      def : InstAlias <NameTo # " " # PS_VCD.AsmOperands,
+                     (!cast<VOP3P_Real>(Op # "_gfx940_vcd") Pfl_VCD.DstRC:$vdst,
+                         Pfl_VCD.Src0RC64:$src0, Pfl_VCD.Src1RC64:$src1, Pfl_VCD.Src2RC64:$src2,
+                         CBSZ:$cbsz, ABID:$abid, blgp:$blgp)>, PredicateControl;
+    }
+  }
+}
+
+multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
+                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
+  let AssemblerPredicate = isGFX940Plus,
+      DecoderNamespace = "GFX940",
+      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
+  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
+                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>;
+
+  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
+                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>;
+  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"
+
+  let SubtargetPredicate = PS_ACD.SubtargetPredicate,
+      OtherPredicates = PS_ACD.OtherPredicates,
+      AssemblerPredicate = isGFX940Plus
+      in {
+    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, PS_ACD.Mnemonic, NAME>;
+
+    if !ne(!subst("_1k", "", PS_ACD.Mnemonic), PS_ACD.Mnemonic) then
+    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, !subst("_1k", "", PS_ACD.Mnemonic), NAME>;
+  }
+}
+
+multiclass VOP3P_Real_MFMA_F8F6F4_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
+                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
+
+  defvar F8F8Name = !substr(NAME, 0, !sub(!size(NAME), !size("_fN_fM")))#"_f8_f8";
+
+  let AssemblerPredicate = isGFX940Plus,
+      DecoderNamespace = "GFX940",
+      AsmString = Name # PS_ACD.AsmOperands,
+      Constraints = "" in {
+  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
+                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>,
+                    MFMA_F8F6F4_WithSizeTable_Helper<PS_ACD, F8F8Name#"_gfx940_acd">;
+
+  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
+                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>,
+                    MFMA_F8F6F4_WithSizeTable_Helper<PS_VCD, F8F8Name#"_gfx940_vcd">;
+  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"
+}
+
+multiclass VOP3P_Real_MFMA_gfx950<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
+                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
+  let SubtargetPredicate = HasGFX950Insts,
+      AssemblerPredicate = HasGFX950Insts in {
+    defm "" : VOP3P_Real_MFMA_gfx940<op, Name, PS_ACD, PS_VCD>;
+  }
+}
+
+multiclass VOP3P_Real_MFMA_F8F6F4_gfx950_mc<bits<7> op, string Name> {
+  defm _f8_f8 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+
+  let isAsmParserOnly = true in { // Disable ambiguous disassembly.
+    defm _f8_f6 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f6_f8 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f8_f4 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f4_f8 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f6_f6 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f6_f4 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f4_f6 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+    defm _f4_f4 : VOP3P_Real_MFMA_F8F6F4_gfx940<op, Name>;
+  }
+}
+
+multiclass VOP3PX_Real_ScaledMFMA<bits<7> op> {
+  defvar PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64");
+  defvar PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64");
+  defvar Name = PS_ACD.Mnemonic;
+  defvar F8F8Name = !substr(NAME, 0, !sub(!size(NAME), !size("_fN_fM")))#"_f8_f8";
+  let SubtargetPredicate = HasGFX950Insts,
+      DecoderNamespace = "GFX940",
+      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
+   def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
+                     VOP3PXe <op, PS_ACD.Pfl, /*acc_cd=*/1>,
+                     MFMA_F8F6F4_WithSizeTable_Helper<PS_ACD, F8F8Name#"_gfx940_acd">;
+
+   def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
+                     VOP3PXe <op, PS_VCD.Pfl, /*acc_cd=*/0>,
+                     MFMA_F8F6F4_WithSizeTable_Helper<PS_VCD, F8F8Name#"_gfx940_vcd">;
+  }
+}
+
+multiclass VOP3PX_Real_ScaledMFMA_F8F6F4_mc<bits<7> op> {
+  defm _f8_f8 : VOP3PX_Real_ScaledMFMA<op>;
+
+  let isAsmParserOnly = 1 in { // Disable ambiguous disassembly.
+  defm _f8_f6 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f6_f8 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f8_f4 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f4_f8 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f6_f6 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f6_f4 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f4_f6 : VOP3PX_Real_ScaledMFMA<op>;
+  defm _f4_f4 : VOP3PX_Real_ScaledMFMA<op>;
+  }
+}
+
+multiclass VOP3P_Real_MFMA_vi<bits<7> op> {
+  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
+            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
+    let SubtargetPredicate = isGFX8GFX9NotGFX90A;
+    let AssemblerPredicate = HasMAIInsts;
+    let DecoderNamespace = "GFX8";
+    let Constraints = "";
+  }
+}
+
+multiclass VOP3P_Real_MFMA_vi_gfx90a<bits<7> op> :
+  VOP3P_Real_MFMA_gfx90a <op>,
+  VOP3P_Real_MFMA_vi <op>;
+
+multiclass VOP3P_Real_MFMA<bits<7> op, string GFX940Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> :
+  VOP3P_Real_MFMA_vi_gfx90a <op>,
+  VOP3P_Real_MFMA_gfx940 <op, GFX940Name>;
+
+multiclass VOP3P_Real_SMFMAC<bits<7> op, string alias> {
+  def _gfx940 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
+                VOP3Pe_SMFMAC <op> {
+    let AssemblerPredicate = isGFX940Plus;
+    let DecoderNamespace = "GFX8";
+  }
+  def : AMDGPUMnemonicAlias<alias, !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> {
+    let AssemblerPredicate = isGFX940Plus;
+  }
+}
+
+defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>;
+defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>;
+defm V_PK_ADD_I16 : VOP3P_Real_vi <0x02>;
+defm V_PK_SUB_I16 : VOP3P_Real_vi <0x03>;
+defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>;
+defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>;
+defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>;
+defm V_PK_MAX_I16 : VOP3P_Real_vi <0x07>;
+defm V_PK_MIN_I16 : VOP3P_Real_vi <0x08>;
+defm V_PK_MAD_U16 : VOP3P_Real_vi <0x09>;
+
+defm V_PK_ADD_U16 : VOP3P_Real_vi <0x0a>;
+defm V_PK_SUB_U16 : VOP3P_Real_vi <0x0b>;
+defm V_PK_MAX_U16 : VOP3P_Real_vi <0x0c>;
+defm V_PK_MIN_U16 : VOP3P_Real_vi <0x0d>;
+defm V_PK_FMA_F16 : VOP3P_Real_vi <0x0e>;
+defm V_PK_ADD_F16 : VOP3P_Real_vi <0x0f>;
+defm V_PK_MUL_F16 : VOP3P_Real_vi <0x10>;
+defm V_PK_MIN_F16 : VOP3P_Real_vi <0x11>;
+defm V_PK_MAX_F16 : VOP3P_Real_vi <0x12>;
+
+defm V_DOT2_F32_BF16 : VOP3P_Real_vi<0x1a>;
+defm V_PK_MINIMUM3_F16 : VOP3P_Real_vi <0x1b>;
+defm V_PK_MAXIMUM3_F16 : VOP3P_Real_vi <0x1c>;
+
+defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x20>;
+defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>;
+defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>;
+
+let OtherPredicates = [HasFmaMixInsts],
+    DecoderNamespace = "GFX9_DL" in {
+// The mad_mix instructions were renamed and their behaviors changed,
+// but the opcode stayed the same, so we need to put these in a
+// different DecoderNamespace to avoid the ambiguity.
+defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x20>;
+defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
+defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
+}
+
+defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
+defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;
+
+defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
+defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x29>;
+defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x2b>;
+
+defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x28>;
+defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x2a>;
+
+defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x58>;
+defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
+defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MFMA <0x40, "v_mfma_f32_32x32x1_2b_f32">;
+defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MFMA <0x41, "v_mfma_f32_16x16x1_4b_f32">;
+defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MFMA <0x42, "v_mfma_f32_4x4x1_16b_f32">;
+defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MFMA <0x44, "v_mfma_f32_32x32x2_f32">;
+defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MFMA <0x45, "v_mfma_f32_16x16x4_f32">;
+defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MFMA <0x48, "v_mfma_f32_32x32x4_2b_f16">;
+defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MFMA <0x49, "v_mfma_f32_16x16x4_4b_f16">;
+defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MFMA <0x4a, "v_mfma_f32_4x4x4_16b_f16">;
+defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MFMA <0x4c, "v_mfma_f32_32x32x8_f16">;
+defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d, "v_mfma_f32_16x16x16_f16">;
+defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MFMA <0x50, "v_mfma_i32_32x32x4_2b_i8">;
+defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MFMA <0x51, "v_mfma_i32_16x16x4_4b_i8">;
+defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MFMA <0x52, "v_mfma_i32_4x4x4_16b_i8">;
+
+defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MFMA_vi_gfx90a <0x55>;
+defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MFMA_vi_gfx90a <0x54>;
+defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x68>;
+defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x69>;
+defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MFMA_vi_gfx90a <0x6b>;
+defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6c>;
+defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6d>;
+
+defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x63>;
+defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x64>;
+defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx90a <0x65>;
+defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x66>;
+defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx90a <0x67>;
+defm V_MFMA_F64_16X16X4F64      : VOP3P_Real_MFMA_gfx90a <0x6e>;
+defm V_MFMA_F64_4X4X4F64        : VOP3P_Real_MFMA_gfx90a <0x6f>;
+
+defm V_MFMA_F32_16X16X32_F16     : VOP3P_Real_MFMA_gfx950 <0x54, "v_mfma_f32_16x16x32_f16">;
+defm V_MFMA_F32_32X32X16_F16     : VOP3P_Real_MFMA_gfx950 <0x55, "v_mfma_f32_32x32x16_f16">;
+defm V_MFMA_F32_16X16X32_BF16    : VOP3P_Real_MFMA_gfx950 <0x35, "v_mfma_f32_16x16x32_bf16">;
+defm V_MFMA_I32_16X16X64_I8      : VOP3P_Real_MFMA_gfx950 <0x36, "v_mfma_i32_16x16x64_i8">;
+defm V_MFMA_F32_32X32X16_BF16    : VOP3P_Real_MFMA_gfx950 <0x37, "v_mfma_f32_32x32x16_bf16">;
+defm V_MFMA_I32_32X32X32_I8      : VOP3P_Real_MFMA_gfx950 <0x38, "v_mfma_i32_32x32x32_i8">;
+
+defm V_MFMA_LD_SCALE_B32 : VOP3P_Real_vi <0x2c>;
+defm V_MFMA_F32_16X16X128_F8F6F4 : VOP3P_Real_MFMA_F8F6F4_gfx950_mc <0x2d, "v_mfma_f32_16x16x128_f8f6f4">;
+defm V_MFMA_SCALE_F32_16X16X128_F8F6F4 : VOP3PX_Real_ScaledMFMA_F8F6F4_mc <0x2d>;
+defm V_MFMA_F32_32X32X64_F8F6F4  : VOP3P_Real_MFMA_F8F6F4_gfx950_mc <0x2e, "v_mfma_f32_32x32x64_f8f6f4">;
+defm V_MFMA_SCALE_F32_32X32X64_F8F6F4 : VOP3PX_Real_ScaledMFMA_F8F6F4_mc <0x2e>;
+
+defm V_MFMA_I32_32X32X16I8       : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
+defm V_MFMA_I32_16X16X32I8       : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
+defm V_MFMA_F32_16X16X8XF32      : VOP3P_Real_MFMA_gfx940 <0x3e, "v_mfma_f32_16x16x8_xf32">;
+defm V_MFMA_F32_32X32X4XF32      : VOP3P_Real_MFMA_gfx940 <0x3f, "v_mfma_f32_32x32x4_xf32">;
+
+defm V_MFMA_F32_16X16X32_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x70>;
+defm V_MFMA_F32_16X16X32_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x71>;
+defm V_MFMA_F32_16X16X32_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x72>;
+defm V_MFMA_F32_16X16X32_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x73>;
+defm V_MFMA_F32_32X32X16_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x74>;
+defm V_MFMA_F32_32X32X16_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x75>;
+defm V_MFMA_F32_32X32X16_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x76>;
+defm V_MFMA_F32_32X32X16_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x77>;
+
+defm V_MFMA_F32_32X32X4BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x5d, "v_mfma_f32_32x32x4_2b_bf16">;
+defm V_MFMA_F32_16X16X4BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x5e, "v_mfma_f32_16x16x4_4b_bf16">;
+defm V_MFMA_F32_4X4X4BF16_1K     : VOP3P_Real_MFMA_gfx940 <0x5f, "v_mfma_f32_4x4x4_16b_bf16">;
+defm V_MFMA_F32_32X32X8BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x60, "v_mfma_f32_32x32x8_bf16">;
+defm V_MFMA_F32_16X16X16BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x61, "v_mfma_f32_16x16x16_bf16">;
+
+defm V_MFMA_F64_16X16X4F64       : VOP3P_Real_MFMA_gfx940 <0x6e, "v_mfma_f64_16x16x4_f64">;
+defm V_MFMA_F64_4X4X4F64         : VOP3P_Real_MFMA_gfx940 <0x6f, "v_mfma_f64_4x4x4_4b_f64">;
+
+defm V_SMFMAC_F32_16X16X32_F16     : VOP3P_Real_SMFMAC <0x62, "v_smfmac_f32_16x16x32f16">;
+defm V_SMFMAC_F32_32X32X16_F16     : VOP3P_Real_SMFMAC <0x64, "v_smfmac_f32_32x32x16f16">;
+defm V_SMFMAC_F32_16X16X32_BF16    : VOP3P_Real_SMFMAC <0x66, "v_smfmac_f32_16x16x32bf16">;
+defm V_SMFMAC_F32_32X32X16_BF16    : VOP3P_Real_SMFMAC <0x68, "v_smfmac_f32_32x32x16bf16">;
+defm V_SMFMAC_I32_16X16X64_I8      : VOP3P_Real_SMFMAC <0x6a, "v_smfmac_i32_16x16x64i8">;
+defm V_SMFMAC_I32_32X32X32_I8      : VOP3P_Real_SMFMAC <0x6c, "v_smfmac_i32_32x32x32i8">;
+defm V_SMFMAC_F32_16X16X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x78, "v_smfmac_f32_16x16x64bf8bf8">;
+defm V_SMFMAC_F32_16X16X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x79, "v_smfmac_f32_16x16x64bf8fp8">;
+defm V_SMFMAC_F32_16X16X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x7a, "v_smfmac_f32_16x16x64fp8bf8">;
+defm V_SMFMAC_F32_16X16X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x7b, "v_smfmac_f32_16x16x64fp8fp8">;
+defm V_SMFMAC_F32_32X32X32_BF8_BF8 : VOP3P_Real_SMFMAC <0x7c, "v_smfmac_f32_32x32x32bf8bf8">;
+defm V_SMFMAC_F32_32X32X32_BF8_FP8 : VOP3P_Real_SMFMAC <0x7d, "v_smfmac_f32_32x32x32bf8fp8">;
+defm V_SMFMAC_F32_32X32X32_FP8_BF8 : VOP3P_Real_SMFMAC <0x7e, "v_smfmac_f32_32x32x32fp8bf8">;
+defm V_SMFMAC_F32_32X32X32_FP8_FP8 : VOP3P_Real_SMFMAC <0x7f, "v_smfmac_f32_32x32x32fp8fp8">;
+
+defm V_SMFMAC_F32_16X16X64_F16     : VOP3P_Real_SMFMAC <0x5a, "v_smfmac_f32_16x16x64f16">;
+defm V_SMFMAC_F32_32X32X32_F16     : VOP3P_Real_SMFMAC <0x5b, "v_smfmac_f32_32x32x32f16">;
+defm V_SMFMAC_F32_16X16X64_BF16    : VOP3P_Real_SMFMAC <0x39, "v_smfmac_f32_16x16x64bf16">;
+defm V_SMFMAC_F32_32X32X32_BF16    : VOP3P_Real_SMFMAC <0x46, "v_smfmac_f32_32x32x32bf16">;
+defm V_SMFMAC_I32_16X16X128_I8     : VOP3P_Real_SMFMAC <0x3a, "v_smfmac_i32_16x16x128i8">;
+defm V_SMFMAC_I32_32X32X64_I8      : VOP3P_Real_SMFMAC <0x47, "v_smfmac_i32_32x32x64i8">;
+
+defm V_SMFMAC_F32_16X16X128_BF8_BF8 : VOP3P_Real_SMFMAC <0x3b, "v_smfmac_f32_16x16x128bf8bf8">;
+defm V_SMFMAC_F32_16X16X128_BF8_FP8 : VOP3P_Real_SMFMAC <0x3c, "v_smfmac_f32_16x16x128bf8fp8">;
+defm V_SMFMAC_F32_16X16X128_FP8_BF8 : VOP3P_Real_SMFMAC <0x3d, "v_smfmac_f32_16x16x128fp8bf8">;
+defm V_SMFMAC_F32_16X16X128_FP8_FP8 : VOP3P_Real_SMFMAC <0x43, "v_smfmac_f32_16x16x128fp8fp8">;
+defm V_SMFMAC_F32_32X32X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x4b, "v_smfmac_f32_32x32x64bf8bf8">;
+defm V_SMFMAC_F32_32X32X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x4e, "v_smfmac_f32_32x32x64bf8fp8">;
+defm V_SMFMAC_F32_32X32X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x4f, "v_smfmac_f32_32x32x64fp8bf8">;
+defm V_SMFMAC_F32_32X32X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x53, "v_smfmac_f32_32x32x64fp8fp8">;
+
+defm V_PK_FMA_F32 : VOP3P_Real_vi <0x30>;
+defm V_PK_MUL_F32 : VOP3P_Real_vi <0x31>;
+defm V_PK_ADD_F32 : VOP3P_Real_vi <0x32>;
+defm V_PK_MOV_B32 : VOP3P_Real_vi <0x33>;
+
+//===----------------------------------------------------------------------===//
+// GFX10.
+//===----------------------------------------------------------------------===//
+
+let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1 in {
+  multiclass VOP3P_Real_gfx10<bits<8> op> {
+    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
+                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
+  }
+} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1
+
+multiclass VOP3P_Real_gfx10_gfx11<bits<8> op> :
+  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Gen, op>;
+
+multiclass VOP3P_Real_gfx10_gfx11_not_gfx1170<bits<8> op> :
+  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Not11_70Gen, op>;
+
+multiclass VOP3P_Real_gfx10_gfx11_gfx12<bits<8> op> :
+  VOP3P_Real_gfx10_gfx11<op>, VOP3P_Real_Base<GFX12Gen, op>;
+
+multiclass VOP3P_Real_gfx10_gfx11_gfx12_Triple<bits<8> op> :
+  VOP3P_Real_gfx10<op>, VOP3P_Realtriple<GFX11Gen, op>,
+  VOP3P_Realtriple<GFX12Gen, op>;
+
+defm V_PK_MAD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x00>;
+defm V_PK_MUL_LO_U16  : VOP3P_Real_gfx10_gfx11_gfx12<0x01>;
+defm V_PK_ADD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x02>;
+defm V_PK_SUB_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x03>;
+defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x04>;
+defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x05>;
+defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x06>;
+defm V_PK_MAX_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x07>;
+defm V_PK_MIN_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x08>;
+defm V_PK_MAD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x09>;
+defm V_PK_ADD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0a>;
+defm V_PK_SUB_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0b>;
+defm V_PK_MAX_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0c>;
+defm V_PK_MIN_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0d>;
+defm V_PK_FMA_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0e>;
+defm V_PK_ADD_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0f>;
+defm V_PK_MUL_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x10>;
+defm V_PK_MIN_F16     : VOP3P_Real_gfx10_gfx11_not_gfx1170<0x11>;
+defm V_PK_MAX_F16     : VOP3P_Real_gfx10_gfx11_not_gfx1170<0x12>;
+defm V_FMA_MIX_F32    : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x20>;
+defm V_FMA_MIXLO_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x21>;
+defm V_FMA_MIXHI_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x22>;
+
+defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
+defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;
+
+defm V_DOT2_F32_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x13>;
+defm V_DOT4_U32_U8  : VOP3P_Real_gfx10_gfx11_gfx12<0x17>;
+defm V_DOT8_U32_U4  : VOP3P_Real_gfx10_gfx11_gfx12<0x19>;
+
+defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x16>;
+defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x18>;

>From 5f4ddbba33a28f8d03f74c44da4d950446918a36 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Fri, 27 Mar 2026 22:43:36 +0530
Subject: [PATCH 08/16] Update SIISelLowering.cpp

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 309 ++++++++--------------
 1 file changed, 116 insertions(+), 193 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 45a64d55a8057..199264c99704e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5660,13 +5660,6 @@ static unsigned getDPPOpcForWaveReduction(unsigned Opc,
     return AMDGPU::V_OR_B32_dpp;
   case AMDGPU::S_XOR_B32:
     return AMDGPU::V_XOR_B32_dpp;
-  case AMDGPU::V_ADD_F32_e64:
-  case AMDGPU::V_SUB_F32_e64:
-    return AMDGPU::V_ADD_F32_dpp;
-  case AMDGPU::V_MIN_F32_e64:
-    return AMDGPU::V_MIN_F32_dpp;
-  case AMDGPU::V_MAX_F32_e64:
-    return AMDGPU::V_MAX_F32_dpp;
   default:
     llvm_unreachable("unhandled lane op");
   }
@@ -6224,7 +6217,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     } else {
       assert(ST.hasDPP() && "Sub Target does not support DPP Operations");
 
-      bool IsFPOp = isFloatingPointWaveReduceOperation(Opc);
       Register SrcWithIdentity = MRI.createVirtualRegister(SrcRegClass);
       Register IdentityVGPR = MRI.createVirtualRegister(SrcRegClass);
       Register IdentitySGPR = MRI.createVirtualRegister(DstRegClass);
@@ -6257,39 +6249,15 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       unsigned DPPOpc = getDPPOpcForWaveReduction(Opc, ST);
       auto BuildDPPMachineInstr = [&](Register Dst, Register Src,
                                       unsigned DPPCtrl) {
-        auto DPPInstr =
-            BuildMI(BB, MI, DL, TII->get(DPPOpc), Dst).addReg(Src); // old
-        if (IsFPOp)
-          DPPInstr.addImm(SISrcMods::NONE); // src0 modifier
-        DPPInstr.addReg(Src);               // src0
-        if (IsFPOp)
-          DPPInstr.addImm(SISrcMods::NONE); // src1 modifier
-        DPPInstr
+        BuildMI(BB, MI, DL, TII->get(DPPOpc), Dst)
+            .addReg(Src)     // old
+            .addReg(Src)     // src0
             .addReg(Src)     // src1
             .addImm(DPPCtrl) // dpp-ctrl
             .addImm(0xf)     // row-mask
             .addImm(0xf)     // bank-mask
             .addImm(0);      // bound-control
       };
-      auto BuildClampInstr = [&](Register Dst, Register Src0, Register Src1) {
-        unsigned ClampOpc = Opc;
-        if (!IsFPOp) {
-          if (Opc == AMDGPU::S_SUB_I32)
-            ClampOpc = AMDGPU::S_ADD_I32;
-          ClampOpc = TII->getVALUOp(ClampOpc);
-        }
-        auto ClampInstr = BuildMI(BB, MI, DL, TII->get(ClampOpc), Dst);
-        if (IsFPOp)
-          ClampInstr.addImm(SISrcMods::NONE); // src0 mod
-        ClampInstr.addReg(Src0);              // src0
-        if (IsFPOp)
-          ClampInstr.addImm(SISrcMods::NONE); // src1 mod
-        ClampInstr.addReg(Src1);              // src1
-        if (TII->hasIntClamp(*ClampInstr) || TII->hasFPClamp(*ClampInstr))
-          ClampInstr.addImm(0); // clamp
-        if (IsFPOp)
-          ClampInstr.addImm(0); // omod
-      };
       // DPP reduction
       BuildDPPMachineInstr(DPPRowShr1, SrcWithIdentity,
                            AMDGPU::DPP::ROW_SHR_FIRST);
@@ -6317,7 +6285,17 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
             .addReg(DPPRowShr8) // addr
             .addImm(0x1E0)      // swizzle offset (i16)
             .addImm(0x0);       // gds (i1)
-        BuildClampInstr(RowBcast15, DPPRowShr8, SwizzledValue);
+        auto ClampInstr =
+            BuildMI(BB, MI, DL,
+                    TII->get(TII->getVALUOp(
+                        Opc == AMDGPU::S_SUB_I32
+                            ? static_cast<unsigned>(AMDGPU::S_ADD_I32)
+                            : Opc)),
+                    RowBcast15)
+                .addReg(DPPRowShr8)
+                .addReg(SwizzledValue);
+        if (TII->hasIntClamp(*ClampInstr) || TII->hasFPClamp(*ClampInstr))
+          ClampInstr.addImm(0);
       }
       FinalDPPResult = RowBcast15;
       if (!IsWave32) {
@@ -6365,22 +6343,20 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
               .addReg(PermuteByteOffset) // addr
               .addReg(RowBcast15)        // data
               .addImm(0);                // offset
-          BuildClampInstr(RowBcast31, RowBcast15, PermutedValue);
+          auto ClampInstr =
+              BuildMI(BB, MI, DL,
+                      TII->get(TII->getVALUOp(
+                          Opc == AMDGPU::S_SUB_I32
+                              ? static_cast<unsigned>(AMDGPU::S_ADD_I32)
+                              : Opc)),
+                      RowBcast31)
+                  .addReg(RowBcast15)
+                  .addReg(PermutedValue);
+          if (TII->hasIntClamp(*ClampInstr) || TII->hasFPClamp(*ClampInstr))
+            ClampInstr.addImm(0);
         }
         FinalDPPResult = RowBcast31;
       }
-      if (Opc == AMDGPU::V_SUB_F32_e64) {
-        Register NegatedValVGPR =
-            MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-        BuildMI(BB, MI, DL, TII->get(AMDGPU::V_SUB_F32_e64), NegatedValVGPR)
-            .addImm(SISrcMods::NONE)                    // src0 mods
-            .addReg(IdentityVGPR)                       // src0
-            .addImm(SISrcMods::NONE)                    // src1 mods
-            .addReg(IsWave32 ? RowBcast15 : RowBcast31) // src1
-            .addImm(SISrcMods::NONE)                    // clamp
-            .addImm(SISrcMods::NONE);                   // omod
-        FinalDPPResult = NegatedValVGPR;
-      }
       // The final reduced value is in the last lane.
       BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), ReducedValSGPR)
           .addReg(FinalDPPResult)
@@ -16871,15 +16847,56 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
     SDValue Src2 =
         DAG.getExtOrTrunc(*IsSigned, Src2s[ChainLength - 1], SL, MVT::i32);
 
+    // Check if this ADD result is used only by a saturating add
+    // (UADDSAT/SADDSAT). If so, we can fold the saturation into the dot
+    // instruction by setting clamp=1 and using the saturating add's other
+    // operand as the accumulator.
+    bool Clamp = false;
+    SDValue ClampAccum;
+    SDNode *AddN = N;
+    if (AddN->hasOneUse()) {
+      SDNode *User = *AddN->user_begin();
+      unsigned UserOpc = User->getOpcode();
+      if ((UserOpc == ISD::UADDSAT && !*IsSigned) ||
+          (UserOpc == ISD::SADDSAT && *IsSigned)) {
+        // Found a saturating add user. Get the accumulator (the other operand).
+        SDValue Op0 = User->getOperand(0);
+        SDValue Op1 = User->getOperand(1);
+        // Our add result could be either operand of the saturating add.
+        if (Op0.getNode() == AddN) {
+          ClampAccum = Op1;
+        } else {
+          ClampAccum = Op0;
+        }
+        // Only fold if the original accumulator was 0 (we're replacing it)
+        auto *Src2Const = dyn_cast<ConstantSDNode>(Src2);
+        if (Src2Const && Src2Const->getZExtValue() == 0) {
+          Clamp = true;
+          Src2 = DAG.getExtOrTrunc(*IsSigned, ClampAccum, SL, MVT::i32);
+        }
+      }
+    }
+
     SDValue IID = DAG.getTargetConstant(*IsSigned ? Intrinsic::amdgcn_sdot4
                                                   : Intrinsic::amdgcn_udot4,
                                         SL, MVT::i64);
 
     assert(!VT.isVector());
     auto Dot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32, IID, Src0,
-                           Src1, Src2, DAG.getTargetConstant(0, SL, MVT::i1));
+                           Src1, Src2,
+                           DAG.getTargetConstant(Clamp ? 1 : 0, SL, MVT::i1));
 
-    return DAG.getExtOrTrunc(*IsSigned, Dot, SL, VT);
+    SDValue Result = DAG.getExtOrTrunc(*IsSigned, Dot, SL, VT);
+
+    // If we folded the saturation, we need to replace the UADDSAT/SADDSAT node
+    // with our dot result, not just the ADD node.
+    if (Clamp) {
+      SDNode *User = *AddN->user_begin();
+      DCI.CombineTo(User, Result);
+      // Return the result; the caller will see the combine already happened.
+      return Result;
+    }
+
+    return Result;
   }
 
   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
@@ -16940,157 +16957,63 @@ SDValue SITargetLowering::performSatAddCombine(SDNode *N,
       (!Subtarget->hasDot1Insts() && !Subtarget->hasDot8Insts()))
     return SDValue();
 
-  // Pattern: sataddsat(sum_of_products, accumulator)
-  // where sum_of_products = add(add(add(mul0, mul1), mul2), mul3)
-  SDValue SumOp = N->getOperand(0);
+  // First, check if one operand is already a dot intrinsic without clamp.
+  // If performAddCombine already created a dot instruction with clamp=0,
+  // we can fold the saturating add by regenerating with clamp=1.
+  SDValue DotOp = N->getOperand(0);
   SDValue Accum = N->getOperand(1);
 
-  // The sum operand should be an add of multiplications
-  if (SumOp.getOpcode() != ISD::ADD)
-    return SDValue();
-
-  SDValue LHS = SumOp.getOperand(0);
-  SDValue RHS = SumOp.getOperand(1);
-
-  // Walk the add tree looking for multiplications
-  if (!isMul(LHS) && !isMul(RHS))
-    return SDValue();
-
-  SDValue TempNode = SumOp;
-  std::optional<bool> MulIsSigned;
-  SmallVector<DotSrc, 4> Src0s;
-  SmallVector<DotSrc, 4> Src1s;
-  SmallVector<SDValue, 4> Src2s;
-
-  // Match the v_dot4 tree, while collecting src nodes.
-  int ChainLength = 0;
-  for (int I = 0; I < 4; I++) {
-    LHS = TempNode.getOperand(0);
-    RHS = TempNode.getOperand(1);
-    auto MulIdx = isMul(LHS) ? 0 : isMul(RHS) ? 1 : -1;
-    if (MulIdx == -1)
-      break;
-    auto Src0 = handleMulOperand(TempNode.getOperand(MulIdx).getOperand(0));
-    if (!Src0)
-      break;
-    auto Src1 = handleMulOperand(TempNode.getOperand(MulIdx).getOperand(1));
-    if (!Src1)
-      break;
-
-    auto IterIsSigned = checkDot4MulSignedness(
-        TempNode.getOperand(MulIdx), *Src0, *Src1,
-        TempNode.getOperand(MulIdx).getOperand(0),
-        TempNode.getOperand(MulIdx).getOperand(1), DAG);
-    if (!IterIsSigned)
-      break;
-    if (!MulIsSigned)
-      MulIsSigned = *IterIsSigned;
-    if (*IterIsSigned != *MulIsSigned)
-      break;
-    placeSources(*Src0, *Src1, Src0s, Src1s, I);
-    auto AddIdx = 1 - MulIdx;
-
-    // Allow the special case where add (add (mul24, 0), mul24) became ->
-    // add (mul24, mul24).
-    if (I == 2 && isMul(TempNode.getOperand(AddIdx))) {
-      Src2s.push_back(TempNode.getOperand(AddIdx));
-      auto Src0 =
-          handleMulOperand(TempNode.getOperand(AddIdx).getOperand(0));
-      if (!Src0)
-        break;
-      auto Src1 =
-          handleMulOperand(TempNode.getOperand(AddIdx).getOperand(1));
-      if (!Src1)
-        break;
-      auto IterIsSigned = checkDot4MulSignedness(
-          TempNode.getOperand(AddIdx), *Src0, *Src1,
-          TempNode.getOperand(AddIdx).getOperand(0),
-          TempNode.getOperand(AddIdx).getOperand(1), DAG);
-      if (!IterIsSigned)
-        break;
-      assert(MulIsSigned);
-      if (*IterIsSigned != *MulIsSigned)
-        break;
-      placeSources(*Src0, *Src1, Src0s, Src1s, I + 1);
-      Src2s.push_back(DAG.getConstant(0, SL, MVT::i32));
-      ChainLength = I + 2;
-      break;
-    }
-
-    TempNode = TempNode.getOperand(AddIdx);
-    Src2s.push_back(TempNode);
-    ChainLength = I + 1;
-    if (TempNode.getNumOperands() < 2)
-      break;
-  }
-
-  // Need at least 4 multiplications for dot4
-  if (ChainLength < 4)
-    return SDValue();
-
-  // Check signedness consistency: signed saturation requires signed muls
-  if (IsSigned != *MulIsSigned)
-    return SDValue();
-
-  SDValue Src0, Src1;
-
-  // If we are just using a single source for both, and have permuted the
-  // bytes consistently, we can just use the sources without permuting
-  // (commutation).
-  bool UseOriginalSrc = false;
-  if (Src0s.size() == 1 && Src1s.size() == 1 &&
-      Src0s.begin()->PermMask == Src1s.begin()->PermMask &&
-      Src0s.begin()->SrcOp.getValueSizeInBits() >= 32 &&
-      Src1s.begin()->SrcOp.getValueSizeInBits() >= 32) {
-    SmallVector<unsigned, 4> SrcBytes;
-    auto Src0Mask = Src0s.begin()->PermMask;
-    SrcBytes.push_back(Src0Mask & 0xFF000000);
-    bool UniqueEntries = true;
-    for (auto I = 1; I < 4; I++) {
-      auto NextByte = Src0Mask & (0xFF << ((3 - I) * 8));
-
-      if (is_contained(SrcBytes, NextByte)) {
-        UniqueEntries = false;
-        break;
+  // Try both operand orders
+  for (int Swap = 0; Swap < 2; ++Swap) {
+    if (DotOp.getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
+      auto *IIDNode = dyn_cast<ConstantSDNode>(DotOp.getOperand(0));
+      if (!IIDNode) {
+        std::swap(DotOp, Accum);
+        continue;
       }
-      SrcBytes.push_back(NextByte);
-    }
-
-    if (UniqueEntries) {
-      UseOriginalSrc = true;
+      unsigned IID = IIDNode->getZExtValue();
+      // Check for udot4/sdot4/udot2/sdot2 intrinsics
+      if ((IID == Intrinsic::amdgcn_udot4 && !IsSigned) ||
+          (IID == Intrinsic::amdgcn_sdot4 && IsSigned) ||
+          (IID == Intrinsic::amdgcn_udot2 && !IsSigned) ||
+          (IID == Intrinsic::amdgcn_sdot2 && IsSigned)) {
+        // DotOp layout: [IID, Src0, Src1, Src2/Accum, Clamp]
+        // Check if clamp is 0 and accumulator is 0
+        SDValue OldAccum = DotOp.getOperand(3);
+        SDValue OldClamp = DotOp.getOperand(4);
+
+        // Check if old clamp is 0 (otherwise already saturating)
+        auto *ClampConst = dyn_cast<ConstantSDNode>(OldClamp);
+        if (!ClampConst || ClampConst->getZExtValue() != 0) {
+          std::swap(DotOp, Accum);
+          continue;
+        }
 
-      auto *FirstElt = Src0s.begin();
-      auto FirstEltOp =
-          getDWordFromOffset(DAG, SL, FirstElt->SrcOp, FirstElt->DWordOffset);
+        // Check if old accumulator is 0 (the pattern is dot(..., 0) + accum)
+        auto *AccumConst = dyn_cast<ConstantSDNode>(OldAccum);
+        if (!AccumConst || AccumConst->getZExtValue() != 0) {
+          std::swap(DotOp, Accum);
+          continue;
+        }
 
-      auto *SecondElt = Src1s.begin();
-      auto SecondEltOp = getDWordFromOffset(DAG, SL, SecondElt->SrcOp,
-                                            SecondElt->DWordOffset);
+        // Regenerate the dot with clamp=1 and the new accumulator
+        SDValue NewIID = DAG.getTargetConstant(IID, SL, MVT::i64);
+        SDValue Src0 = DotOp.getOperand(1);
+        SDValue Src1 = DotOp.getOperand(2);
 
-      Src0 = DAG.getBitcastedAnyExtOrTrunc(FirstEltOp, SL,
-                                           MVT::getIntegerVT(32));
-      Src1 = DAG.getBitcastedAnyExtOrTrunc(SecondEltOp, SL,
-                                           MVT::getIntegerVT(32));
+        auto NewDot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32,
+                                  NewIID, Src0, Src1, Accum,
+                                  DAG.getTargetConstant(1, SL, MVT::i1));
+        return NewDot;
+      }
     }
+    // Swap and try again
+    std::swap(DotOp, Accum);
   }
 
-  if (!UseOriginalSrc) {
-    Src0 = resolveSources(DAG, SL, Src0s, false, true);
-    Src1 = resolveSources(DAG, SL, Src1s, false, true);
-  }
-
-  // Use the second operand of the saturating add as the accumulator
-  SDValue Src2 = DAG.getExtOrTrunc(IsSigned, Accum, SL, MVT::i32);
-
-  SDValue IID = DAG.getTargetConstant(IsSigned ? Intrinsic::amdgcn_sdot4
-                                                : Intrinsic::amdgcn_udot4,
-                                      SL, MVT::i64);
-
-  // Generate dot4 with clamp=1 for saturation
-  auto Dot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32, IID, Src0,
-                         Src1, Src2, DAG.getTargetConstant(1, SL, MVT::i1));
-
-  return DAG.getExtOrTrunc(IsSigned, Dot, SL, VT);
+  // Note: The pattern for UADDSAT(ADD(...), accum) is handled directly in
+  // performAddCombine, which checks for UADDSAT users and sets clamp=1.
+  return SDValue();
 }
 
 SDValue SITargetLowering::performPtrAddCombine(SDNode *N,
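A note on the combine added above: the fold keys off the sole user of the add chain, and DCI.CombineTo is applied to the UADDSAT/SADDSAT user rather than the ADD itself, since the saturation is the node being replaced. At the IR level the before/after corresponds roughly to this sketch (value names invented; the combine actually runs on the selection DAG, so whether a given shape folds depends on how the extends and adds are legalized):

    declare i32 @llvm.uadd.sat.i32(i32, i32)
    declare i32 @llvm.amdgcn.udot4(i32, i32, i32, i1 immarg)

    ; before: the i8 mul/add chain (elided here as %sum) is consumed only by a
    ; saturating add of the accumulator %c
    ;   %sat = call i32 @llvm.uadd.sat.i32(i32 %sum, i32 %c)
    ; after: one clamped dot, with %c moved into the accumulator operand
    ;   %sat = call i32 @llvm.amdgcn.udot4(i32 %a, i32 %b, i32 %c, i1 true)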

>From af3a1362095e2a91960f612853eed68c92a114ae Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 16:29:45 +0530
Subject: [PATCH 09/16] Update SIISelLowering.cpp

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 201 ++++++++++------------
 1 file changed, 94 insertions(+), 107 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f5ade755fe360..cdd3662e8d0b4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5661,6 +5661,13 @@ static unsigned getDPPOpcForWaveReduction(unsigned Opc,
     return AMDGPU::V_OR_B32_dpp;
   case AMDGPU::S_XOR_B32:
     return AMDGPU::V_XOR_B32_dpp;
+  case AMDGPU::V_ADD_F32_e64:
+  case AMDGPU::V_SUB_F32_e64:
+    return AMDGPU::V_ADD_F32_dpp;
+  case AMDGPU::V_MIN_F32_e64:
+    return AMDGPU::V_MIN_F32_dpp;
+  case AMDGPU::V_MAX_F32_e64:
+    return AMDGPU::V_MAX_F32_dpp;
   default:
     llvm_unreachable("unhandled lane op");
   }
@@ -6218,6 +6225,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     } else {
       assert(ST.hasDPP() && "Sub Target does not support DPP Operations");
 
+      bool IsFPOp = isFloatingPointWaveReduceOperation(Opc);
       Register SrcWithIdentity = MRI.createVirtualRegister(SrcRegClass);
       Register IdentityVGPR = MRI.createVirtualRegister(SrcRegClass);
       Register IdentitySGPR = MRI.createVirtualRegister(DstRegClass);
@@ -6250,15 +6258,39 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       unsigned DPPOpc = getDPPOpcForWaveReduction(Opc, ST);
       auto BuildDPPMachineInstr = [&](Register Dst, Register Src,
                                       unsigned DPPCtrl) {
-        BuildMI(BB, MI, DL, TII->get(DPPOpc), Dst)
-            .addReg(Src)     // old
-            .addReg(Src)     // src0
+        auto DPPInstr =
+            BuildMI(BB, MI, DL, TII->get(DPPOpc), Dst).addReg(Src); // old
+        if (IsFPOp)
+          DPPInstr.addImm(SISrcMods::NONE); // src0 modifier
+        DPPInstr.addReg(Src);               // src0
+        if (IsFPOp)
+          DPPInstr.addImm(SISrcMods::NONE); // src1 modifier
+        DPPInstr
             .addReg(Src)     // src1
             .addImm(DPPCtrl) // dpp-ctrl
             .addImm(0xf)     // row-mask
             .addImm(0xf)     // bank-mask
             .addImm(0);      // bound-control
       };
+      auto BuildClampInstr = [&](Register Dst, Register Src0, Register Src1) {
+        unsigned ClampOpc = Opc;
+        if (!IsFPOp) {
+          if (Opc == AMDGPU::S_SUB_I32)
+            ClampOpc = AMDGPU::S_ADD_I32;
+          ClampOpc = TII->getVALUOp(ClampOpc);
+        }
+        auto ClampInstr = BuildMI(BB, MI, DL, TII->get(ClampOpc), Dst);
+        if (IsFPOp)
+          ClampInstr.addImm(SISrcMods::NONE); // src0 mod
+        ClampInstr.addReg(Src0);              // src0
+        if (IsFPOp)
+          ClampInstr.addImm(SISrcMods::NONE); // src1 mod
+        ClampInstr.addReg(Src1);              // src1
+        if (TII->hasIntClamp(*ClampInstr) || TII->hasFPClamp(*ClampInstr))
+          ClampInstr.addImm(0); // clamp
+        if (IsFPOp)
+          ClampInstr.addImm(0); // omod
+      };
       // DPP reduction
       BuildDPPMachineInstr(DPPRowShr1, SrcWithIdentity,
                            AMDGPU::DPP::ROW_SHR_FIRST);
@@ -6286,17 +6318,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
             .addReg(DPPRowShr8) // addr
             .addImm(0x1E0)      // swizzle offset (i16)
             .addImm(0x0);       // gds (i1)
-        auto ClampInstr =
-            BuildMI(BB, MI, DL,
-                    TII->get(TII->getVALUOp(
-                        Opc == AMDGPU::S_SUB_I32
-                            ? static_cast<unsigned>(AMDGPU::S_ADD_I32)
-                            : Opc)),
-                    RowBcast15)
-                .addReg(DPPRowShr8)
-                .addReg(SwizzledValue);
-        if (TII->hasIntClamp(*ClampInstr) || TII->hasFPClamp(*ClampInstr))
-          ClampInstr.addImm(0);
+        BuildClampInstr(RowBcast15, DPPRowShr8, SwizzledValue);
       }
       FinalDPPResult = RowBcast15;
       if (!IsWave32) {
@@ -6344,20 +6366,22 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
               .addReg(PermuteByteOffset) // addr
               .addReg(RowBcast15)        // data
               .addImm(0);                // offset
-          auto ClampInstr =
-              BuildMI(BB, MI, DL,
-                      TII->get(TII->getVALUOp(
-                          Opc == AMDGPU::S_SUB_I32
-                              ? static_cast<unsigned>(AMDGPU::S_ADD_I32)
-                              : Opc)),
-                      RowBcast31)
-                  .addReg(RowBcast15)
-                  .addReg(PermutedValue);
-          if (TII->hasIntClamp(*ClampInstr) || TII->hasFPClamp(*ClampInstr))
-            ClampInstr.addImm(0);
+          BuildClampInstr(RowBcast31, RowBcast15, PermutedValue);
         }
         FinalDPPResult = RowBcast31;
       }
+      if (Opc == AMDGPU::V_SUB_F32_e64) {
+        Register NegatedValVGPR =
+            MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+        BuildMI(BB, MI, DL, TII->get(AMDGPU::V_SUB_F32_e64), NegatedValVGPR)
+            .addImm(SISrcMods::NONE)                    // src0 mods
+            .addReg(IdentityVGPR)                       // src0
+            .addImm(SISrcMods::NONE)                    // src1 mods
+            .addReg(IsWave32 ? RowBcast15 : RowBcast31) // src1
+            .addImm(SISrcMods::NONE)                    // clamp
+            .addImm(SISrcMods::NONE);                   // omod
+        FinalDPPResult = NegatedValVGPR;
+      }
       // The final reduced value is in the last lane.
       BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), ReducedValSGPR)
           .addReg(FinalDPPResult)
@@ -13392,17 +13416,20 @@ SDValue SITargetLowering::lowerFSQRTF64(SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
 
   SDValue X = Op.getOperand(0);
-  SDValue ScaleConstant = DAG.getConstantFP(0x1.0p-767, DL, MVT::f64);
-
-  SDValue Scaling = DAG.getSetCC(DL, MVT::i1, X, ScaleConstant, ISD::SETOLT);
-
   SDValue ZeroInt = DAG.getConstant(0, DL, MVT::i32);
 
-  // Scale up input if it is too small.
-  SDValue ScaleUpFactor = DAG.getConstant(256, DL, MVT::i32);
-  SDValue ScaleUp =
-      DAG.getNode(ISD::SELECT, DL, MVT::i32, Scaling, ScaleUpFactor, ZeroInt);
-  SDValue SqrtX = DAG.getNode(ISD::FLDEXP, DL, MVT::f64, X, ScaleUp, Flags);
+  SDValue SqrtX = X;
+  SDValue Scaling;
+  if (!Flags.hasApproximateFuncs()) {
+    SDValue ScaleConstant = DAG.getConstantFP(0x1.0p-767, DL, MVT::f64);
+    Scaling = DAG.getSetCC(DL, MVT::i1, X, ScaleConstant, ISD::SETOLT);
+
+    // Scale up input if it is too small.
+    SDValue ScaleUpFactor = DAG.getConstant(256, DL, MVT::i32);
+    SDValue ScaleUp =
+        DAG.getNode(ISD::SELECT, DL, MVT::i32, Scaling, ScaleUpFactor, ZeroInt);
+    SqrtX = DAG.getNode(ISD::FLDEXP, DL, MVT::f64, X, ScaleUp, Flags);
+  }
 
   SDValue SqrtY = DAG.getNode(AMDGPUISD::RSQ, DL, MVT::f64, SqrtX);
 
@@ -13424,24 +13451,31 @@ SDValue SITargetLowering::lowerFSQRTF64(SDValue Op, SelectionDAG &DAG) const {
 
   SDValue SqrtS2 = DAG.getNode(ISD::FMA, DL, MVT::f64, SqrtD0, SqrtH1, SqrtS1);
 
-  SDValue NegSqrtS2 = DAG.getNode(ISD::FNEG, DL, MVT::f64, SqrtS2);
-  SDValue SqrtD1 =
-      DAG.getNode(ISD::FMA, DL, MVT::f64, NegSqrtS2, SqrtS2, SqrtX);
-
-  SDValue SqrtRet = DAG.getNode(ISD::FMA, DL, MVT::f64, SqrtD1, SqrtH1, SqrtS2);
+  SDValue SqrtRet = SqrtS2;
+  if (!Flags.hasApproximateFuncs()) {
+    SDValue NegSqrtS2 = DAG.getNode(ISD::FNEG, DL, MVT::f64, SqrtS2);
+    SDValue SqrtD1 =
+        DAG.getNode(ISD::FMA, DL, MVT::f64, NegSqrtS2, SqrtS2, SqrtX);
 
-  SDValue ScaleDownFactor = DAG.getSignedConstant(-128, DL, MVT::i32);
-  SDValue ScaleDown =
-      DAG.getNode(ISD::SELECT, DL, MVT::i32, Scaling, ScaleDownFactor, ZeroInt);
-  SqrtRet = DAG.getNode(ISD::FLDEXP, DL, MVT::f64, SqrtRet, ScaleDown, Flags);
+    SqrtRet = DAG.getNode(ISD::FMA, DL, MVT::f64, SqrtD1, SqrtH1, SqrtS2);
 
-  // TODO: Switch to fcmp oeq 0 for finite only. Can't fully remove this check
-  // with finite only or nsz because rsq(+/-0) = +/-inf
+    SDValue ScaleDownFactor = DAG.getSignedConstant(-128, DL, MVT::i32);
+    SDValue ScaleDown = DAG.getNode(ISD::SELECT, DL, MVT::i32, Scaling,
+                                    ScaleDownFactor, ZeroInt);
+    SqrtRet = DAG.getNode(ISD::FLDEXP, DL, MVT::f64, SqrtRet, ScaleDown, Flags);
+  }
 
   // TODO: Check for DAZ and expand to subnormals
-  SDValue IsZeroOrInf =
-      DAG.getNode(ISD::IS_FPCLASS, DL, MVT::i1, SqrtX,
-                  DAG.getTargetConstant(fcZero | fcPosInf, DL, MVT::i32));
+
+  SDValue IsZeroOrInf;
+  if (Flags.hasNoInfs()) {
+    SDValue Zero = DAG.getConstantFP(0.0, DL, MVT::f64);
+    IsZeroOrInf = DAG.getSetCC(DL, MVT::i1, SqrtX, Zero, ISD::SETOEQ);
+  } else {
+    IsZeroOrInf =
+        DAG.getNode(ISD::IS_FPCLASS, DL, MVT::i1, SqrtX,
+                    DAG.getTargetConstant(fcZero | fcPosInf, DL, MVT::i32));
+  }
 
   // If x is +INF, +0, or -0, use its original value
   return DAG.getNode(ISD::SELECT, DL, MVT::f64, IsZeroOrInf, SqrtX, SqrtRet,
@@ -16869,56 +16903,15 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
     SDValue Src2 =
         DAG.getExtOrTrunc(*IsSigned, Src2s[ChainLength - 1], SL, MVT::i32);
 
-    // Check if this ADD result is used only by a saturating add
-    // (UADDSAT/SADDSAT). If so, we can fold the saturation into the dot
-    // instruction by setting clamp=1 and using the saturating add's other
-    // operand as the accumulator.
-    bool Clamp = false;
-    SDValue ClampAccum;
-    SDNode *AddN = N;
-    if (AddN->hasOneUse()) {
-      SDNode *User = *AddN->user_begin();
-      unsigned UserOpc = User->getOpcode();
-      if ((UserOpc == ISD::UADDSAT && !*IsSigned) ||
-          (UserOpc == ISD::SADDSAT && *IsSigned)) {
-        // Found a saturating add user. Get the accumulator (the other operand).
-        SDValue Op0 = User->getOperand(0);
-        SDValue Op1 = User->getOperand(1);
-        // Our add result could be either operand of the saturating add.
-        if (Op0.getNode() == AddN) {
-          ClampAccum = Op1;
-        } else {
-          ClampAccum = Op0;
-        }
-        // Only fold if the original accumulator was 0 (we're replacing it)
-        auto *Src2Const = dyn_cast<ConstantSDNode>(Src2);
-        if (Src2Const && Src2Const->getZExtValue() == 0) {
-          Clamp = true;
-          Src2 = DAG.getExtOrTrunc(*IsSigned, ClampAccum, SL, MVT::i32);
-        }
-      }
-    }
-
     SDValue IID = DAG.getTargetConstant(*IsSigned ? Intrinsic::amdgcn_sdot4
                                                   : Intrinsic::amdgcn_udot4,
                                         SL, MVT::i64);
 
     assert(!VT.isVector());
     auto Dot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32, IID, Src0,
-                           Src1, Src2,
-                           DAG.getTargetConstant(Clamp ? 1 : 0, SL, MVT::i1));
-
-    SDValue Result = DAG.getExtOrTrunc(*IsSigned, Dot, SL, VT);
-
-    // If we folded the saturation, we need to replace the UADDSAT/SADDSAT node
-    // with our dot result, not just the ADD node.
-    if (Clamp) {
-      SDNode *User = *AddN->user_begin();
-      DCI.CombineTo(User, Result);
-      // Return the result; the caller will see the combine already happened.
-      return Result;
-    }
+                           Src1, Src2, DAG.getTargetConstant(0, SL, MVT::i1));
 
-    return Result;
+    return DAG.getExtOrTrunc(*IsSigned, Dot, SL, VT);
   }
 
   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
@@ -16959,9 +16952,10 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
   return SDValue();
 }
 
-// Try to fold saturating add with dot product pattern into dot instruction
+// Try to fold a saturating add of a dot intrinsic into a dot instruction
 // with clamp. Matches patterns like:
-// uaddsat(a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3], c) -> v_dot4 clamp
+// uaddsat(dot4(..., 0), c) -> dot4(..., c) clamp
+// uaddsat(dot2(..., 0), c) -> dot2(..., c) clamp
 SDValue SITargetLowering::performSatAddCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -16979,28 +16973,23 @@ SDValue SITargetLowering::performSatAddCombine(SDNode *N,
       (!Subtarget->hasDot1Insts() && !Subtarget->hasDot8Insts()))
     return SDValue();
 
-  // First, check if one operand is already a dot intrinsic without clamp.
-  // If performAddCombine already created a dot instruction with clamp=0,
-  // we can fold the saturating add by regenerating with clamp=1.
+  // Check if one operand is a dot intrinsic without clamp.
+  // Pattern: uaddsat(dot(..., 0), accum) -> dot(..., accum) clamp
   SDValue DotOp = N->getOperand(0);
   SDValue Accum = N->getOperand(1);
 
   // Try both operand orders
   for (int Swap = 0; Swap < 2; ++Swap) {
     if (DotOp.getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
-      auto *IIDNode = dyn_cast<ConstantSDNode>(DotOp.getOperand(0));
-      if (!IIDNode) {
-        std::swap(DotOp, Accum);
-        continue;
-      }
+      ConstantSDNode *IIDNode = cast<ConstantSDNode>(DotOp.getOperand(0));
       unsigned IID = IIDNode->getZExtValue();
+
       // Check for udot4/sdot4/udot2/sdot2 intrinsics
       if ((IID == Intrinsic::amdgcn_udot4 && !IsSigned) ||
           (IID == Intrinsic::amdgcn_sdot4 && IsSigned) ||
           (IID == Intrinsic::amdgcn_udot2 && !IsSigned) ||
           (IID == Intrinsic::amdgcn_sdot2 && IsSigned)) {
         // DotOp layout: [IID, Src0, Src1, Src2/Accum, Clamp]
-        // Check if clamp is 0 and accumulator is 0
         SDValue OldAccum = DotOp.getOperand(3);
         SDValue OldClamp = DotOp.getOperand(4);
 
@@ -17023,9 +17012,9 @@ SDValue SITargetLowering::performSatAddCombine(SDNode *N,
         SDValue Src0 = DotOp.getOperand(1);
         SDValue Src1 = DotOp.getOperand(2);
 
-        auto NewDot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32,
-                                  NewIID, Src0, Src1, Accum,
-                                  DAG.getTargetConstant(1, SL, MVT::i1));
+        SDValue NewDot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32,
+                                     NewIID, Src0, Src1, Accum,
+                                     DAG.getTargetConstant(1, SL, MVT::i1));
         return NewDot;
       }
     }
@@ -17033,8 +17022,6 @@ SDValue SITargetLowering::performSatAddCombine(SDNode *N,
     std::swap(DotOp, Accum);
   }
 
-  // Note: The pattern for UADDSAT(ADD(...), accum) is handled directly in
-  // performAddCombine, which checks for UADDSAT users and sets clamp=1.
   return SDValue();
 }
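With this rework the saturation fold no longer inspects the mul/add chain itself: performAddCombine first builds a plain dot with a zero accumulator, and performSatAddCombine then rewrites that intrinsic. A sketch of the equivalent IR-level rewrite (names invented; the combine actually matches the corresponding INTRINSIC_WO_CHAIN nodes in the DAG):

    %dot = call i32 @llvm.amdgcn.udot4(i32 %a, i32 %b, i32 0, i1 false)
    %sat = call i32 @llvm.uadd.sat.i32(i32 %dot, i32 %c)
    ; rewritten to:
    ;   %sat = call i32 @llvm.amdgcn.udot4(i32 %a, i32 %b, i32 %c, i1 true)

Both operand orders of the saturating add are tried, and the fold only fires when the existing accumulator and clamp operands of the dot are zero.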
 
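The lowerFSQRTF64 hunks in this patch are independent of the dot-product work: under the afn fast-math flag the input rescaling, the second FMA refinement step, and the result rescaling are skipped, and under ninf the IS_FPCLASS zero-or-infinity guard is relaxed to an ordinary compare against zero. A one-line IR sketch of a call that would take the shortened path, assuming the flag survives to the DAG:

    %r = call afn double @llvm.sqrt.f64(double %x)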

>From 12c0971eb803364ef038b54a92e99d1d55762c56 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 16:30:56 +0530
Subject: [PATCH 10/16] Update SIISelLowering.h


>From df7966a5f40dce35b116aa6fb0b9048fa4f710db Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 16:33:18 +0530
Subject: [PATCH 11/16] Update VOP3PInstructions.td

---
 llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 61 +++++++++++----------
 1 file changed, 32 insertions(+), 29 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 9ce13eebb7ca8..23f3147ce0a7a 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -606,28 +606,6 @@ class SDot2Pat<VOP_Pseudo Inst> : GCNPat <
   let Predicates = Inst.Predicates;
 }
 
-// Saturating unsigned dot2 pattern: uaddsat(a[0]*b[0] + a[1]*b[1], c)
-class UDot2SatPat<VOP_Pseudo Inst> : GCNPat <
-  (uaddsat (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
-                                             (srl i32:$src1, (i32 16))),
-                       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
-                                             (and i32:$src1, (i32 65535)))),
-           i32:$src2),
-  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))> {
-  let Predicates = Inst.Predicates;
-}
-
-// Saturating signed dot2 pattern: saddsat(a[0]*b[0] + a[1]*b[1], c)
-class SDot2SatPat<VOP_Pseudo Inst> : GCNPat <
-  (saddsat (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
-                                             (sra i32:$src1, (i32 16))),
-                       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
-                                             (sext_inreg i32:$src1, i16))),
-           i32:$src2),
-  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))> {
-  let Predicates = Inst.Predicates;
-}
-
 let IsDOT = 1 in {
 let OtherPredicates = [HasDot2Insts] in {
 defm V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
@@ -753,7 +731,29 @@ defm V_DOT4_F32_BF8_BF8 : VOP3PDOTF8Inst<"v_dot4_f32_bf8_bf8", int_amdgcn_dot4_f
 def : UDot2Pat<V_DOT2_U32_U16>;
 def : SDot2Pat<V_DOT2_I32_I16>;
 
-// Saturating dot2 patterns (with clamp=1)
+// Saturating unsigned dot2 pattern: uaddsat(a[0]*b[0] + a[1]*b[1], c)
+class UDot2SatPat<VOP_Pseudo Inst> : GCNPat <
+  (uaddsat (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
+                                             (srl i32:$src1, (i32 16))),
+                       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
+                                             (and i32:$src1, (i32 65535)))),
+           i32:$src2),
+  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))> {
+  let Predicates = Inst.Predicates;
+}
+
+// Saturating signed dot2 pattern: saddsat(a[0]*b[0] + a[1]*b[1], c)
+class SDot2SatPat<VOP_Pseudo Inst> : GCNPat <
+  (saddsat (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
+                                             (sra i32:$src1, (i32 16))),
+                       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
+                                             (sext_inreg i32:$src1, i16))),
+           i32:$src2),
+  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 1))> {
+  let Predicates = Inst.Predicates;
+}
+
+// Saturating dot2 pattern instantiations (with clamp=1)
 def : UDot2SatPat<V_DOT2_U32_U16>;
 def : SDot2SatPat<V_DOT2_I32_I16>;
 
@@ -764,8 +764,7 @@ foreach Type = ["U", "I"] in
                       (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
     (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
 
-// Saturating dot4 patterns: (u/s)addsat(mul0*mul1 + mul2*mul3 + mul4*mul5 + mul6*mul7, src2)
-// Pattern: sataddsat(add(add(add(mul0, mul1), mul2), mul3), src2) with clamp=1
+// Saturating dot4 patterns: (u/s)addsat(mul0 + mul1 + mul2 + mul3, src2) with clamp=1
 foreach Type = ["U", "I"] in
   let Predicates = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).Predicates in
   def : GCNPat <
@@ -1900,16 +1899,20 @@ def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32,   i32, v2i32, v4f32], /*_I
 //                       for matrix A, index is i16; Matrix B uses all lanes
 
 def F32_F32_WMMA_w32             : VOP3PWMMA_Profile<[v8f32, v2f32,    v2f32,    v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1, /*_IsF4*/0,
+                                     /*_NoABMods*/1>;
 def F32_BF16X32_WMMA_w32         : VOP3PWMMA_Profile<[v8f32, v16bf16,  v16bf16,  v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
                                      /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1, /*_IsF4*/0,
                                      /*_NoABMods*/1>;
 def F32_F16X32_WMMA_w32          : VOP3PWMMA_Profile<[v8f32, v16f16,   v16f16,   v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1, /*_IsF4*/0,
+                                     /*_NoABMods*/1>;
 def F16_F16X32_WMMA_w32          : VOP3PWMMA_Profile<[v8f16, v16f16,   v16f16,   v8f16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1, /*_IsF4*/0,
+                                     /*_NoABMods*/1>;
 def BF16_BF16X32_WMMA_w32        : VOP3PWMMA_Profile<[v8bf16, v16bf16, v16bf16, v8bf16], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
-                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
+                                     /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1, /*_IsF4*/0,
+                                     /*_NoABMods*/1>;
 def BF16F32_BF16_WMMA_w32        : VOP3PWMMA_Profile<[v8bf16, v16bf16, v16bf16,  v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/0,
                                      /*_Has_ImodOp=*/1, /*_HasMatrixFMT=*/0, /*_HasMatrixScale=*/0, /*_Scale16=*/0, /*_HasMatrixReuse=*/1>;
 def F32_FP8BF8X64_WMMA_w32       : VOP3PWMMA_Profile<[v8f32, v8i32,    v8i32,    v8f32], /*_IsSWMMAC=*/0, /*_IndexType=*/0, /*_IsIU=*/0, /*_IsFP8BF8=*/1,
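The relocated UDot2SatPat/SDot2SatPat classes match the scalar form the <2 x i16> arithmetic takes after legalization, where the two halves show up as shift/mask operations on the packed i32 and the products as 24-bit multiplies. A hand-written IR sketch of the unsigned shape (names invented; the u24 multiplies arise in the DAG from the known 16-bit value ranges):

    %a.hi = lshr i32 %a, 16
    %b.hi = lshr i32 %b, 16
    %a.lo = and i32 %a, 65535
    %b.lo = and i32 %b, 65535
    %m.hi = mul i32 %a.hi, %b.hi
    %m.lo = mul i32 %a.lo, %b.lo
    %sum  = add i32 %m.hi, %m.lo
    %sat  = call i32 @llvm.uadd.sat.i32(i32 %sum, i32 %c)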

>From b8f2e9c2751e196e2f52d8132500baac72f3ab70 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 16:35:43 +0530
Subject: [PATCH 12/16] Update idot2-sat.ll

---
 llvm/test/CodeGen/AMDGPU/idot2-sat.ll | 550 +++++++++++++++++++++++++-
 1 file changed, 546 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/idot2-sat.ll b/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
index 9c6921000e0b0..0d1da43a06912 100644
--- a/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
@@ -3,7 +3,11 @@
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1011 < %s | FileCheck --check-prefixes=GFX10-DL %s
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck --check-prefixes=GFX950 %s
 
-; Test dot2 patterns with saturating add (clamp)
+; Test dot2 and dot4 patterns, both with saturating add (clamp) and without
+
+;------------------------------------------------------------------------------
+; DOT2 SATURATING TESTS
+;------------------------------------------------------------------------------
 
 ; Unsigned dot2 with saturation: uaddsat(a[0]*b[0] + a[1]*b[1], c)
 define i32 @udot2_sat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
@@ -27,7 +31,7 @@ define i32 @udot2_sat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 entry:
   %conv.i = zext <2 x i16> %a to <2 x i32>
   %conv6.i = zext <2 x i16> %b to <2 x i32>
-  %mul.i = mul nuw <2 x i32> %conv6.i, %conv.i
+  %mul.i = mul <2 x i32> %conv6.i, %conv.i
   %0 = extractelement <2 x i32> %mul.i, i64 0
   %1 = extractelement <2 x i32> %mul.i, i64 1
   %add.i = add i32 %0, %1
@@ -57,13 +61,551 @@ define i32 @sdot2_sat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
 entry:
   %conv.i = sext <2 x i16> %a to <2 x i32>
   %conv6.i = sext <2 x i16> %b to <2 x i32>
-  %mul.i = mul nsw <2 x i32> %conv6.i, %conv.i
+  %mul.i = mul <2 x i32> %conv6.i, %conv.i
   %0 = extractelement <2 x i32> %mul.i, i64 0
   %1 = extractelement <2 x i32> %mul.i, i64 1
-  %add.i = add nsw i32 %0, %1
+  %add.i = add i32 %0, %1
   %cond1.i.i = tail call i32 @llvm.sadd.sat.i32(i32 %add.i, i32 %c)
   ret i32 %cond1.i.i
 }
 
+;------------------------------------------------------------------------------
+; DOT2 NON-SATURATING TESTS
+;------------------------------------------------------------------------------
+
+; Unsigned dot2 without saturation
+define i32 @udot2_unsat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
+; GFX9-DL-LABEL: udot2_unsat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_dot2_u32_u16 v0, v1, v0, v2
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot2_unsat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_dot2_u32_u16 v0, v1, v0, v2
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot2_unsat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_dot2_u32_u16 v0, v1, v0, v2
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %conv.i = zext <2 x i16> %a to <2 x i32>
+  %conv6.i = zext <2 x i16> %b to <2 x i32>
+  %mul.i = mul <2 x i32> %conv6.i, %conv.i
+  %0 = extractelement <2 x i32> %mul.i, i64 0
+  %1 = extractelement <2 x i32> %mul.i, i64 1
+  %add.i = add i32 %1, %c
+  %add8.i = add i32 %add.i, %0
+  ret i32 %add8.i
+}
+
+; Signed dot2 without saturation
+define i32 @sdot2_unsat(<2 x i16> %a, <2 x i16> %b, i32 %c) {
+; GFX9-DL-LABEL: sdot2_unsat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_dot2_i32_i16 v0, v1, v0, v2
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot2_unsat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_dot2_i32_i16 v0, v1, v0, v2
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot2_unsat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_dot2_i32_i16 v0, v1, v0, v2
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %conv.i = sext <2 x i16> %a to <2 x i32>
+  %conv6.i = sext <2 x i16> %b to <2 x i32>
+  %mul.i = mul <2 x i32> %conv6.i, %conv.i
+  %0 = extractelement <2 x i32> %mul.i, i64 0
+  %1 = extractelement <2 x i32> %mul.i, i64 1
+  %add.i = add i32 %1, %c
+  %add8.i = add i32 %add.i, %0
+  ret i32 %add8.i
+}
+
+;------------------------------------------------------------------------------
+; DOT4 TESTS WITH BITCAST FROM I32
+;------------------------------------------------------------------------------
+
+; Unsigned dot4 without saturation using bitcast from i32
+define i32 @udot4_unsat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
+; GFX9-DL-LABEL: udot4_unsat_bitcast:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
+; GFX9-DL-NEXT:    v_and_b32_e32 v4, 0xff, v0
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v5, 8, v1
+; GFX9-DL-NEXT:    v_and_b32_e32 v6, 0xff, v1
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX9-DL-NEXT:    v_mad_u32_u24 v1, v4, v6, v7
+; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v3, v5, v0
+; GFX9-DL-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot4_unsat_bitcast:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_mov_b32_e32 v3, 0xffff
+; GFX10-DL-NEXT:    v_and_b32_e32 v5, 0xff, v0
+; GFX10-DL-NEXT:    v_and_b32_e32 v6, 0xff, v1
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX10-DL-NEXT:    v_and_b32_sdwa v4, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX10-DL-NEXT:    v_and_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX10-DL-NEXT:    v_mad_u32_u24 v1, v5, v6, v7
+; GFX10-DL-NEXT:    v_mad_u32_u24 v0, v4, v3, v0
+; GFX10-DL-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot4_unsat_bitcast:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
+; GFX950-NEXT:    v_and_b32_e32 v4, 0xff, v0
+; GFX950-NEXT:    v_lshrrev_b16_e32 v5, 8, v1
+; GFX950-NEXT:    v_and_b32_e32 v6, 0xff, v1
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX950-NEXT:    v_mad_u32_u24 v1, v4, v6, v7
+; GFX950-NEXT:    v_mad_u32_u24 v0, v3, v5, v0
+; GFX950-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a_vec = bitcast i32 %a_packed to <4 x i8>
+  %b_vec = bitcast i32 %b_packed to <4 x i8>
+  %a_ext = zext <4 x i8> %a_vec to <4 x i32>
+  %b_ext = zext <4 x i8> %b_vec to <4 x i32>
+  %mul = mul <4 x i32> %a_ext, %b_ext
+  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
+  %result = add i32 %sum, %c
+  ret i32 %result
+}
+
+; Signed dot4 without saturation using bitcast from i32
+define i32 @sdot4_unsat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
+; GFX9-DL-LABEL: sdot4_unsat_bitcast:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
+; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v5, v0, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v6, v1, 0, 8
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v7, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
+; GFX9-DL-NEXT:    v_mad_i32_i24 v1, v5, v6, v7
+; GFX9-DL-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot4_unsat_bitcast:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_lshrrev_b16 v3, 8, v0
+; GFX10-DL-NEXT:    v_lshrrev_b16 v4, 8, v1
+; GFX10-DL-NEXT:    v_bfe_i32 v5, v0, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v6, v1, 0, 8
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v7, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX10-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v3, v4, v7
+; GFX10-DL-NEXT:    v_mad_i32_i24 v0, v5, v6, v0
+; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v2
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot4_unsat_bitcast:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
+; GFX950-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
+; GFX950-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v5, v0, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v6, v1, 0, 8
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v7, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX950-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
+; GFX950-NEXT:    v_mad_i32_i24 v1, v5, v6, v7
+; GFX950-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a_vec = bitcast i32 %a_packed to <4 x i8>
+  %b_vec = bitcast i32 %b_packed to <4 x i8>
+  %a_ext = sext <4 x i8> %a_vec to <4 x i32>
+  %b_ext = sext <4 x i8> %b_vec to <4 x i32>
+  %mul = mul <4 x i32> %a_ext, %b_ext
+  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
+  %result = add i32 %sum, %c
+  ret i32 %result
+}
+
+; Unsigned dot4 with saturation using bitcast from i32
+define i32 @udot4_sat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
+; GFX9-DL-LABEL: udot4_sat_bitcast:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v4, 8, v1
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v3, v4, v0
+; GFX9-DL-NEXT:    v_add3_u32 v0, v5, v6, v0
+; GFX9-DL-NEXT:    v_add_u32_e64 v0, v0, v2 clamp
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot4_sat_bitcast:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_mov_b32_e32 v3, 0xffff
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_and_b32_sdwa v4, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX10-DL-NEXT:    v_and_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX10-DL-NEXT:    v_mad_u32_u24 v1, v4, v3, v5
+; GFX10-DL-NEXT:    v_add3_u32 v0, v6, v0, v1
+; GFX10-DL-NEXT:    v_add_nc_u32_e64 v0, v0, v2 clamp
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot4_sat_bitcast:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
+; GFX950-NEXT:    v_lshrrev_b16_e32 v4, 8, v1
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX950-NEXT:    v_mad_u32_u24 v0, v3, v4, v0
+; GFX950-NEXT:    v_add3_u32 v0, v5, v6, v0
+; GFX950-NEXT:    v_add_u32_e64 v0, v0, v2 clamp
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a_vec = bitcast i32 %a_packed to <4 x i8>
+  %b_vec = bitcast i32 %b_packed to <4 x i8>
+  %a_ext = zext <4 x i8> %a_vec to <4 x i32>
+  %b_ext = zext <4 x i8> %b_vec to <4 x i32>
+  %mul = mul <4 x i32> %a_ext, %b_ext
+  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
+  %result = call i32 @llvm.uadd.sat.i32(i32 %sum, i32 %c)
+  ret i32 %result
+}
+
+; Signed dot4 with saturation using bitcast from i32
+define i32 @sdot4_sat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
+; GFX9-DL-LABEL: sdot4_sat_bitcast:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
+; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v5, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v6, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
+; GFX9-DL-NEXT:    v_add3_u32 v0, v5, v6, v0
+; GFX9-DL-NEXT:    v_add_i32 v0, v0, v2 clamp
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot4_sat_bitcast:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_lshrrev_b16 v3, 8, v0
+; GFX10-DL-NEXT:    v_lshrrev_b16 v4, 8, v1
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v5, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v6, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX10-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v3, v4, v5
+; GFX10-DL-NEXT:    v_add3_u32 v0, v6, v0, v1
+; GFX10-DL-NEXT:    v_add_nc_i32 v0, v0, v2 clamp
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot4_sat_bitcast:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
+; GFX950-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
+; GFX950-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v5, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v6, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
+; GFX950-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
+; GFX950-NEXT:    v_add3_u32 v0, v5, v6, v0
+; GFX950-NEXT:    v_add_i32 v0, v0, v2 clamp
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a_vec = bitcast i32 %a_packed to <4 x i8>
+  %b_vec = bitcast i32 %b_packed to <4 x i8>
+  %a_ext = sext <4 x i8> %a_vec to <4 x i32>
+  %b_ext = sext <4 x i8> %b_vec to <4 x i32>
+  %mul = mul <4 x i32> %a_ext, %b_ext
+  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
+  %result = call i32 @llvm.sadd.sat.i32(i32 %sum, i32 %c)
+  ret i32 %result
+}
+
+;------------------------------------------------------------------------------
+; TESTS WITH I8 PROMOTION
+;------------------------------------------------------------------------------
+
+; Test unsigned dot4 with i8 values zext'd individually
+define i32 @udot4_i8_zext(i8 %a0, i8 %a1, i8 %a2, i8 %a3,
+; GFX9-DL-LABEL: udot4_i8_zext:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    s_mov_b32 s4, 0xc0c0400
+; GFX9-DL-NEXT:    s_mov_b32 s5, 0x4000c0c
+; GFX9-DL-NEXT:    v_perm_b32 v4, v4, v5, s4
+; GFX9-DL-NEXT:    v_perm_b32 v5, v7, v6, s5
+; GFX9-DL-NEXT:    v_perm_b32 v0, v0, v1, s4
+; GFX9-DL-NEXT:    v_perm_b32 v1, v3, v2, s5
+; GFX9-DL-NEXT:    v_or_b32_e32 v4, v5, v4
+; GFX9-DL-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX9-DL-NEXT:    v_dot4_u32_u8 v0, v0, v4, 0
+; GFX9-DL-NEXT:    v_add_u32_e32 v0, v0, v8
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot4_i8_zext:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_perm_b32 v4, v4, v5, 0xc0c0400
+; GFX10-DL-NEXT:    v_perm_b32 v5, v7, v6, 0x4000c0c
+; GFX10-DL-NEXT:    v_perm_b32 v0, v0, v1, 0xc0c0400
+; GFX10-DL-NEXT:    v_perm_b32 v1, v3, v2, 0x4000c0c
+; GFX10-DL-NEXT:    v_or_b32_e32 v2, v5, v4
+; GFX10-DL-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX10-DL-NEXT:    v_dot4_u32_u8 v0, v0, v2, 0
+; GFX10-DL-NEXT:    v_add_nc_u32_e32 v0, v0, v8
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot4_i8_zext:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    s_mov_b32 s0, 0xc0c0400
+; GFX950-NEXT:    s_mov_b32 s1, 0x4000c0c
+; GFX950-NEXT:    v_perm_b32 v4, v4, v5, s0
+; GFX950-NEXT:    v_perm_b32 v5, v7, v6, s1
+; GFX950-NEXT:    v_perm_b32 v0, v0, v1, s0
+; GFX950-NEXT:    v_perm_b32 v1, v3, v2, s1
+; GFX950-NEXT:    v_or_b32_e32 v4, v5, v4
+; GFX950-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX950-NEXT:    v_dot4_u32_u8 v0, v0, v4, 0
+; GFX950-NEXT:    s_nop 2
+; GFX950-NEXT:    v_add_u32_e32 v0, v0, v8
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+                          i8 %b0, i8 %b1, i8 %b2, i8 %b3, i32 %c) {
+entry:
+  %a0_32 = zext i8 %a0 to i32
+  %a1_32 = zext i8 %a1 to i32
+  %a2_32 = zext i8 %a2 to i32
+  %a3_32 = zext i8 %a3 to i32
+  %b0_32 = zext i8 %b0 to i32
+  %b1_32 = zext i8 %b1 to i32
+  %b2_32 = zext i8 %b2 to i32
+  %b3_32 = zext i8 %b3 to i32
+  %m0 = mul i32 %a0_32, %b0_32
+  %m1 = mul i32 %a1_32, %b1_32
+  %m2 = mul i32 %a2_32, %b2_32
+  %m3 = mul i32 %a3_32, %b3_32
+  %sum01 = add i32 %m0, %m1
+  %sum012 = add i32 %sum01, %m2
+  %sum0123 = add i32 %sum012, %m3
+  %result = add i32 %sum0123, %c
+  ret i32 %result
+}
+
+; Test signed dot4 with i8 values sext'd individually
+define i32 @sdot4_i8_sext(i8 %a0, i8 %a1, i8 %a2, i8 %a3,
+; GFX9-DL-LABEL: sdot4_i8_sext:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    s_mov_b32 s4, 0xc0c0400
+; GFX9-DL-NEXT:    s_mov_b32 s5, 0x4000c0c
+; GFX9-DL-NEXT:    v_perm_b32 v4, v4, v5, s4
+; GFX9-DL-NEXT:    v_perm_b32 v5, v7, v6, s5
+; GFX9-DL-NEXT:    v_perm_b32 v0, v0, v1, s4
+; GFX9-DL-NEXT:    v_perm_b32 v1, v3, v2, s5
+; GFX9-DL-NEXT:    v_or_b32_e32 v4, v5, v4
+; GFX9-DL-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX9-DL-NEXT:    v_dot4_i32_i8 v0, v0, v4, 0
+; GFX9-DL-NEXT:    v_add_u32_e32 v0, v0, v8
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot4_i8_sext:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_perm_b32 v4, v4, v5, 0xc0c0400
+; GFX10-DL-NEXT:    v_perm_b32 v5, v7, v6, 0x4000c0c
+; GFX10-DL-NEXT:    v_perm_b32 v0, v0, v1, 0xc0c0400
+; GFX10-DL-NEXT:    v_perm_b32 v1, v3, v2, 0x4000c0c
+; GFX10-DL-NEXT:    v_or_b32_e32 v2, v5, v4
+; GFX10-DL-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX10-DL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-DL-NEXT:    v_dot4c_i32_i8 v1, v0, v2
+; GFX10-DL-NEXT:    v_add_nc_u32_e32 v0, v1, v8
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot4_i8_sext:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    s_mov_b32 s0, 0xc0c0400
+; GFX950-NEXT:    s_mov_b32 s1, 0x4000c0c
+; GFX950-NEXT:    v_perm_b32 v4, v4, v5, s0
+; GFX950-NEXT:    v_perm_b32 v5, v7, v6, s1
+; GFX950-NEXT:    v_perm_b32 v0, v0, v1, s0
+; GFX950-NEXT:    v_perm_b32 v1, v3, v2, s1
+; GFX950-NEXT:    v_or_b32_e32 v4, v5, v4
+; GFX950-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX950-NEXT:    v_mov_b32_e32 v1, 0
+; GFX950-NEXT:    v_dot4c_i32_i8_e32 v1, v0, v4
+; GFX950-NEXT:    s_nop 2
+; GFX950-NEXT:    v_add_u32_e32 v0, v1, v8
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+                          i8 %b0, i8 %b1, i8 %b2, i8 %b3, i32 %c) {
+entry:
+  %a0_32 = sext i8 %a0 to i32
+  %a1_32 = sext i8 %a1 to i32
+  %a2_32 = sext i8 %a2 to i32
+  %a3_32 = sext i8 %a3 to i32
+  %b0_32 = sext i8 %b0 to i32
+  %b1_32 = sext i8 %b1 to i32
+  %b2_32 = sext i8 %b2 to i32
+  %b3_32 = sext i8 %b3 to i32
+  %m0 = mul i32 %a0_32, %b0_32
+  %m1 = mul i32 %a1_32, %b1_32
+  %m2 = mul i32 %a2_32, %b2_32
+  %m3 = mul i32 %a3_32, %b3_32
+  %sum01 = add i32 %m0, %m1
+  %sum012 = add i32 %sum01, %m2
+  %sum0123 = add i32 %sum012, %m3
+  %result = add i32 %sum0123, %c
+  ret i32 %result
+}
+
+;------------------------------------------------------------------------------
+; TESTS WITH <4 x i8> FUNCTION ARGUMENTS
+;------------------------------------------------------------------------------
+
+; Test with raw <4 x i8> arguments (the ABI passes each element in its own register)
+define i32 @udot4_v4i8_arg(<4 x i8> %a, <4 x i8> %b, i32 %c) {
+; GFX9-DL-LABEL: udot4_v4i8_arg:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX9-DL-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX9-DL-NEXT:    v_and_b32_e32 v4, 0xff, v4
+; GFX9-DL-NEXT:    v_and_b32_e32 v5, 0xff, v5
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mad_u32_u24 v1, v1, v5, v3
+; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v0, v4, v2
+; GFX9-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot4_v4i8_arg:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX10-DL-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX10-DL-NEXT:    v_and_b32_e32 v4, 0xff, v4
+; GFX10-DL-NEXT:    v_and_b32_e32 v5, 0xff, v5
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mad_u32_u24 v1, v1, v5, v3
+; GFX10-DL-NEXT:    v_mad_u32_u24 v0, v0, v4, v2
+; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot4_v4i8_arg:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX950-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX950-NEXT:    v_and_b32_e32 v4, 0xff, v4
+; GFX950-NEXT:    v_and_b32_e32 v5, 0xff, v5
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mad_u32_u24 v1, v1, v5, v3
+; GFX950-NEXT:    v_mad_u32_u24 v0, v0, v4, v2
+; GFX950-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a_ext = zext <4 x i8> %a to <4 x i32>
+  %b_ext = zext <4 x i8> %b to <4 x i32>
+  %mul = mul <4 x i32> %a_ext, %b_ext
+  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
+  %result = add i32 %sum, %c
+  ret i32 %result
+}
+
+define i32 @sdot4_v4i8_arg(<4 x i8> %a, <4 x i8> %b, i32 %c) {
+; GFX9-DL-LABEL: sdot4_v4i8_arg:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v5, v5, 0, 8
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mad_i32_i24 v1, v1, v5, v3
+; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v0, v4, v2
+; GFX9-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot4_v4i8_arg:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v5, v5, 0, 8
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v1, v5, v3
+; GFX10-DL-NEXT:    v_mad_i32_i24 v0, v0, v4, v2
+; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot4_v4i8_arg:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v5, v5, 0, 8
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mad_i32_i24 v1, v1, v5, v3
+; GFX950-NEXT:    v_mad_i32_i24 v0, v0, v4, v2
+; GFX950-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a_ext = sext <4 x i8> %a to <4 x i32>
+  %b_ext = sext <4 x i8> %b to <4 x i32>
+  %mul = mul <4 x i32> %a_ext, %b_ext
+  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
+  %result = add i32 %sum, %c
+  ret i32 %result
+}
+
 declare i32 @llvm.sadd.sat.i32(i32, i32)
 declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

>From 8e12a273e49575716119528e7d52ad03cf5d632f Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 19:52:46 +0530
Subject: [PATCH 13/16] Update AMDGPUCodeGenPrepare.cpp

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 178 ++++++++++++++++++
 1 file changed, 178 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index fe5624d824f94..38e796c75fda5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -263,6 +263,8 @@ class AMDGPUCodeGenPrepareImpl
   bool visitLog(FPMathOperator &Log, Intrinsic::ID IID);
   bool visitMbcntLo(IntrinsicInst &I) const;
   bool visitMbcntHi(IntrinsicInst &I) const;
+  bool visitVectorReduceAdd(IntrinsicInst &I);
+  bool visitSaturatingAdd(IntrinsicInst &I);
   bool run();
 };
 
@@ -2017,6 +2019,11 @@ bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
     return visitMbcntLo(I);
   case Intrinsic::amdgcn_mbcnt_hi:
     return visitMbcntHi(I);
+  case Intrinsic::vector_reduce_add:
+    return visitVectorReduceAdd(I);
+  case Intrinsic::uadd_sat:
+  case Intrinsic::sadd_sat:
+    return visitSaturatingAdd(I);
   default:
     return false;
   }
@@ -2336,6 +2343,177 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
   return tryReplaceWithWorkitemId(I, Wave);
 }
 
+/// Helper to match the dot4 pattern: mul(zext/sext <4 x i8>, zext/sext <4 x i8>)
+/// Returns true if pattern matches, sets A, B to the <4 x i8> sources and
+/// IsSigned based on whether sext was used.
+static bool matchDot4Pattern(Value *MulOp, Value *&A, Value *&B,
+                             bool &IsSigned) {
+  auto *Mul = dyn_cast<BinaryOperator>(MulOp);
+  if (!Mul || Mul->getOpcode() != Instruction::Mul)
+    return false;
+
+  // Check that result type is <4 x i32>
+  auto *MulTy = dyn_cast<FixedVectorType>(Mul->getType());
+  if (!MulTy || MulTy->getNumElements() != 4 ||
+      !MulTy->getElementType()->isIntegerTy(32))
+    return false;
+
+  Value *Src0 = Mul->getOperand(0);
+  Value *Src1 = Mul->getOperand(1);
+
+  // Match zext <4 x i8> or sext <4 x i8>
+  auto matchExtend = [](Value *V, Value *&Src, bool &Signed) -> bool {
+    if (auto *ZExt = dyn_cast<ZExtInst>(V)) {
+      auto *SrcTy = dyn_cast<FixedVectorType>(ZExt->getSrcTy());
+      if (SrcTy && SrcTy->getNumElements() == 4 &&
+          SrcTy->getElementType()->isIntegerTy(8)) {
+        Src = ZExt->getOperand(0);
+        Signed = false;
+        return true;
+      }
+    } else if (auto *SExt = dyn_cast<SExtInst>(V)) {
+      auto *SrcTy = dyn_cast<FixedVectorType>(SExt->getSrcTy());
+      if (SrcTy && SrcTy->getNumElements() == 4 &&
+          SrcTy->getElementType()->isIntegerTy(8)) {
+        Src = SExt->getOperand(0);
+        Signed = true;
+        return true;
+      }
+    }
+    return false;
+  };
+
+  bool Signed0 = false, Signed1 = false;
+  if (!matchExtend(Src0, A, Signed0) || !matchExtend(Src1, B, Signed1))
+    return false;
+
+  // Both operands must have the same signedness
+  if (Signed0 != Signed1)
+    return false;
+
+  IsSigned = Signed0;
+  return true;
+}
+
+/// Try to convert vector.reduce.add(mul(zext/sext <4 x i8>, zext/sext <4 x i8>))
+/// to a dot4 intrinsic call (non-saturating case only).
+/// The saturating case is handled by visitSaturatingAdd which starts at the root.
+bool AMDGPUCodeGenPrepareImpl::visitVectorReduceAdd(IntrinsicInst &I) {
+  // Check if we have dot4 instructions available
+  if (!ST.hasDot7Insts() || (!ST.hasDot1Insts() && !ST.hasDot8Insts()))
+    return false;
+
+  // Skip if this reduce is used by a saturating add; that case will be
+  // handled by visitSaturatingAdd starting from the root instruction.
+  if (I.hasOneUse()) {
+    if (auto *User = dyn_cast<IntrinsicInst>(*I.user_begin())) {
+      Intrinsic::ID UserIID = User->getIntrinsicID();
+      if (UserIID == Intrinsic::uadd_sat || UserIID == Intrinsic::sadd_sat)
+        return false;
+    }
+  }
+
+  Value *A = nullptr, *B = nullptr;
+  bool IsSigned = false;
+
+  if (!matchDot4Pattern(I.getArgOperand(0), A, B, IsSigned))
+    return false;
+
+  LLVMContext &Ctx = I.getContext();
+  Type *I32Ty = Type::getInt32Ty(Ctx);
+  IRBuilder<> Builder(&I);
+
+  // Bitcast <4 x i8> to i32
+  Value *ASrc = Builder.CreateBitCast(A, I32Ty, "dot4.a");
+  Value *BSrc = Builder.CreateBitCast(B, I32Ty, "dot4.b");
+
+  // Non-saturating case: accumulator is 0, clamp is false
+  Value *Acc = ConstantInt::get(I32Ty, 0);
+  Value *Clamp = ConstantInt::getFalse(Ctx);
+
+  Intrinsic::ID DotIID =
+      IsSigned ? Intrinsic::amdgcn_sdot4 : Intrinsic::amdgcn_udot4;
+  Function *DotFn =
+      Intrinsic::getOrInsertDeclaration(F.getParent(), DotIID, {});
+
+  Value *Dot = Builder.CreateCall(DotFn, {ASrc, BSrc, Acc, Clamp}, "dot4");
+
+  I.replaceAllUsesWith(Dot);
+  DeadVals.push_back(&I);
+
+  return true;
+}
+
+/// Try to convert uadd.sat/sadd.sat(vector.reduce.add(mul(...)), c) to a
+/// saturating dot4 intrinsic. This combine starts at the root (saturating add)
+/// and looks at its operands.
+bool AMDGPUCodeGenPrepareImpl::visitSaturatingAdd(IntrinsicInst &I) {
+  // Check if we have dot4 instructions available
+  if (!ST.hasDot7Insts() || (!ST.hasDot1Insts() && !ST.hasDot8Insts()))
+    return false;
+
+  Intrinsic::ID IID = I.getIntrinsicID();
+  bool IsSigned = (IID == Intrinsic::sadd_sat);
+
+  // Look for vector.reduce.add as one of the operands
+  Value *ReduceOp = nullptr;
+  Value *Accum = nullptr;
+
+  for (int Swap = 0; Swap < 2; ++Swap) {
+    Value *Op0 = I.getArgOperand(Swap);
+    Value *Op1 = I.getArgOperand(1 - Swap);
+
+    if (auto *ReduceInst = dyn_cast<IntrinsicInst>(Op0)) {
+      if (ReduceInst->getIntrinsicID() == Intrinsic::vector_reduce_add) {
+        ReduceOp = Op0;
+        Accum = Op1;
+        break;
+      }
+    }
+  }
+
+  if (!ReduceOp)
+    return false;
+
+  auto *ReduceInst = cast<IntrinsicInst>(ReduceOp);
+
+  Value *A = nullptr, *B = nullptr;
+  bool PatternSigned = false;
+
+  if (!matchDot4Pattern(ReduceInst->getArgOperand(0), A, B, PatternSigned))
+    return false;
+
+  // Signedness of the pattern must match the saturating add type
+  if (PatternSigned != IsSigned)
+    return false;
+
+  LLVMContext &Ctx = I.getContext();
+  Type *I32Ty = Type::getInt32Ty(Ctx);
+  IRBuilder<> Builder(&I);
+
+  // Bitcast <4 x i8> to i32
+  Value *ASrc = Builder.CreateBitCast(A, I32Ty, "dot4.a");
+  Value *BSrc = Builder.CreateBitCast(B, I32Ty, "dot4.b");
+
+  // Saturating case: use the accumulator and set clamp to true
+  Value *Clamp = ConstantInt::getTrue(Ctx);
+
+  Intrinsic::ID DotIID =
+      IsSigned ? Intrinsic::amdgcn_sdot4 : Intrinsic::amdgcn_udot4;
+  Function *DotFn =
+      Intrinsic::getOrInsertDeclaration(F.getParent(), DotIID, {});
+
+  Value *Dot = Builder.CreateCall(DotFn, {ASrc, BSrc, Accum, Clamp}, "dot4");
+
+  I.replaceAllUsesWith(Dot);
+  DeadVals.push_back(&I);
+  // The reduce.add will be dead after this and cleaned up later
+  if (ReduceInst->use_empty())
+    DeadVals.push_back(ReduceInst);
+
+  return true;
+}
+
 char AMDGPUCodeGenPrepare::ID = 0;
 
 FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
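For reference, this is the IR-level rewrite visitVectorReduceAdd performs, sketched
on the unsigned <4 x i8> pattern exercised by the tests. Value names such as
%dot4.a mirror the names the pass creates; treat this as an illustrative sketch,
not verbatim pass output:

  ; before: reduce of a widened element-wise product
  %a_ext = zext <4 x i8> %a to <4 x i32>
  %b_ext = zext <4 x i8> %b to <4 x i32>
  %mul = mul <4 x i32> %a_ext, %b_ext
  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)

  ; after: packed i32 operands, zero accumulator, clamp = false
  %dot4.a = bitcast <4 x i8> %a to i32
  %dot4.b = bitcast <4 x i8> %b to i32
  %dot4 = call i32 @llvm.amdgcn.udot4(i32 %dot4.a, i32 %dot4.b, i32 0, i1 false)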

>From d4b4222f42a15e47a3f59dd1558a1816487eeb36 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 19:53:58 +0530
Subject: [PATCH 14/16] Update idot2-sat.ll

---
 llvm/test/CodeGen/AMDGPU/idot2-sat.ll | 416 +++++++++++++++-----------
 1 file changed, 246 insertions(+), 170 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/idot2-sat.ll b/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
index 0d1da43a06912..740ec4152eba4 100644
--- a/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot2-sat.ll
@@ -133,6 +133,174 @@ entry:
   ret i32 %add8.i
 }
 
+;------------------------------------------------------------------------------
+; DOT2 TESTS WITH I8 PROMOTION (i8 -> i16)
+;------------------------------------------------------------------------------
+
+; Unsigned dot2 with i8 inputs promoted to i16, with saturation
+define i32 @udot2_i8_promoted_sat(<2 x i8> %a, <2 x i8> %b, i32 %c) {
+; GFX9-DL-LABEL: udot2_i8_promoted_sat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX9-DL-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v0, v2, v1
+; GFX9-DL-NEXT:    v_add_u32_e64 v0, v0, v4 clamp
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot2_i8_promoted_sat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX10-DL-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mad_u32_u24 v0, v0, v2, v1
+; GFX10-DL-NEXT:    v_add_nc_u32_e64 v0, v0, v4 clamp
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot2_i8_promoted_sat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX950-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mad_u32_u24 v0, v0, v2, v1
+; GFX950-NEXT:    v_add_u32_e64 v0, v0, v4 clamp
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a.ext16 = zext <2 x i8> %a to <2 x i16>
+  %b.ext16 = zext <2 x i8> %b to <2 x i16>
+  %a.ext32 = zext <2 x i16> %a.ext16 to <2 x i32>
+  %b.ext32 = zext <2 x i16> %b.ext16 to <2 x i32>
+  %mul = mul <2 x i32> %a.ext32, %b.ext32
+  %e0 = extractelement <2 x i32> %mul, i64 0
+  %e1 = extractelement <2 x i32> %mul, i64 1
+  %sum = add i32 %e0, %e1
+  %result = tail call i32 @llvm.uadd.sat.i32(i32 %sum, i32 %c)
+  ret i32 %result
+}
+
+; Signed dot2 with i8 inputs promoted to i16, with saturation
+define i32 @sdot2_i8_promoted_sat(<2 x i8> %a, <2 x i8> %b, i32 %c) {
+; GFX9-DL-LABEL: sdot2_i8_promoted_sat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v1, sext(v1), sext(v3) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v0, v2, v1
+; GFX9-DL-NEXT:    v_add_i32 v0, v0, v4 clamp
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot2_i8_promoted_sat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v1, sext(v1), sext(v3) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mad_i32_i24 v0, v0, v2, v1
+; GFX10-DL-NEXT:    v_add_nc_i32 v0, v0, v4 clamp
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot2_i8_promoted_sat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX950-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v1, sext(v1), sext(v3) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mad_i32_i24 v0, v0, v2, v1
+; GFX950-NEXT:    v_add_i32 v0, v0, v4 clamp
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a.ext16 = sext <2 x i8> %a to <2 x i16>
+  %b.ext16 = sext <2 x i8> %b to <2 x i16>
+  %a.ext32 = sext <2 x i16> %a.ext16 to <2 x i32>
+  %b.ext32 = sext <2 x i16> %b.ext16 to <2 x i32>
+  %mul = mul <2 x i32> %a.ext32, %b.ext32
+  %e0 = extractelement <2 x i32> %mul, i64 0
+  %e1 = extractelement <2 x i32> %mul, i64 1
+  %sum = add i32 %e0, %e1
+  %result = tail call i32 @llvm.sadd.sat.i32(i32 %sum, i32 %c)
+  ret i32 %result
+}
+
+; Unsigned dot2 with i8 inputs promoted to i16, without saturation
+define i32 @udot2_i8_promoted_unsat(<2 x i8> %a, <2 x i8> %b, i32 %c) {
+; GFX9-DL-LABEL: udot2_i8_promoted_unsat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_add3_u32 v0, v0, v1, v4
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: udot2_i8_promoted_unsat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v4
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: udot2_i8_promoted_unsat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mul_u32_u24_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_add3_u32 v0, v0, v1, v4
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a.ext16 = zext <2 x i8> %a to <2 x i16>
+  %b.ext16 = zext <2 x i8> %b to <2 x i16>
+  %a.ext32 = zext <2 x i16> %a.ext16 to <2 x i32>
+  %b.ext32 = zext <2 x i16> %b.ext16 to <2 x i32>
+  %mul = mul <2 x i32> %a.ext32, %b.ext32
+  %e0 = extractelement <2 x i32> %mul, i64 0
+  %e1 = extractelement <2 x i32> %mul, i64 1
+  %sum = add i32 %e0, %e1
+  %result = add i32 %sum, %c
+  ret i32 %result
+}
+
+; Signed dot2 with i8 inputs promoted to i16, without saturation
+define i32 @sdot2_i8_promoted_unsat(<2 x i8> %a, <2 x i8> %b, i32 %c) {
+; GFX9-DL-LABEL: sdot2_i8_promoted_unsat:
+; GFX9-DL:       ; %bb.0: ; %entry
+; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v1, sext(v1), sext(v3) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-DL-NEXT:    v_add3_u32 v0, v0, v1, v4
+; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DL-LABEL: sdot2_i8_promoted_unsat:
+; GFX10-DL:       ; %bb.0: ; %entry
+; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v1, sext(v1), sext(v3) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v4
+; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: sdot2_i8_promoted_unsat:
+; GFX950:       ; %bb.0: ; %entry
+; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_mul_i32_i24_sdwa v1, sext(v1), sext(v3) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX950-NEXT:    v_add3_u32 v0, v0, v1, v4
+; GFX950-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a.ext16 = sext <2 x i8> %a to <2 x i16>
+  %b.ext16 = sext <2 x i8> %b to <2 x i16>
+  %a.ext32 = sext <2 x i16> %a.ext16 to <2 x i32>
+  %b.ext32 = sext <2 x i16> %b.ext16 to <2 x i32>
+  %mul = mul <2 x i32> %a.ext32, %b.ext32
+  %e0 = extractelement <2 x i32> %mul, i64 0
+  %e1 = extractelement <2 x i32> %mul, i64 1
+  %sum = add i32 %e0, %e1
+  %result = add i32 %sum, %c
+  ret i32 %result
+}
+
 ;------------------------------------------------------------------------------
 ; DOT4 TESTS WITH BITCAST FROM I32
 ;------------------------------------------------------------------------------
@@ -142,44 +310,23 @@ define i32 @udot4_unsat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
 ; GFX9-DL-LABEL: udot4_unsat_bitcast:
 ; GFX9-DL:       ; %bb.0: ; %entry
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
-; GFX9-DL-NEXT:    v_and_b32_e32 v4, 0xff, v0
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v5, 8, v1
-; GFX9-DL-NEXT:    v_and_b32_e32 v6, 0xff, v1
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT:    v_mad_u32_u24 v1, v4, v6, v7
-; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v3, v5, v0
-; GFX9-DL-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX9-DL-NEXT:    v_dot4_u32_u8 v0, v0, v1, 0
+; GFX9-DL-NEXT:    v_add_u32_e32 v0, v0, v2
 ; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-DL-LABEL: udot4_unsat_bitcast:
 ; GFX10-DL:       ; %bb.0: ; %entry
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-DL-NEXT:    v_mov_b32_e32 v3, 0xffff
-; GFX10-DL-NEXT:    v_and_b32_e32 v5, 0xff, v0
-; GFX10-DL-NEXT:    v_and_b32_e32 v6, 0xff, v1
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX10-DL-NEXT:    v_and_b32_sdwa v4, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX10-DL-NEXT:    v_and_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX10-DL-NEXT:    v_mad_u32_u24 v1, v5, v6, v7
-; GFX10-DL-NEXT:    v_mad_u32_u24 v0, v4, v3, v0
-; GFX10-DL-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX10-DL-NEXT:    v_dot4_u32_u8 v0, v0, v1, 0
+; GFX10-DL-NEXT:    v_add_nc_u32_e32 v0, v0, v2
 ; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: udot4_unsat_bitcast:
 ; GFX950:       ; %bb.0: ; %entry
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
-; GFX950-NEXT:    v_and_b32_e32 v4, 0xff, v0
-; GFX950-NEXT:    v_lshrrev_b16_e32 v5, 8, v1
-; GFX950-NEXT:    v_and_b32_e32 v6, 0xff, v1
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX950-NEXT:    v_mad_u32_u24 v1, v4, v6, v7
-; GFX950-NEXT:    v_mad_u32_u24 v0, v3, v5, v0
-; GFX950-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX950-NEXT:    v_dot4_u32_u8 v0, v0, v1, 0
+; GFX950-NEXT:    s_nop 2
+; GFX950-NEXT:    v_add_u32_e32 v0, v0, v2
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %a_vec = bitcast i32 %a_packed to <4 x i8>
@@ -197,49 +344,25 @@ define i32 @sdot4_unsat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
 ; GFX9-DL-LABEL: sdot4_unsat_bitcast:
 ; GFX9-DL:       ; %bb.0: ; %entry
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
-; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v5, v0, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v6, v1, 0, 8
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v7, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
-; GFX9-DL-NEXT:    v_mad_i32_i24 v1, v5, v6, v7
-; GFX9-DL-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX9-DL-NEXT:    v_dot4_i32_i8 v0, v0, v1, 0
+; GFX9-DL-NEXT:    v_add_u32_e32 v0, v0, v2
 ; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-DL-LABEL: sdot4_unsat_bitcast:
 ; GFX10-DL:       ; %bb.0: ; %entry
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-DL-NEXT:    v_lshrrev_b16 v3, 8, v0
-; GFX10-DL-NEXT:    v_lshrrev_b16 v4, 8, v1
-; GFX10-DL-NEXT:    v_bfe_i32 v5, v0, 0, 8
-; GFX10-DL-NEXT:    v_bfe_i32 v6, v1, 0, 8
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v7, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX10-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
-; GFX10-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v3, v4, v7
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, v5, v6, v0
-; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v2
+; GFX10-DL-NEXT:    v_mov_b32_e32 v3, 0
+; GFX10-DL-NEXT:    v_dot4c_i32_i8 v3, v0, v1
+; GFX10-DL-NEXT:    v_add_nc_u32_e32 v0, v3, v2
 ; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: sdot4_unsat_bitcast:
 ; GFX950:       ; %bb.0: ; %entry
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
-; GFX950-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
-; GFX950-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v5, v0, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v3, v3, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v6, v1, 0, 8
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v7, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX950-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
-; GFX950-NEXT:    v_mad_i32_i24 v1, v5, v6, v7
-; GFX950-NEXT:    v_add3_u32 v0, v1, v0, v2
+; GFX950-NEXT:    v_mov_b32_e32 v3, 0
+; GFX950-NEXT:    v_dot4c_i32_i8_e32 v3, v0, v1
+; GFX950-NEXT:    s_nop 2
+; GFX950-NEXT:    v_add_u32_e32 v0, v3, v2
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %a_vec = bitcast i32 %a_packed to <4 x i8>
@@ -257,41 +380,19 @@ define i32 @udot4_sat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
 ; GFX9-DL-LABEL: udot4_sat_bitcast:
 ; GFX9-DL:       ; %bb.0: ; %entry
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v4, 8, v1
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v3, v4, v0
-; GFX9-DL-NEXT:    v_add3_u32 v0, v5, v6, v0
-; GFX9-DL-NEXT:    v_add_u32_e64 v0, v0, v2 clamp
+; GFX9-DL-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 clamp
 ; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-DL-LABEL: udot4_sat_bitcast:
 ; GFX10-DL:       ; %bb.0: ; %entry
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-DL-NEXT:    v_mov_b32_e32 v3, 0xffff
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX10-DL-NEXT:    v_and_b32_sdwa v4, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX10-DL-NEXT:    v_and_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX10-DL-NEXT:    v_mad_u32_u24 v1, v4, v3, v5
-; GFX10-DL-NEXT:    v_add3_u32 v0, v6, v0, v1
-; GFX10-DL-NEXT:    v_add_nc_u32_e64 v0, v0, v2 clamp
+; GFX10-DL-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 clamp
 ; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: udot4_sat_bitcast:
 ; GFX950:       ; %bb.0: ; %entry
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v0
-; GFX950-NEXT:    v_lshrrev_b16_e32 v4, 8, v1
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX950-NEXT:    v_mad_u32_u24 v0, v3, v4, v0
-; GFX950-NEXT:    v_add3_u32 v0, v5, v6, v0
-; GFX950-NEXT:    v_add_u32_e64 v0, v0, v2 clamp
+; GFX950-NEXT:    v_dot4_u32_u8 v0, v0, v1, v2 clamp
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %a_vec = bitcast i32 %a_packed to <4 x i8>
@@ -309,46 +410,19 @@ define i32 @sdot4_sat_bitcast(i32 %a_packed, i32 %b_packed, i32 %c) {
 ; GFX9-DL-LABEL: sdot4_sat_bitcast:
 ; GFX9-DL:       ; %bb.0: ; %entry
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
-; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v5, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v6, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
-; GFX9-DL-NEXT:    v_add3_u32 v0, v5, v6, v0
-; GFX9-DL-NEXT:    v_add_i32 v0, v0, v2 clamp
+; GFX9-DL-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2 clamp
 ; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-DL-LABEL: sdot4_sat_bitcast:
 ; GFX10-DL:       ; %bb.0: ; %entry
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-DL-NEXT:    v_lshrrev_b16 v3, 8, v0
-; GFX10-DL-NEXT:    v_lshrrev_b16 v4, 8, v1
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v5, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v6, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX10-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
-; GFX10-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v3, v4, v5
-; GFX10-DL-NEXT:    v_add3_u32 v0, v6, v0, v1
-; GFX10-DL-NEXT:    v_add_nc_i32 v0, v0, v2 clamp
+; GFX10-DL-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2 clamp
 ; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: sdot4_sat_bitcast:
 ; GFX950:       ; %bb.0: ; %entry
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
-; GFX950-NEXT:    v_lshrrev_b16_e32 v4, 8, v0
-; GFX950-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v3, v3, 0, 8
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v5, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v6, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX950-NEXT:    v_mad_i32_i24 v0, v4, v3, v0
-; GFX950-NEXT:    v_add3_u32 v0, v5, v6, v0
-; GFX950-NEXT:    v_add_i32 v0, v0, v2 clamp
+; GFX950-NEXT:    v_dot4_i32_i8 v0, v0, v1, v2 clamp
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %a_vec = bitcast i32 %a_packed to <4 x i8>
@@ -508,43 +582,43 @@ define i32 @udot4_v4i8_arg(<4 x i8> %a, <4 x i8> %b, i32 %c) {
 ; GFX9-DL-LABEL: udot4_v4i8_arg:
 ; GFX9-DL:       ; %bb.0: ; %entry
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-DL-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GFX9-DL-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GFX9-DL-NEXT:    v_and_b32_e32 v4, 0xff, v4
-; GFX9-DL-NEXT:    v_and_b32_e32 v5, 0xff, v5
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-DL-NEXT:    v_mul_u32_u24_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-DL-NEXT:    v_mad_u32_u24 v1, v1, v5, v3
-; GFX9-DL-NEXT:    v_mad_u32_u24 v0, v0, v4, v2
-; GFX9-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX9-DL-NEXT:    s_mov_b32 s4, 0xc0c0004
+; GFX9-DL-NEXT:    v_perm_b32 v4, v4, v5, s4
+; GFX9-DL-NEXT:    v_perm_b32 v5, v6, v7, s4
+; GFX9-DL-NEXT:    v_perm_b32 v0, v0, v1, s4
+; GFX9-DL-NEXT:    v_perm_b32 v1, v2, v3, s4
+; GFX9-DL-NEXT:    v_lshl_or_b32 v4, v5, 16, v4
+; GFX9-DL-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-DL-NEXT:    v_dot4_u32_u8 v0, v0, v4, 0
+; GFX9-DL-NEXT:    v_add_u32_e32 v0, v0, v8
 ; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-DL-LABEL: udot4_v4i8_arg:
 ; GFX10-DL:       ; %bb.0: ; %entry
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-DL-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GFX10-DL-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GFX10-DL-NEXT:    v_and_b32_e32 v4, 0xff, v4
-; GFX10-DL-NEXT:    v_and_b32_e32 v5, 0xff, v5
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX10-DL-NEXT:    v_mul_u32_u24_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX10-DL-NEXT:    v_mad_u32_u24 v1, v1, v5, v3
-; GFX10-DL-NEXT:    v_mad_u32_u24 v0, v0, v4, v2
-; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX10-DL-NEXT:    v_perm_b32 v4, v4, v5, 0xc0c0004
+; GFX10-DL-NEXT:    v_perm_b32 v5, v6, v7, 0xc0c0004
+; GFX10-DL-NEXT:    v_perm_b32 v0, v0, v1, 0xc0c0004
+; GFX10-DL-NEXT:    v_perm_b32 v1, v2, v3, 0xc0c0004
+; GFX10-DL-NEXT:    v_lshl_or_b32 v2, v5, 16, v4
+; GFX10-DL-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX10-DL-NEXT:    v_dot4_u32_u8 v0, v0, v2, 0
+; GFX10-DL-NEXT:    v_add_nc_u32_e32 v0, v0, v8
 ; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: udot4_v4i8_arg:
 ; GFX950:       ; %bb.0: ; %entry
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GFX950-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GFX950-NEXT:    v_and_b32_e32 v4, 0xff, v4
-; GFX950-NEXT:    v_and_b32_e32 v5, 0xff, v5
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX950-NEXT:    v_mul_u32_u24_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX950-NEXT:    v_mad_u32_u24 v1, v1, v5, v3
-; GFX950-NEXT:    v_mad_u32_u24 v0, v0, v4, v2
-; GFX950-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX950-NEXT:    s_mov_b32 s0, 0xc0c0004
+; GFX950-NEXT:    v_perm_b32 v4, v4, v5, s0
+; GFX950-NEXT:    v_perm_b32 v5, v6, v7, s0
+; GFX950-NEXT:    v_perm_b32 v0, v0, v1, s0
+; GFX950-NEXT:    v_perm_b32 v1, v2, v3, s0
+; GFX950-NEXT:    v_lshl_or_b32 v4, v5, 16, v4
+; GFX950-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX950-NEXT:    v_dot4_u32_u8 v0, v0, v4, 0
+; GFX950-NEXT:    s_nop 2
+; GFX950-NEXT:    v_add_u32_e32 v0, v0, v8
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %a_ext = zext <4 x i8> %a to <4 x i32>
@@ -559,43 +633,45 @@ define i32 @sdot4_v4i8_arg(<4 x i8> %a, <4 x i8> %b, i32 %c) {
 ; GFX9-DL-LABEL: sdot4_v4i8_arg:
 ; GFX9-DL:       ; %bb.0: ; %entry
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v1, v1, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX9-DL-NEXT:    v_bfe_i32 v5, v5, 0, 8
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-DL-NEXT:    v_mul_i32_i24_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-DL-NEXT:    v_mad_i32_i24 v1, v1, v5, v3
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v0, v4, v2
-; GFX9-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX9-DL-NEXT:    s_mov_b32 s4, 0xc0c0004
+; GFX9-DL-NEXT:    v_perm_b32 v4, v4, v5, s4
+; GFX9-DL-NEXT:    v_perm_b32 v5, v6, v7, s4
+; GFX9-DL-NEXT:    v_perm_b32 v0, v0, v1, s4
+; GFX9-DL-NEXT:    v_perm_b32 v1, v2, v3, s4
+; GFX9-DL-NEXT:    v_lshl_or_b32 v4, v5, 16, v4
+; GFX9-DL-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-DL-NEXT:    v_dot4_i32_i8 v0, v0, v4, 0
+; GFX9-DL-NEXT:    v_add_u32_e32 v0, v0, v8
 ; GFX9-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-DL-LABEL: sdot4_v4i8_arg:
 ; GFX10-DL:       ; %bb.0: ; %entry
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GFX10-DL-NEXT:    v_bfe_i32 v1, v1, 0, 8
-; GFX10-DL-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX10-DL-NEXT:    v_bfe_i32 v5, v5, 0, 8
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX10-DL-NEXT:    v_mul_i32_i24_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v1, v5, v3
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, v0, v4, v2
-; GFX10-DL-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX10-DL-NEXT:    v_perm_b32 v4, v4, v5, 0xc0c0004
+; GFX10-DL-NEXT:    v_perm_b32 v5, v6, v7, 0xc0c0004
+; GFX10-DL-NEXT:    v_perm_b32 v0, v0, v1, 0xc0c0004
+; GFX10-DL-NEXT:    v_perm_b32 v1, v2, v3, 0xc0c0004
+; GFX10-DL-NEXT:    v_lshl_or_b32 v2, v5, 16, v4
+; GFX10-DL-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX10-DL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-DL-NEXT:    v_dot4c_i32_i8 v1, v0, v2
+; GFX10-DL-NEXT:    v_add_nc_u32_e32 v0, v1, v8
 ; GFX10-DL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: sdot4_v4i8_arg:
 ; GFX950:       ; %bb.0: ; %entry
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v1, v1, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v4, v4, 0, 8
-; GFX950-NEXT:    v_bfe_i32 v5, v5, 0, 8
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX950-NEXT:    v_mul_i32_i24_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX950-NEXT:    v_mad_i32_i24 v1, v1, v5, v3
-; GFX950-NEXT:    v_mad_i32_i24 v0, v0, v4, v2
-; GFX950-NEXT:    v_add3_u32 v0, v0, v1, v8
+; GFX950-NEXT:    s_mov_b32 s0, 0xc0c0004
+; GFX950-NEXT:    v_perm_b32 v4, v4, v5, s0
+; GFX950-NEXT:    v_perm_b32 v5, v6, v7, s0
+; GFX950-NEXT:    v_perm_b32 v0, v0, v1, s0
+; GFX950-NEXT:    v_perm_b32 v1, v2, v3, s0
+; GFX950-NEXT:    v_lshl_or_b32 v4, v5, 16, v4
+; GFX950-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX950-NEXT:    v_mov_b32_e32 v1, 0
+; GFX950-NEXT:    v_dot4c_i32_i8_e32 v1, v0, v4
+; GFX950-NEXT:    s_nop 2
+; GFX950-NEXT:    v_add_u32_e32 v0, v1, v8
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %a_ext = sext <4 x i8> %a to <4 x i32>

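For readers skimming the hunks above: every updated check line traces back to the same IR shape. A minimal sketch of the sdot4_v4i8_arg input, reconstructed from the visible entry block and the function signature (value names after %a_ext are guesses; the authoritative body is in the test file):

  define i32 @sdot4_v4i8_arg(<4 x i8> %a, <4 x i8> %b, i32 %c) {
  entry:
    %a_ext = sext <4 x i8> %a to <4 x i32>
    %b_ext = sext <4 x i8> %b to <4 x i32>
    %mul = mul <4 x i32> %a_ext, %b_ext
    %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
    %res = add i32 %sum, %c
    ret i32 %res
  }

With the new patterns, each <4 x i8> argument is packed into one 32-bit register (the v_perm_b32 / v_lshl_or_b32 pairs), and a single v_dot4 replaces the old per-lane v_bfe_i32 / v_mul_i32_i24_sdwa / v_mad_i32_i24 chain.
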
>From 04aa8132d71b7492190e4e8b086c44e7145e81c9 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 22:13:46 +0530
Subject: [PATCH 15/16] Update SIISelLowering.cpp

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index cdd3662e8d0b4..af298969daa8f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17012,9 +17012,9 @@ SDValue SITargetLowering::performSatAddCombine(SDNode *N,
         SDValue Src0 = DotOp.getOperand(1);
         SDValue Src1 = DotOp.getOperand(2);
 
-        SDValue NewDot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32,
-                                     NewIID, Src0, Src1, Accum,
-                                     DAG.getTargetConstant(1, SL, MVT::i1));
+        SDValue NewDot =
+            DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32, NewIID, Src0,
+                        Src1, Accum, DAG.getTargetConstant(1, SL, MVT::i1));
         return NewDot;
       }
     }

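Although the hunk above is purely a formatting rewrap, it is worth noting what the call builds: the trailing DAG.getTargetConstant(1, SL, MVT::i1) sets the dot intrinsic's clamp operand. In IR terms the combine corresponds roughly to the following (a sketch only; the value names are invented, and the clamp immarg position follows the llvm.amdgcn.udot4 signature):

  ; before: an unclamped dot feeding a saturating add
  %dot = call i32 @llvm.amdgcn.udot4(i32 %pa, i32 %pb, i32 0, i1 false)
  %sat = call i32 @llvm.uadd.sat.i32(i32 %dot, i32 %acc)

  ; after: one dot with the accumulator folded in and the clamp bit set
  %res = call i32 @llvm.amdgcn.udot4(i32 %pa, i32 %pb, i32 %acc, i1 true)
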
>From 8ea7939d822b17d925744864e902364c8aa9d041 Mon Sep 17 00:00:00 2001
From: Addmisol <addmisol9 at gmail.com>
Date: Sat, 28 Mar 2026 22:19:57 +0530
Subject: [PATCH 16/16] Update AMDGPUCodeGenPrepare.cpp

---
 llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 38e796c75fda5..0cc9d7feb0628 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2343,8 +2343,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
   return tryReplaceWithWorkitemId(I, Wave);
 }
 
-/// Helper to match the dot4 pattern: mul(zext/sext <4 x i8>, zext/sext <4 x i8>)
-/// Returns true if pattern matches, sets A, B to the <4 x i8> sources and
+/// Helper to match the dot4 pattern: mul(zext/sext <4 x i8>,
+/// zext/sext <4 x i8>). Returns true on a match, sets A, B to the sources and
 /// IsSigned based on whether sext was used.
 static bool matchDot4Pattern(Value *MulOp, Value *&A, Value *&B,
                              bool &IsSigned) {
@@ -2395,9 +2395,9 @@ static bool matchDot4Pattern(Value *MulOp, Value *&A, Value *&B,
   return true;
 }
 
-/// Try to convert vector.reduce.add(mul(zext/sext <4 x i8>, zext/sext <4 x i8>))
-/// to a dot4 intrinsic call (non-saturating case only).
-/// The saturating case is handled by visitSaturatingAdd which starts at the root.
+/// Try to convert vector.reduce.add(mul(zext/sext <4 x i8>, zext/sext
+/// <4 x i8>)) to a dot4 intrinsic call (non-saturating case only). The
+/// saturating case is handled by visitSaturatingAdd, which starts at the root.
 bool AMDGPUCodeGenPrepareImpl::visitVectorReduceAdd(IntrinsicInst &I) {
   // Check if we have dot4 instructions available
   if (!ST.hasDot7Insts() || (!ST.hasDot1Insts() && !ST.hasDot8Insts()))

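Taken together with the matchDot4Pattern comment above, the saturating sibling that visitSaturatingAdd handles from the root would look roughly like this in IR (illustrative names; the signed variant is shown, hence sext and llvm.sadd.sat):

  %a_ext = sext <4 x i8> %a to <4 x i32>
  %b_ext = sext <4 x i8> %b to <4 x i32>
  %mul = mul <4 x i32> %a_ext, %b_ext
  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
  ; visitSaturatingAdd starts here, at the root of the pattern
  %sat = call i32 @llvm.sadd.sat.i32(i32 %sum, i32 %acc)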