[Mlir-commits] [mlir] [MLIR] Supported sparse MMA intrinsics in the MLIR->NVVM IR->NVPTX flow (PR #168686)

Kirill Vedernikov llvmlistbot at llvm.org
Wed Nov 19 09:52:42 PST 2025


https://github.com/kvederni updated https://github.com/llvm/llvm-project/pull/168686

>From 7e65982ccad4437340c7f93c3d5bf27abe45de25 Mon Sep 17 00:00:00 2001
From: Kirill Vedernikov <kvedernikov at nvidia.com>
Date: Wed, 19 Nov 2025 10:33:32 +0100
Subject: [PATCH 1/4] [MLIR] Supported sparse MMA intrinsics in the MLIR->NVVM
 IR->NVPTX flow

---
 mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td   | 277 +++++++++-
 mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp    | 474 ++++++++++++++++++
 .../test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir | 221 ++++++++
 .../Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir   | 411 +++++++++++++++
 mlir/test/Dialect/LLVMIR/nvvm-mma-sp.mlir     | 390 ++++++++++++++
 5 files changed, 1772 insertions(+), 1 deletion(-)
 create mode 100644 mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir
 create mode 100644 mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir
 create mode 100644 mlir/test/Dialect/LLVMIR/nvvm-mma-sp.mlir

diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 8d5bc7333d47f..b8f69f6b2cb98 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -1955,6 +1955,12 @@ class WMMA_NAME_LDST<string Op, WMMA_REGS Frag, string Layout, int WithStride> {
 /// Generate the signature part of the mma intrinsic name.
 class MMA_SIGNATURE<WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
   list<WMMA_REGS> id_frags = !cond(
+     // FP8/F8F6F4 ops are identified by A,B inputs & accumulator & result type.
+     !or(!eq(A.ptx_elt_type, "e4m3"),
+         !eq(A.ptx_elt_type, "e5m2"),
+         !eq(A.ptx_elt_type, "e3m2"),
+         !eq(A.ptx_elt_type, "e2m3"),
+         !eq(A.ptx_elt_type, "e2m1")): [D, A, B, C],
      // FP16 ops are identified by accumulator & result type.
      !eq(A.ptx_elt_type, "f16") : [D, C],
      // other ops are identified by input types.
@@ -2081,6 +2087,31 @@ class NVVM_MMA_OPS {
   list<list<WMMA_REGS>> all_mma_sync_ops = !listconcat(
             tf32_mma_ops, bf16_mma_ops, f64_mma_ops,
             fp_mma_ops, int_mma_ops, subint_mma_ops, bit_mma_ops);
+
+  list<list<WMMA_REGS>> bf16_mma_sp_ops = MMA_OPS<
+            [GEOM<16,8,16>, GEOM<16,8,32>],
+            ["bf16"], [], ["f32"], []>.ret;
+  list<list<WMMA_REGS>> tf32_mma_sp_ops = MMA_OPS<
+            [GEOM<16,8,8>, GEOM<16,8,16>],
+            ["tf32"], [], ["f32"], []>.ret;
+  list<list<WMMA_REGS>> fp_mma_sp_ops = MMA_OPS<
+            [GEOM<16,8,16>, GEOM<16,8,32>],
+            ["f16"], [], ["f16", "f32"], ["f16", "f32"]>.ret;
+  list<list<WMMA_REGS>> fp8_mma_sp_ops = MMA_OPS<
+            [GEOM<16,8,64>],
+            ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+            ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+            ["f16", "f32"], ["f16", "f32"]>.ret;
+  list<list<WMMA_REGS>> subint_mma_sp_ops = MMA_OPS<
+            [GEOM<16,8,64>, GEOM<16,8,128>],
+            ["s4", "u4"], ["s4", "u4"], ["s32"], []>.ret;
+  list<list<WMMA_REGS>> int_mma_sp_ops = MMA_OPS<
+            [GEOM<16,8,32>, GEOM<16,8,64>],
+            ["s8", "u8"], ["s8", "u8"], ["s32"], []>.ret;
+  list<list<WMMA_REGS>> all_mma_sp_sync_ops = !listconcat(
+            bf16_mma_sp_ops, tf32_mma_sp_ops, fp_mma_sp_ops, fp8_mma_sp_ops,
+            subint_mma_sp_ops, int_mma_sp_ops);
+
 }
 
 def NVVM_MMA_OPS : NVVM_MMA_OPS;
@@ -2187,6 +2218,29 @@ def MMAIntOverflowAttr : EnumAttr<NVVM_Dialect, MMAIntOverflow, "mma_int_overflo
   let assemblyFormat = "`<` $value `>`";
 }
 
+/// Sparse MMA metadata types
+def MMASpMetadataStandard : I32EnumAttrCase<"standard", 0>;
+def MMASpMetadataOrdered : I32EnumAttrCase<"ordered", 1>;
+def MMASpMetadata : I32EnumAttr<"MMASpMetadata", "Sparse MMA metadata ordering",
+  [MMASpMetadataStandard, MMASpMetadataOrdered]> {
+  let genSpecializedAttr = 0;
+  let cppNamespace = "::mlir::NVVM";
+}
+def MMASpMetadataAttr : EnumAttr<NVVM_Dialect, MMASpMetadata, "mma_sp_metadata"> {
+  let assemblyFormat = "`<` $value `>`";
+}
+
+/// MMA kind types (for mixed-precision FP8 operations)
+def MMAKindF8F6F4 : I32EnumAttrCase<"f8f6f4", 0>;
+def MMAKind : I32EnumAttr<"MMAKind", "MMA operation kind",
+  [MMAKindF8F6F4]> {
+  let genSpecializedAttr = 0;
+  let cppNamespace = "::mlir::NVVM";
+}
+def MMAKindAttr : EnumAttr<NVVM_Dialect, MMAKind, "mma_kind"> {
+  let assemblyFormat = "`<` $value `>`";
+}
+
 /// Attribute to hold the MMA shape
 def NVVM_MMAShapeAttr : NVVM_Attr<"MMAShape", "shape"> {
   let summary = "Attribute for MMA operation shape.";
@@ -2330,12 +2384,18 @@ def MMATypeU4 : I32EnumAttrCase<"u4", 7>;
 def MMATypeS4 : I32EnumAttrCase<"s4", 8>;
 def MMATypeBF16 : I32EnumAttrCase<"bf16", 9>;
 def MMATypeF64 : I32EnumAttrCase<"f64", 10>;
+def MMATypeE4M3 : I32EnumAttrCase<"e4m3", 11>;
+def MMATypeE5M2 : I32EnumAttrCase<"e5m2", 12>;
+def MMATypeE3M2 : I32EnumAttrCase<"e3m2", 13>;
+def MMATypeE2M3 : I32EnumAttrCase<"e2m3", 14>;
+def MMATypeE2M1 : I32EnumAttrCase<"e2m1", 15>;
 
 def MMATypes : I32EnumAttr<"MMATypes", "NVVM MMA types",
   [MMATypeF16, MMATypeF32, MMATypeTF32,
   MMATypeBF16, MMATypeS8, MMATypeU8,
   MMATypeS32, MMATypeS4, MMATypeU4,
-  MMATypeB1, MMATypeF64]> {
+  MMATypeB1, MMATypeF64,
+  MMATypeE4M3, MMATypeE5M2, MMATypeE3M2, MMATypeE2M3, MMATypeE2M1]> {
   let genSpecializedAttr = 0;
   let cppNamespace = "::mlir::NVVM";
 }
@@ -2772,6 +2832,221 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
   let hasVerifier = 1;
 }
 
+/// Generate the intrinsic name/enum value of the mma.sp.sync intrinsic.
+class MMA_SP_SYNC_NAME<string Metadata, string Kind, int Satfinite,
+                       WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
+  string signature = MMA_SIGNATURE<A, B, C, D>.ret;
+  string id = "llvm::Intrinsic::nvvm_mma"
+              # "_" # !subst("::", "_", Metadata)
+              # "_" # A.geom
+              # "_row_col"
+              # !if(!ne(Kind, ""), !strconcat("_", !subst("::", "_", Kind)), "")
+              # !if(Satfinite, "_satfinite", "")
+              # signature;
+}
+
+// Returns true if this combination of layout/kind/satf for MMA.SP ops is supported;
+// false otherwise.
+// E.g.
+// if NVVM_MMA_SP_SUPPORTED<...>.ret then
+//   def : FOO<>; // The record will only be defined for supported ops.
+//
+class NVVM_MMA_SP_SUPPORTED<list<WMMA_REGS> frags, string metadata,
+                            string kind, int satf> {
+  // MMA.SP ops check both layouts.
+  string a_type = frags[0].ptx_elt_type;
+  string b_type = frags[1].ptx_elt_type;
+  string c_type = frags[2].ptx_elt_type;
+  string d_type = frags[3].ptx_elt_type;
+  string geom = frags[0].geom;
+
+  bit is_int = !or(!eq(a_type, "s8"),
+                   !eq(a_type, "u8"),
+                   !eq(a_type, "s4"),
+                   !eq(a_type, "u4"));
+
+  bit ret = !cond(
+
+    // Limit satf to valid types
+    !and(!eq(satf, 1),
+         !eq(is_int, 0)): false,
+
+    // f16/bf16/tf32 require A and B to be the same type.
+    !and(!or(!eq(a_type, "f16"),
+             !eq(a_type, "bf16"),
+             !eq(a_type, "tf32")),
+         !ne(a_type, b_type)): false,
+
+    // m16n8k16, m16n8k32 and m16n8k64 require C and D to be the same type.
+    !and(!or(!eq(geom, "m16n8k16"),
+             !eq(geom, "m16n8k32"),
+             !eq(geom, "m16n8k64")),
+         !ne(c_type, d_type)): false,
+
+    !and(!eq(kind, ""),
+         !or(!eq(a_type, "e3m2"),
+             !eq(a_type, "e2m3"),
+             !eq(a_type, "e2m1"),
+             !eq(b_type, "e3m2"),
+             !eq(b_type, "e2m3"),
+             !eq(b_type, "e2m1"))): false,
+
+    !and(!eq(kind, ""),
+         !eq(geom, "m16n8k64"),
+         !or(!eq(c_type, "f16"),
+             !eq(d_type, "f16"))): false,
+
+    !and(!ne(kind, ""),
+         !or(!eq(metadata, "sp"),
+             !ne(geom, "m16n8k64"),
+             !eq(is_int, 1))): false,
+
+    // All others are OK.
+    true: true
+  );
+}
+
+/// Helper to create the mapping between the configuration and the mma.sp.sync
+/// intrinsic enum value.
+class MMA_SP_SYNC_INTR {
+  list<list<list<list<string>>>> cond0 =
+    !foreach(op, NVVM_MMA_OPS.all_mma_sp_sync_ops,
+      !foreach(metadata, ["sp", "sp::ordered_metadata"],
+        !foreach(kind, ["", "kind::f8f6f4"],
+          !foreach (satf, [0, 1],
+            !if(NVVM_MMA_SP_SUPPORTED<op, metadata, kind, satf>.ret,
+                "if (m == " # op[0].m # " && n == " # op[0].n # " && k == " # op[0].k
+                # " && \"" # op[0].ptx_elt_type # "\" == eltypeA"
+                # " && \"" # op[1].ptx_elt_type # "\" == eltypeB"
+                # " && \"" # op[2].ptx_elt_type # "\" == eltypeC"
+                # " && \"" # op[3].ptx_elt_type # "\" == eltypeD"
+                # " && (satf.has_value()  ? " # satf # " == static_cast<int>(*satf) : true)"
+                # " && " # !if(!eq(metadata, "sp"), "!orderedMetadata", "orderedMetadata")
+                # " && " # !if(!eq(kind, ""), "!hasKind", "hasKind") # ")\n"
+                # "  return " #
+                MMA_SP_SYNC_NAME<metadata, kind, satf, op[0], op[1], op[2], op[3]>.id # ";",
+                "") // if supported
+          ) // satf
+        ) // kind
+      ) // metadata
+    ); // all_mma_sp_sync_ops
+  list<list<list<string>>> f1 = !foldl([[[""]]], cond0, acc, el,
+                                       !listconcat(acc, el));
+  list<list<string>> f2 = !foldl([[""]], f1, acc, el, !listconcat(acc, el));
+  list<string> f3 = !foldl([""], f2, acc, el, !listconcat(acc, el));
+  string id = !foldl("", f3, acc, el, acc # "\n" # el);
+}
+
+def NVVM_MmaSpOp : NVVM_Op<"mma.sp.sync", [AttrSizedOperandSegments]> {
+
+  let summary = "cooperative sparse matrix-multiply and accumulate";
+
+  let description = [{
+    The `nvvm.mma.sp.sync` operation collectively performs the sparse operation
+    `D = matmul(A_sparse, B) + C` using all threads in a warp.
+
+    This operation is similar to `nvvm.mma.sync` but with structured sparsity
+    in the A operand. The sparsity follows the 2:4 structured sparse pattern
+    where 2 out of every 4 elements are non-zero.
+
+    All the threads in the warp must execute the same `mma.sp.sync` operation.
+
+    The `sparseMetadata` operand provides the sparsity indices that indicate
+    which elements in the A operand are non-zero. The `sparsitySelector`
+    controls how the indices are distributed among threads in the warp and
+    should typically be 0 or 1.
+
+    The optional `metadataType` attribute specifies the metadata ordering:
+    - `standard` (default): Uses standard sparse metadata ordering
+    - `ordered`: Uses ordered metadata (PTX ISA 8.5+, sm_90+)
+    
+    The optional `kind` attribute specifies mixed-precision modes for FP8 operations:
+    - `f8f6f4`: Enables e3m2, e2m3, e2m1 FP8 types and f16 accumulator (PTX ISA 8.7+, sm_90+)
+    - Only valid with ordered metadata and m16n8k64 shape
+
+    The shapes, layouts, and data types follow the same constraints as the
+    regular `nvvm.mma.sync` operation, but the A operand contains only the
+    non-zero elements in compressed format.
+
+    Example:
+    ```mlir
+    %d = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                          sparseMetadata[%meta] selector[%sel]
+                          {shape = {k = 32 : i32, m = 16 : i32, n = 8 : i32}}
+        : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+    
+    // With ordered metadata:
+    %d = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                          sparseMetadata[%meta] selector[%sel]
+                          {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                           shape = {k = 32 : i32, m = 16 : i32, n = 8 : i32}}
+        : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+    ```
+  }];
+
+  let results = (outs LLVM_AnyStruct:$res);
+  let arguments = (ins NVVM_MMAShapeAttr:$shape,
+             OptionalAttr<MMAIntOverflowAttr>:$intOverflowBehavior,
+             OptionalAttr<MMATypesAttr>:$multiplicandAPtxType,
+             OptionalAttr<MMATypesAttr>:$multiplicandBPtxType,
+             OptionalAttr<MMASpMetadataAttr>:$metadataType,
+             OptionalAttr<MMAKindAttr>:$kind,
+             Variadic<LLVM_Type>:$operandA,
+             Variadic<LLVM_Type>:$operandB,
+             Variadic<LLVM_Type>:$operandC,
+             I32:$sparseMetadata,
+             I32:$sparsitySelector);
+
+  let extraClassDeclaration = !strconcat([{
+      static llvm::Intrinsic::ID getIntrinsicID(
+            int64_t m, int64_t n, uint64_t k,
+            std::optional<MMAIntOverflow> satf,
+            std::optional<MMASpMetadata> metadata,
+            std::optional<MMAKind> kind,
+            mlir::NVVM::MMATypes eltypeAEnum, mlir::NVVM::MMATypes eltypeBEnum,
+            mlir::NVVM::MMATypes eltypeCEnum, mlir::NVVM::MMATypes eltypeDEnum) {
+        llvm::StringRef eltypeA = stringifyEnum(eltypeAEnum);
+        llvm::StringRef eltypeB = stringifyEnum(eltypeBEnum);
+        llvm::StringRef eltypeC = stringifyEnum(eltypeCEnum);
+        llvm::StringRef eltypeD = stringifyEnum(eltypeDEnum);
+        bool orderedMetadata = metadata.has_value() &&
+                               *metadata == MMASpMetadata::ordered;
+        bool hasKind = kind.has_value();
+        }],
+        MMA_SP_SYNC_INTR<>.id, [{
+          return 0;
+      }
+
+      static std::optional<mlir::NVVM::MMATypes> inferOperandMMAType(Type operandElType,
+        bool isAccumulator);
+
+      MMATypes accumPtxType();
+      MMATypes resultPtxType();
+
+      static mlir::NVVM::IDArgPair
+      getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
+                            llvm::IRBuilderBase& builder);
+    }]);
+
+  let builders = [
+      OpBuilder<(ins  "Type":$resultType, "ValueRange":$operandA,
+        "ValueRange":$operandB, "ValueRange":$operandC,
+        "Value":$sparseMetadata, "Value":$sparsitySelector,
+        "ArrayRef<int64_t>":$shape,
+        "std::optional<MMAIntOverflow>":$intOverflow,
+        "std::optional<std::array<MMATypes, 2>>":$multiplicandPtxTypes)>
+    ];
+
+  string llvmBuilder = [{
+    auto [id, args] = NVVM::MmaSpOp::getIntrinsicIDAndArgs(
+                      *op, moduleTranslation, builder);
+    $res = createIntrinsicCall(builder, id, args);
+  }];
+
+  let hasCustomAssemblyFormat = 1;
+  let hasVerifier = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // NVVM TMA Ops
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 7ac427dbe3941..8db724dd0a25b 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -940,6 +940,480 @@ LogicalResult MmaOp::verify() {
   return success();
 }
 
+MMATypes MmaSpOp::accumPtxType() {
+  std::optional<mlir::NVVM::MMATypes> val = MmaOp::inferOperandMMAType(
+      getODSOperands(2).getTypes().front(), /*isAccumulator=*/true);
+  assert(val.has_value() && "accumulator PTX type should always be inferrable");
+  return val.value();
+}
+
+MMATypes MmaSpOp::resultPtxType() {
+  std::optional<mlir::NVVM::MMATypes> val =
+      MmaOp::inferOperandMMAType(getResult().getType(), /*isAccumulator=*/true);
+  assert(val.has_value() && "result PTX type should always be inferrable");
+  return val.value();
+}
+
+mlir::NVVM::IDArgPair
+MmaSpOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
+                                llvm::IRBuilderBase &builder) {
+  auto thisOp = cast<NVVM::MmaSpOp>(op);
+
+  // Get operands
+  llvm::SmallVector<llvm::Value *> args;
+  for (mlir::Value v : thisOp.getOperands())
+    args.push_back(mt.lookupValue(v));
+
+  // Get intrinsic ID using the existing getIntrinsicID method
+  auto intId = MmaSpOp::getIntrinsicID(
+      thisOp.getShape().getM(), thisOp.getShape().getN(), thisOp.getShape().getK(),
+      thisOp.getIntOverflowBehavior(),
+      thisOp.getMetadataType(),
+      thisOp.getKind(),
+      *thisOp.getMultiplicandAPtxType(),
+      *thisOp.getMultiplicandBPtxType(),
+      thisOp.accumPtxType(),
+      thisOp.resultPtxType());
+
+  return {intId, args};
+}
+
+void MmaSpOp::print(OpAsmPrinter &p) {
+  SmallVector<Type, 4> regTypes;
+  struct OperandFragment {
+    StringRef operandName;
+    StringRef ptxTypeAttr;
+    SmallVector<Value, 4> regs;
+    explicit OperandFragment(StringRef name, StringRef ptxTypeName)
+        : operandName(name), ptxTypeAttr(ptxTypeName) {}
+  };
+
+  std::array<OperandFragment, 5> frags{
+      OperandFragment("A", getMultiplicandAPtxTypeAttrName()),
+      OperandFragment("B", getMultiplicandBPtxTypeAttrName()),
+      OperandFragment("C", ""),
+      OperandFragment("sparseMetadata", ""),
+      OperandFragment("selector", "")};
+  SmallVector<StringRef, 4> ignoreAttrNames{
+      mlir::NVVM::MmaSpOp::getOperandSegmentSizeAttr()};
+
+  // Handle variadic operands A, B, C
+  for (unsigned fragIdx = 0; fragIdx < 3; fragIdx++) {
+    auto &frag = frags[fragIdx];
+    auto varOperandSpec = getODSOperandIndexAndLength(fragIdx);
+    for (auto operandIdx = varOperandSpec.first;
+         operandIdx < varOperandSpec.first + varOperandSpec.second;
+         operandIdx++) {
+      frag.regs.push_back(this->getOperand(operandIdx));
+      if (operandIdx == varOperandSpec.first) {
+        regTypes.push_back(this->getOperand(operandIdx).getType());
+      }
+    }
+    std::optional<MMATypes> inferredType =
+        MmaOp::inferOperandMMAType(regTypes.back(), /*isAccumulator=*/fragIdx >= 2);
+    if (inferredType)
+      ignoreAttrNames.push_back(frag.ptxTypeAttr);
+  }
+
+  // Handle sparse metadata and selector (single operands)
+  frags[3].regs.push_back(getSparseMetadata());
+  frags[4].regs.push_back(getSparsitySelector());
+
+  auto printMmaSpOperand = [&](const OperandFragment &frag) -> void {
+    p << " " << frag.operandName;
+    p << "[";
+    p.printOperands(frag.regs);
+    p << "]";
+  };
+
+  for (const auto &frag : frags)
+    printMmaSpOperand(frag);
+
+  p.printOptionalAttrDict((*this)->getAttrs(), ignoreAttrNames);
+  p << " : ";
+  p << "(";
+  for (int i = 0; i < 3; ++i) {
+    p << regTypes[i];
+    if (i < 2) p << ", ";
+  }
+  p << ") -> " << getResult().getType();
+}
+
+void MmaSpOp::build(OpBuilder &builder, OperationState &result,
+                Type resultType, ValueRange operandA, ValueRange operandB,
+                ValueRange operandC, Value sparseMetadata, Value sparsitySelector,
+                ArrayRef<int64_t> shape,
+                std::optional<MMAIntOverflow> intOverflow,
+                std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes) {
+
+  assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
+  MLIRContext *ctx = builder.getContext();
+  result.addAttribute(
+      "shape", builder.getAttr<MMAShapeAttr>(shape[0], shape[1], shape[2]));
+
+  result.addOperands(operandA);
+  result.addOperands(operandB);
+  result.addOperands(operandC);
+  result.addOperands(sparseMetadata);
+  result.addOperands(sparsitySelector);
+
+  if (multiplicandPtxTypes) {
+    result.addAttribute("multiplicandAPtxType",
+                        MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[0]));
+    result.addAttribute("multiplicandBPtxType",
+                        MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[1]));
+  } else {
+    if (auto res = MmaOp::inferOperandMMAType(operandA[0].getType(), false))
+      result.addAttribute("multiplicandAPtxType", MMATypesAttr::get(ctx, *res));
+    if (auto res = MmaOp::inferOperandMMAType(operandB[0].getType(), false))
+      result.addAttribute("multiplicandBPtxType", MMATypesAttr::get(ctx, *res));
+  }
+
+  if (intOverflow.has_value())
+    result.addAttribute("intOverflowBehavior",
+                        MMAIntOverflowAttr::get(ctx, *intOverflow));
+
+  result.addTypes(resultType);
+  result.addAttribute(
+      MmaSpOp::getOperandSegmentSizeAttr(),
+      builder.getDenseI32ArrayAttr({static_cast<int32_t>(operandA.size()),
+                                    static_cast<int32_t>(operandB.size()),
+                                    static_cast<int32_t>(operandC.size()),
+                                    1, 1})); // sparseMetadata and sparsitySelector
+}
+
+ParseResult MmaSpOp::parse(OpAsmParser &parser, OperationState &result) {
+  struct OperandFragment {
+    std::optional<MMATypes> elemtype;
+    SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
+    SmallVector<Type> regTypes;
+  };
+
+  Builder &builder = parser.getBuilder();
+  std::array<OperandFragment, 6> frags; // A, B, C, sparseMetadata, selector,
+                                        // plus one slot for the result type.
+
+  NamedAttrList namedAttributes;
+
+  // A helper to parse the operand segments.
+  auto parseMmaSpOperand = [&](StringRef operandName,
+                               OperandFragment &frag) -> LogicalResult {
+    if (parser.parseKeyword(operandName).failed())
+      return failure();
+    if (parser
+            .parseOperandList(frag.regs, OpAsmParser::Delimiter::OptionalSquare)
+            .failed())
+      return failure();
+    return success();
+  };
+
+  // Parse the operand segments.
+  if (parseMmaSpOperand("A", frags[0]).failed())
+    return failure();
+  if (parseMmaSpOperand("B", frags[1]).failed())
+    return failure();
+  if (parseMmaSpOperand("C", frags[2]).failed())
+    return failure();
+  if (parseMmaSpOperand("sparseMetadata", frags[3]).failed())
+    return failure();
+  if (parseMmaSpOperand("selector", frags[4]).failed())
+    return failure();
+
+  if (parser.parseOptionalAttrDict(namedAttributes).failed())
+    return failure();
+
+  // Parse the type specification and resolve operands.
+  SmallVector<Type, 3> operandTypes;
+  if (failed(parser.parseColon()))
+    return failure();
+  if (failed(parser.parseLParen()))
+    return failure();
+  if (failed(parser.parseTypeList(operandTypes)))
+    return failure();
+  if (failed(parser.parseRParen()))
+    return failure();
+  if (operandTypes.size() != 3)
+    return parser.emitError(
+        parser.getNameLoc(),
+        "expected one type for each operand segment but got " +
+            Twine(operandTypes.size()) + " types");
+  for (const auto &iter : llvm::enumerate(operandTypes)) {
+    auto &frag = frags[iter.index()];
+    frag.regTypes.resize(frag.regs.size(), iter.value());
+    if (failed(parser.resolveOperands(frag.regs, frag.regTypes,
+                                      parser.getNameLoc(), result.operands)))
+      return failure();
+    frag.elemtype = MmaOp::inferOperandMMAType(frag.regTypes[0],
+                                               /*isAccumulator*/ iter.index() >= 2);
+  }
+
+  Type resultType;
+  if (parser.parseArrow() || parser.parseType(resultType))
+    return failure();
+  frags[5].elemtype = MmaOp::inferOperandMMAType(resultType, /*isAccumulator*/ true);
+
+  // Resolve sparse metadata and selector (assume i32 type)
+  Type i32Type = builder.getIntegerType(32);
+  if (parser.resolveOperands(frags[3].regs, i32Type,
+                             parser.getCurrentLocation(), result.operands)
+          .failed())
+    return failure();
+  if (parser.resolveOperands(frags[4].regs, i32Type,
+                             parser.getCurrentLocation(), result.operands)
+          .failed())
+    return failure();
+
+  std::array<StringRef, 2> names{"multiplicandAPtxType",
+                                 "multiplicandBPtxType"};
+  for (unsigned idx = 0; idx < names.size(); idx++) {
+    const auto &frag = frags[idx];
+    std::optional<NamedAttribute> attr = namedAttributes.getNamed(names[idx]);
+    if (!frag.elemtype.has_value() && !attr.has_value()) {
+      return parser.emitError(
+          parser.getNameLoc(),
+          "attribute " + names[idx] +
+              " is not provided explicitly and cannot be inferred");
+    }
+    if (!attr.has_value())
+      result.addAttribute(
+          names[idx], MMATypesAttr::get(parser.getContext(), *frag.elemtype));
+  }
+
+  result.addTypes(resultType);
+  if (!namedAttributes.empty())
+    result.addAttributes(namedAttributes);
+  result.addAttribute(MmaSpOp::getOperandSegmentSizeAttr(),
+                      builder.getDenseI32ArrayAttr({
+                          static_cast<int32_t>(frags[0].regs.size()),
+                          static_cast<int32_t>(frags[1].regs.size()),
+                          static_cast<int32_t>(frags[2].regs.size()),
+                          1, // sparseMetadata
+                          1  // sparsitySelector
+                      }));
+  return success();
+}
+
+LogicalResult MmaSpOp::verify() {
+  MLIRContext *context = getContext();
+  auto f16Ty = Float16Type::get(context);
+  auto i32Ty = IntegerType::get(context, 32);
+  auto f16x2Ty = VectorType::get(2, f16Ty);
+  auto f32Ty = Float32Type::get(context);
+  auto f16x2x4StructTy = LLVM::LLVMStructType::getLiteral(
+      context, {f16x2Ty, f16x2Ty, f16x2Ty, f16x2Ty});
+
+  auto s32x4StructTy =
+      LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty, i32Ty, i32Ty});
+  auto f32x8StructTy =
+      LLVM::LLVMStructType::getLiteral(context, SmallVector<Type>(8, f32Ty));
+  auto f16x2x2StructTy =
+      LLVM::LLVMStructType::getLiteral(context, {f16x2Ty, f16x2Ty});
+  auto f32x4StructTy =
+      LLVM::LLVMStructType::getLiteral(context, {f32Ty, f32Ty, f32Ty, f32Ty});
+  auto s32x2StructTy =
+      LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty});
+
+  std::array<int64_t, 3> mmaShape{getShapeAttr().getM(), getShapeAttr().getN(),
+                                  getShapeAttr().getK()};
+
+  // These variables define the set of allowed data types for matrices A, B, C,
+  // and result.
+  using AllowedShapes = SmallVector<std::array<int64_t, 3>, 2>;
+  using AllowedTypes = SmallVector<SmallVector<Type, 4>, 2>;
+  AllowedShapes allowedShapes;
+  AllowedTypes expectedA;
+  AllowedTypes expectedB;
+  AllowedTypes expectedC;
+  SmallVector<Type> expectedResult;
+
+  // When M = 16, we just need to calculate the number of 8xk tiles, where
+  // k is a factor that depends on the data type.
+  if (mmaShape[0] == 16) {
+    int64_t kFactor;
+    Type multiplicandFragType;
+    switch (*getMultiplicandAPtxType()) {
+    case MMATypes::tf32:
+      kFactor = 4;
+      multiplicandFragType = i32Ty;
+      expectedResult.push_back(LLVM::LLVMStructType::getLiteral(
+          context, {f32Ty, f32Ty, f32Ty, f32Ty}));
+      // Sparse MMA supports m16n8k8 and m16n8k16 for tf32
+      allowedShapes.push_back({16, 8, 8});
+      allowedShapes.push_back({16, 8, 16});
+      break;
+    case MMATypes::bf16:
+      kFactor = 8;
+      multiplicandFragType = i32Ty;
+      expectedResult.push_back(LLVM::LLVMStructType::getLiteral(
+          context, {f32Ty, f32Ty, f32Ty, f32Ty}));
+      // Sparse MMA supports m16n8k16 and m16n8k32 for bf16
+      allowedShapes.push_back({16, 8, 16});
+      allowedShapes.push_back({16, 8, 32});
+      break;
+    case MMATypes::f16:
+      kFactor = 8;
+      multiplicandFragType = f16x2Ty;
+      expectedResult.push_back(f16x2x2StructTy);
+      expectedResult.push_back(f32x4StructTy);
+      // Sparse MMA supports m16n8k16 and m16n8k32 for f16
+      allowedShapes.push_back({16, 8, 16});
+      allowedShapes.push_back({16, 8, 32});
+      break;
+    case MMATypes::s4:
+    case MMATypes::u4:
+      kFactor = 32;
+      // Sparse MMA supports m16n8k64 and m16n8k128 for s4/u4
+      allowedShapes.push_back({16, 8, 64});
+      allowedShapes.push_back({16, 8, 128});
+      break;
+    case MMATypes::s8:
+    case MMATypes::u8:
+      kFactor = 16;
+      // Sparse MMA supports m16n8k32 and m16n8k64 for s8/u8
+      allowedShapes.push_back({16, 8, 32});
+      allowedShapes.push_back({16, 8, 64});
+      break;
+    case MMATypes::e4m3:
+    case MMATypes::e5m2:
+    case MMATypes::e3m2:
+    case MMATypes::e2m3:
+    case MMATypes::e2m1:
+      kFactor = 32;
+      multiplicandFragType = i32Ty;
+      expectedResult.push_back(f16x2x2StructTy);
+      expectedResult.push_back(f32x4StructTy);
+      // Sparse MMA supports m16n8k64 for FP8 types
+      allowedShapes.push_back({16, 8, 64});
+      break;
+    default:
+      return emitError("invalid shape or multiplicand type: " +
+                       stringifyEnum(getMultiplicandAPtxType().value()));
+    }
+
+    if (isIntegerPtxType(getMultiplicandAPtxType().value())) {
+      expectedResult.push_back(s32x4StructTy);
+      expectedC.emplace_back(4, i32Ty);
+      multiplicandFragType = i32Ty;
+    } else if (*getMultiplicandAPtxType() >= MMATypes::e4m3 &&
+               *getMultiplicandAPtxType() <= MMATypes::e2m1) {
+      // FP8 types
+      expectedC.emplace_back(2, f16x2Ty);
+      expectedC.emplace_back(4, f32Ty);
+    } else {
+      expectedC.emplace_back(2, f16x2Ty);
+      expectedC.emplace_back(4, f32Ty);
+    }
+
+    // For sparse MMA, A operand is compressed (2:4 sparsity means half the elements)
+    int64_t unitA = (mmaShape[0] / 8) * (mmaShape[2] / kFactor) / 2;
+    int64_t unitB = (mmaShape[1] / 8) * (mmaShape[2] / kFactor);
+    expectedA.emplace_back(unitA, multiplicandFragType);
+    expectedB.emplace_back(unitB, multiplicandFragType);
+
+    if (resultPtxType() != accumPtxType())
+      return emitOpError("ctype does not match dtype");
+  }
+
+  // In the M=8 case, there is only 1 possible case per data type.
+  if (mmaShape[0] == 8) {
+    if (*getMultiplicandAPtxType() == MMATypes::f16) {
+      expectedA.emplace_back(2, f16x2Ty);
+      expectedB.emplace_back(2, f16x2Ty);
+      expectedResult.push_back(f16x2x4StructTy);
+      expectedResult.push_back(f32x8StructTy);
+      expectedC.emplace_back(4, f16x2Ty);
+      expectedC.emplace_back(8, f32Ty);
+      allowedShapes.push_back({8, 8, 4});
+    }
+    if (*getMultiplicandAPtxType() == MMATypes::f64) {
+      Type f64Ty = Float64Type::get(context);
+      expectedA.emplace_back(1, f64Ty);
+      expectedB.emplace_back(1, f64Ty);
+      expectedC.emplace_back(2, f64Ty);
+      expectedResult.emplace_back(LLVM::LLVMStructType::getLiteral(
+          context, SmallVector<Type>(2, f64Ty)));
+      allowedShapes.push_back({8, 8, 4});
+    }
+    if (isIntegerPtxType(getMultiplicandAPtxType().value())) {
+      expectedA.push_back({i32Ty});
+      expectedB.push_back({i32Ty});
+      expectedC.push_back({i32Ty, i32Ty});
+      expectedResult.push_back(s32x2StructTy);
+      if (isInt4PtxType(getMultiplicandAPtxType().value()))
+        allowedShapes.push_back({8, 8, 32});
+      if (isInt8PtxType(getMultiplicandAPtxType().value()))
+        allowedShapes.push_back({8, 8, 16});
+    }
+  }
+
+  std::string errorMessage;
+  llvm::raw_string_ostream errorStream(errorMessage);
+
+  // Check that we matched an existing shape/dtype combination.
+  if (expectedA.empty() || expectedB.empty() || expectedC.empty() ||
+      !llvm::is_contained(allowedShapes, mmaShape)) {
+    errorStream << "unimplemented variant for MMA shape <";
+    llvm::interleaveComma(mmaShape, errorStream);
+    errorStream << ">";
+    return emitOpError(errorMessage);
+  }
+
+  // Verify the operand types for segments of A, B, and C operands.
+  std::array<StringRef, 3> operandNames{"A", "B", "C"};
+  for (const auto &iter : llvm::enumerate(
+           SmallVector<AllowedTypes, 3>{expectedA, expectedB, expectedC})) {
+    auto spec = this->getODSOperandIndexAndLength(iter.index());
+    SmallVector<Type, 4> operandTySeg(operand_type_begin() + spec.first,
+                                      operand_type_begin() + spec.first +
+                                          spec.second);
+    bool match = llvm::is_contained(iter.value(), operandTySeg);
+
+    if (!match) {
+      errorStream << "Could not match types for the "
+                  << operandNames[iter.index()]
+                  << " operands; expected one of ";
+      for (const auto &x : iter.value()) {
+        errorStream << x.size() << "x" << x[0] << " ";
+      }
+      errorStream << "but got ";
+      llvm::interleaveComma(operandTySeg, errorStream);
+      return emitOpError(errorMessage);
+    }
+  }
+
+  // Check the result type
+  if (!llvm::any_of(expectedResult, [&](Type expectedResultType) {
+        return expectedResultType == getResult().getType();
+      })) {
+    errorStream
+        << "Could not match allowed types for the result; expected one of ";
+    llvm::interleaveComma(expectedResult, errorStream);
+    errorStream << " but got " << getResult().getType();
+    return emitOpError(errorMessage);
+  }
+
+  // Ensure int4/int8 MMA variants specify the accum overflow behavior
+  // attribute.
+  if (isInt4PtxType(*getMultiplicandAPtxType()) ||
+      isInt8PtxType(*getMultiplicandAPtxType())) {
+    if (!getIntOverflowBehavior())
+      return emitOpError("op requires " +
+                         getIntOverflowBehaviorAttrName().strref() +
+                         " attribute");
+  }
+
+  // Validate sparse metadata type (should be i32)
+  if (!getSparseMetadata().getType().isInteger(32)) {
+    return emitOpError() << "sparse metadata must be i32 type";
+  }
+
+  // Validate sparsity selector type (should be i32)
+  if (!getSparsitySelector().getType().isInteger(32)) {
+    return emitOpError() << "sparsity selector must be i32 type";
+  }
+
+  return success();
+}
+
 LogicalResult ShflOp::verify() {
   auto returnStructType = llvm::dyn_cast<LLVM::LLVMStructType>(getType());
 
diff --git a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir
new file mode 100644
index 0000000000000..b55486aadaaa5
--- /dev/null
+++ b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir
@@ -0,0 +1,221 @@
+// RUN: mlir-opt %s -split-input-file | FileCheck %s
+
+// This file contains tests for sparse MMA (mma.sp.sync) operations with KIND variants.
+// The kind::f8f6f4 variant was introduced in PTX ISA 8.7 for sm_90+ architectures.
+//
+// Based on PTX ISA documentation:
+// https://docs.nvidia.com/cuda/parallel-thread-execution/#warp-level-matrix-instructions-for-sparse-mma
+//
+// KIND::F8F6F4 enables:
+// - Additional FP6 types (e3m2, e2m3) and the FP4 type (e2m1)
+// - F16 accumulator for m16n8k64 FP8 operations
+// - Mixed-precision FP8 computations
+//
+// Requirements:
+// - ONLY works with ordered metadata (sp::ordered_metadata)
+// - ONLY for shape m16n8k64
+// - ONLY for f8/f6/f4 multiplicand types (not integers or other floats)
+
+// =============================================================================
+// FP8 e4m3 Sparse MMA with KIND (m16n8k64)
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e4m3_f16
+func.func @nvvm_mma_sp_kind_m16n8k64_e4m3_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e4m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e4m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e4m3_f32
+func.func @nvvm_mma_sp_kind_m16n8k64_e4m3_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e4m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e4m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// FP8 e5m2 Sparse MMA with KIND (m16n8k64)
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e5m2_f16
+func.func @nvvm_mma_sp_kind_m16n8k64_e5m2_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e5m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e5m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e5m2_f32
+func.func @nvvm_mma_sp_kind_m16n8k64_e5m2_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e5m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e5m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// FP6 e3m2 Sparse MMA with KIND (m16n8k64)
+// NOTE: e3m2 is ONLY available with kind::f8f6f4
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e3m2_f16
+func.func @nvvm_mma_sp_kind_m16n8k64_e3m2_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e3m2>, multiplicandBPtxType = #nvvm.mma_type<e3m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e3m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e3m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e3m2_f32
+func.func @nvvm_mma_sp_kind_m16n8k64_e3m2_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e3m2>, multiplicandBPtxType = #nvvm.mma_type<e3m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e3m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e3m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// FP6 e2m3 Sparse MMA with KIND (m16n8k64)
+// NOTE: e2m3 is ONLY available with kind::f8f6f4
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e2m3_f16
+func.func @nvvm_mma_sp_kind_m16n8k64_e2m3_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m3>, multiplicandBPtxType = #nvvm.mma_type<e2m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e2m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e2m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e2m3_f32
+func.func @nvvm_mma_sp_kind_m16n8k64_e2m3_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m3>, multiplicandBPtxType = #nvvm.mma_type<e2m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e2m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e2m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// FP4 e2m1 Sparse MMA with KIND (m16n8k64)
+// NOTE: e2m1 is ONLY available with kind::f8f6f4
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e2m1_f16
+func.func @nvvm_mma_sp_kind_m16n8k64_e2m1_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m1>, multiplicandBPtxType = #nvvm.mma_type<e2m1>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e2m1>,
+                         multiplicandBPtxType = #nvvm.mma_type<e2m1>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_kind_m16n8k64_e2m1_f32
+func.func @nvvm_mma_sp_kind_m16n8k64_e2m1_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m1>, multiplicandBPtxType = #nvvm.mma_type<e2m1>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {kind = #nvvm.mma_kind<f8f6f4>,
+                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e2m1>,
+                         multiplicandBPtxType = #nvvm.mma_type<e2m1>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
diff --git a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir
new file mode 100644
index 0000000000000..ca84d19612af5
--- /dev/null
+++ b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir
@@ -0,0 +1,411 @@
+// RUN: mlir-opt %s -split-input-file | FileCheck %s
+
+// This file contains tests for sparse MMA (mma.sp.sync) operations with ORDERED metadata.
+// The ordered metadata variant was introduced in PTX ISA 8.5 for sm_90+ architectures.
+//
+// Based on PTX ISA documentation:
+// https://docs.nvidia.com/cuda/parallel-thread-execution/#warp-level-matrix-instructions-for-sparse-mma
+//
+// Ordered metadata provides an alternative metadata ordering for 2:4 structured sparsity
+// that can offer better performance on newer architectures.
+
+// =============================================================================
+// F16 Sparse MMA Operations with Ordered Metadata (m16n8k16)
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k16_f16_f16
+func.func @nvvm_mma_sp_ordered_m16n8k16_f16_f16(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k16_f16_f32
+func.func @nvvm_mma_sp_ordered_m16n8k16_f16_f32(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// F16 Sparse MMA Operations with Ordered Metadata (m16n8k32)
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k32_f16_f16
+func.func @nvvm_mma_sp_ordered_m16n8k32_f16_f16(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>, %a2 : vector<2xf16>, %a3 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>, %b2 : vector<2xf16>, %b3 : vector<2xf16>,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k32_f16_f32
+func.func @nvvm_mma_sp_ordered_m16n8k32_f16_f32(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>, %a2 : vector<2xf16>, %a3 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>, %b2 : vector<2xf16>, %b3 : vector<2xf16>,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// BF16 Sparse MMA Operations with Ordered Metadata
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k16_bf16_f32
+func.func @nvvm_mma_sp_ordered_m16n8k16_bf16_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<bf16>,
+                         multiplicandBPtxType = #nvvm.mma_type<bf16>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k32_bf16_f32
+func.func @nvvm_mma_sp_ordered_m16n8k32_bf16_f32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<bf16>,
+                         multiplicandBPtxType = #nvvm.mma_type<bf16>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// TF32 Sparse MMA Operations with Ordered Metadata
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k8_tf32_f32
+func.func @nvvm_mma_sp_ordered_m16n8k8_tf32_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, shape = #nvvm.shape<m = 16, n = 8, k = 8>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<tf32>,
+                         multiplicandBPtxType = #nvvm.mma_type<tf32>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 8>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k16_tf32_f32
+func.func @nvvm_mma_sp_ordered_m16n8k16_tf32_f32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<tf32>,
+                         multiplicandBPtxType = #nvvm.mma_type<tf32>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// Integer (s8) Sparse MMA Operations with Ordered Metadata
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k32_s8_s32
+func.func @nvvm_mma_sp_ordered_m16n8k32_s8_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<s8>,
+                         multiplicandBPtxType = #nvvm.mma_type<s8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k32_s8_s32_satfinite
+func.func @nvvm_mma_sp_ordered_m16n8k32_s8_s32_satfinite(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<s8>,
+                         multiplicandBPtxType = #nvvm.mma_type<s8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_s8_s32
+func.func @nvvm_mma_sp_ordered_m16n8k64_s8_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<s8>,
+                         multiplicandBPtxType = #nvvm.mma_type<s8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// Integer (u8) Sparse MMA Operations with Ordered Metadata
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k32_u8_s32
+func.func @nvvm_mma_sp_ordered_m16n8k32_u8_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<u8>,
+                         multiplicandBPtxType = #nvvm.mma_type<u8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_u8_s32
+func.func @nvvm_mma_sp_ordered_m16n8k64_u8_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<u8>,
+                         multiplicandBPtxType = #nvvm.mma_type<u8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// Sub-byte Integer (s4) Sparse MMA Operations with Ordered Metadata
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_s4_s32
+func.func @nvvm_mma_sp_ordered_m16n8k64_s4_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<s4>,
+                         multiplicandBPtxType = #nvvm.mma_type<s4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k128_s4_s32
+func.func @nvvm_mma_sp_ordered_m16n8k128_s4_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<s4>,
+                         multiplicandBPtxType = #nvvm.mma_type<s4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 128>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// Sub-byte Integer (u4) Sparse MMA Operations with Ordered Metadata
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_u4_s32
+func.func @nvvm_mma_sp_ordered_m16n8k64_u4_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<u4>,
+                         multiplicandBPtxType = #nvvm.mma_type<u4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k128_u4_s32
+func.func @nvvm_mma_sp_ordered_m16n8k128_u4_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<u4>,
+                         multiplicandBPtxType = #nvvm.mma_type<u4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 128>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// FP8 (e4m3) Sparse MMA Operations with Ordered Metadata
+// NOTE: FP8 ordered metadata requires PTX ISA 8.7+ and sm_90+
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_e4m3_f16
+func.func @nvvm_mma_sp_ordered_m16n8k64_e4m3_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e4m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e4m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_e4m3_f32
+func.func @nvvm_mma_sp_ordered_m16n8k64_e4m3_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e4m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e4m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// FP8 (e5m2) Sparse MMA Operations with Ordered Metadata
+// NOTE: FP8 ordered metadata requires PTX ISA 8.7+ and sm_90+
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_e5m2_f16
+func.func @nvvm_mma_sp_ordered_m16n8k64_e5m2_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e5m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e5m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_ordered_m16n8k64_e5m2_f32
+func.func @nvvm_mma_sp_ordered_m16n8k64_e5m2_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         multiplicandAPtxType = #nvvm.mma_type<e5m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e5m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
diff --git a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp.mlir b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp.mlir
new file mode 100644
index 0000000000000..e7122aac61baf
--- /dev/null
+++ b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp.mlir
@@ -0,0 +1,390 @@
+// RUN: mlir-opt %s -split-input-file | FileCheck %s
+
+// This file contains tests for all sparse MMA (mma.sp.sync) operations in the NVVM dialect
+// Based on PTX ISA documentation:
+// https://docs.nvidia.com/cuda/parallel-thread-execution/#warp-level-matrix-instructions-for-sparse-mma
+//
+// Sparse MMA operations follow 2:4 structured sparsity where 2 out of every 4 elements
+// in the A operand are non-zero. The A operand is provided in compressed form,
+// and sparseMetadata provides the sparsity indices.
+//
+// NOTE: These tests use the default (standard) metadata ordering.
+// For ordered metadata tests (PTX ISA 8.5+, sm_90+), see nvvm-mma-sp-ordered.mlir.
+
+// =============================================================================
+// F16 Sparse MMA Operations (m16n8k16)
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k16_f16_f16
+func.func @nvvm_mma_sp_m16n8k16_f16_f16(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k16_f16_f32
+func.func @nvvm_mma_sp_m16n8k16_f16_f32(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// F16 Sparse MMA Operations (m16n8k32)
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k32_f16_f16
+func.func @nvvm_mma_sp_m16n8k32_f16_f16(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>, %a2 : vector<2xf16>, %a3 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>, %b2 : vector<2xf16>, %b3 : vector<2xf16>,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k32_f16_f32
+func.func @nvvm_mma_sp_m16n8k32_f16_f32(
+    %a0 : vector<2xf16>, %a1 : vector<2xf16>, %a2 : vector<2xf16>, %a3 : vector<2xf16>,
+    %b0 : vector<2xf16>, %b1 : vector<2xf16>, %b2 : vector<2xf16>, %b3 : vector<2xf16>,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// BF16 Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k16_bf16_f32
+func.func @nvvm_mma_sp_m16n8k16_bf16_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<bf16>,
+                         multiplicandBPtxType = #nvvm.mma_type<bf16>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k32_bf16_f32
+func.func @nvvm_mma_sp_m16n8k32_bf16_f32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<bf16>,
+                         multiplicandBPtxType = #nvvm.mma_type<bf16>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// TF32 Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k8_tf32_f32
+func.func @nvvm_mma_sp_m16n8k8_tf32_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, shape = #nvvm.shape<m = 16, n = 8, k = 8>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<tf32>,
+                         multiplicandBPtxType = #nvvm.mma_type<tf32>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 8>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k16_tf32_f32
+func.func @nvvm_mma_sp_m16n8k16_tf32_f32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<tf32>,
+                         multiplicandBPtxType = #nvvm.mma_type<tf32>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 16>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// Integer (s8) Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k32_s8_s32
+func.func @nvvm_mma_sp_m16n8k32_s8_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<s8>,
+                         multiplicandBPtxType = #nvvm.mma_type<s8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k32_s8_s32_satfinite
+func.func @nvvm_mma_sp_m16n8k32_s8_s32_satfinite(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<s8>,
+                         multiplicandBPtxType = #nvvm.mma_type<s8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_s8_s32
+func.func @nvvm_mma_sp_m16n8k64_s8_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<s8>,
+                         multiplicandBPtxType = #nvvm.mma_type<s8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// Integer (u8) Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k32_u8_s32
+func.func @nvvm_mma_sp_m16n8k32_u8_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<u8>,
+                         multiplicandBPtxType = #nvvm.mma_type<u8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 32>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_u8_s32
+func.func @nvvm_mma_sp_m16n8k64_u8_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<u8>,
+                         multiplicandBPtxType = #nvvm.mma_type<u8>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// Sub-byte Integer (s4) Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_s4_s32
+func.func @nvvm_mma_sp_m16n8k64_s4_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<s4>,
+                         multiplicandBPtxType = #nvvm.mma_type<s4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k128_s4_s32
+func.func @nvvm_mma_sp_m16n8k128_s4_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<s4>,
+                         multiplicandBPtxType = #nvvm.mma_type<s4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 128>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// Sub-byte Integer (u4) Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_u4_s32
+func.func @nvvm_mma_sp_m16n8k64_u4_s32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<u4>,
+                         multiplicandBPtxType = #nvvm.mma_type<u4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k128_u4_s32
+func.func @nvvm_mma_sp_m16n8k128_u4_s32(
+    %a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+    %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
+    %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<u4>,
+                         multiplicandBPtxType = #nvvm.mma_type<u4>,
+                         intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 128>}
+      : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  return
+}
+
+// =============================================================================
+// FP8 (e4m3) Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_e4m3_f16
+func.func @nvvm_mma_sp_m16n8k64_e4m3_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<e4m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e4m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_e4m3_f32
+func.func @nvvm_mma_sp_m16n8k64_e4m3_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<e4m3>,
+                         multiplicandBPtxType = #nvvm.mma_type<e4m3>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+
+// =============================================================================
+// FP8 (e5m2) Sparse MMA Operations
+// =============================================================================
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_e5m2_f16
+func.func @nvvm_mma_sp_m16n8k64_e5m2_f16(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : vector<2xf16>, %c1 : vector<2xf16>,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<e5m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e5m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  return
+}
+
+// CHECK-LABEL: @nvvm_mma_sp_m16n8k64_e5m2_f32
+func.func @nvvm_mma_sp_m16n8k64_e5m2_f32(
+    %a0 : i32, %a1 : i32,
+    %b0 : i32, %b1 : i32,
+    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+    %meta : i32, %sel : i32) {
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
+                        sparseMetadata[%meta] selector[%sel]
+                        {multiplicandAPtxType = #nvvm.mma_type<e5m2>,
+                         multiplicandBPtxType = #nvvm.mma_type<e5m2>,
+                         shape = #nvvm.shape<m = 16, n = 8, k = 64>}
+      : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  return
+}
+

>From fb3e6dad9197e0bf8a79e818f30a7b1b38afc77d Mon Sep 17 00:00:00 2001
From: Kirill Vedernikov <kvedernikov at nvidia.com>
Date: Wed, 19 Nov 2025 11:11:24 +0100
Subject: [PATCH 2/4] [MLIR] Code formatting was fixed.

---
 mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp | 63 +++++++++++-----------
 1 file changed, 33 insertions(+), 30 deletions(-)

diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 8db724dd0a25b..b5a0aff814a67 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -956,7 +956,7 @@ MMATypes MmaSpOp::resultPtxType() {
 
 mlir::NVVM::IDArgPair
 MmaSpOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
-                                llvm::IRBuilderBase &builder) {
+                               llvm::IRBuilderBase &builder) {
   auto thisOp = cast<NVVM::MmaSpOp>(op);
 
   // Get operands
@@ -966,14 +966,11 @@ MmaSpOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
 
   // Get intrinsic ID using the existing getIntrinsicID method
   auto intId = MmaSpOp::getIntrinsicID(
-      thisOp.getShape().getM(), thisOp.getShape().getN(), thisOp.getShape().getK(),
-      thisOp.getIntOverflowBehavior(),
-      thisOp.getMetadataType(),
-      thisOp.getKind(),
-      *thisOp.getMultiplicandAPtxType(),
-      *thisOp.getMultiplicandBPtxType(),
-      thisOp.accumPtxType(),
-      thisOp.resultPtxType());
+      thisOp.getShape().getM(), thisOp.getShape().getN(),
+      thisOp.getShape().getK(), thisOp.getIntOverflowBehavior(),
+      thisOp.getMetadataType(), thisOp.getKind(),
+      *thisOp.getMultiplicandAPtxType(), *thisOp.getMultiplicandBPtxType(),
+      thisOp.accumPtxType(), thisOp.resultPtxType());
 
   return {intId, args};
 }
@@ -991,8 +988,7 @@ void MmaSpOp::print(OpAsmPrinter &p) {
   std::array<OperandFragment, 5> frags{
       OperandFragment("A", getMultiplicandAPtxTypeAttrName()),
       OperandFragment("B", getMultiplicandBPtxTypeAttrName()),
-      OperandFragment("C", ""),
-      OperandFragment("sparseMetadata", ""),
+      OperandFragment("C", ""), OperandFragment("sparseMetadata", ""),
       OperandFragment("selector", "")};
   SmallVector<StringRef, 4> ignoreAttrNames{
       mlir::NVVM::MmaSpOp::getOperandSegmentSizeAttr()};
@@ -1009,8 +1005,8 @@ void MmaSpOp::print(OpAsmPrinter &p) {
         regTypes.push_back(this->getOperand(operandIdx).getType());
       }
     }
-    std::optional<MMATypes> inferredType =
-        MmaOp::inferOperandMMAType(regTypes.back(), /*isAccumulator=*/fragIdx >= 2);
+    std::optional<MMATypes> inferredType = MmaOp::inferOperandMMAType(
+        regTypes.back(), /*isAccumulator=*/fragIdx >= 2);
     if (inferredType)
       ignoreAttrNames.push_back(frag.ptxTypeAttr);
   }
@@ -1034,17 +1030,19 @@ void MmaSpOp::print(OpAsmPrinter &p) {
   p << "(";
   for (int i = 0; i < 3; ++i) {
     p << regTypes[i];
-    if (i < 2) p << ", ";
+    if (i < 2)
+      p << ", ";
   }
   p << ") -> " << getResult().getType();
 }
 
-void MmaSpOp::build(OpBuilder &builder, OperationState &result,
-                Type resultType, ValueRange operandA, ValueRange operandB,
-                ValueRange operandC, Value sparseMetadata, Value sparsitySelector,
-                ArrayRef<int64_t> shape,
-                std::optional<MMAIntOverflow> intOverflow,
-                std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes) {
+void MmaSpOp::build(
+    OpBuilder &builder, OperationState &result,
+    Type resultType, ValueRange operandA, ValueRange operandB,
+    ValueRange operandC, Value sparseMetadata, Value sparsitySelector,
+    ArrayRef<int64_t> shape,
+    std::optional<MMAIntOverflow> intOverflow,
+    std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes) {
 
   assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
   MLIRContext *ctx = builder.getContext();
@@ -1078,8 +1076,8 @@ void MmaSpOp::build(OpBuilder &builder, OperationState &result,
       MmaSpOp::getOperandSegmentSizeAttr(),
       builder.getDenseI32ArrayAttr({static_cast<int32_t>(operandA.size()),
                                     static_cast<int32_t>(operandB.size()),
-                                    static_cast<int32_t>(operandC.size()),
-                                    1, 1})); // sparseMetadata and sparsitySelector
+                                    static_cast<int32_t>(operandC.size()), 1,
+                                    1})); // sparseMetadata and sparsitySelector
 }
 
 ParseResult MmaSpOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -1142,23 +1140,27 @@ ParseResult MmaSpOp::parse(OpAsmParser &parser, OperationState &result) {
     if (failed(parser.resolveOperands(frag.regs, frag.regTypes,
                                       parser.getNameLoc(), result.operands)))
       return failure();
-    frag.elemtype = MmaOp::inferOperandMMAType(frag.regTypes[0],
-                                               /*isAccumulator*/ iter.index() >= 2);
+    frag.elemtype =
+        MmaOp::inferOperandMMAType(frag.regTypes[0],
+                                   /*isAccumulator*/ iter.index() >= 2);
   }
 
   Type resultType;
   if (parser.parseArrow() || parser.parseType(resultType))
     return failure();
-  frags[5].elemtype = MmaOp::inferOperandMMAType(resultType, /*isAccumulator*/ true);
+  frags[5].elemtype =
+      MmaOp::inferOperandMMAType(resultType, /*isAccumulator*/ true);
 
   // Resolve sparse metadata and selector (assume i32 type)
   Type i32Type = builder.getIntegerType(32);
-  if (parser.resolveOperands(frags[3].regs, i32Type,
-                             parser.getCurrentLocation(), result.operands)
+  if (parser
+          .resolveOperands(frags[3].regs, i32Type, parser.getCurrentLocation(),
+                           result.operands)
           .failed())
     return failure();
-  if (parser.resolveOperands(frags[4].regs, i32Type,
-                             parser.getCurrentLocation(), result.operands)
+  if (parser
+          .resolveOperands(frags[4].regs, i32Type, parser.getCurrentLocation(),
+                           result.operands)
           .failed())
     return failure();
 
@@ -1303,7 +1305,8 @@ LogicalResult MmaSpOp::verify() {
       expectedC.emplace_back(4, f32Ty);
     }
 
-    // For sparse MMA, A operand is compressed (2:4 sparsity means half the elements)
+    // For sparse MMA, A operand is compressed (2:4 sparsity means half the
+    // elements)
     int64_t unitA = (mmaShape[0] / 8) * (mmaShape[2] / kFactor) / 2;
     int64_t unitB = (mmaShape[1] / 8) * (mmaShape[2] / kFactor);
     expectedA.emplace_back(unitA, multiplicandFragType);

>From 35f2970d8ebceadf2f368a17ff838d2a1f080e87 Mon Sep 17 00:00:00 2001
From: Kirill Vedernikov <kvedernikov at nvidia.com>
Date: Wed, 19 Nov 2025 11:13:56 +0100
Subject: [PATCH 3/4] [MLIR] One more fix for code formatting

---
 mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index b5a0aff814a67..cdb6fb0bde29e 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -1037,10 +1037,9 @@ void MmaSpOp::print(OpAsmPrinter &p) {
 }
 
 void MmaSpOp::build(
-    OpBuilder &builder, OperationState &result,
-    Type resultType, ValueRange operandA, ValueRange operandB,
-    ValueRange operandC, Value sparseMetadata, Value sparsitySelector,
-    ArrayRef<int64_t> shape,
+    OpBuilder &builder, OperationState &result, Type resultType,
+    ValueRange operandA, ValueRange operandB, ValueRange operandC,
+    Value sparseMetadata, Value sparsitySelector, ArrayRef<int64_t> shape,
     std::optional<MMAIntOverflow> intOverflow,
     std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes) {
 

>From 156919a4c3749b7e4df45b3ca8331556dd510ae8 Mon Sep 17 00:00:00 2001
From: Kirill Vedernikov <kvedernikov at nvidia.com>
Date: Wed, 19 Nov 2025 18:51:26 +0100
Subject: [PATCH 4/4] [MLIR] Fixes for sparse MMA support according to feedback
 from PR168686

---
 mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td   | 32 ++-----
 mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp    |  2 +-
 .../test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir | 40 ++++-----
 .../Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir   | 84 +++++++++----------
 4 files changed, 70 insertions(+), 88 deletions(-)

diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index b8f69f6b2cb98..0344e8d42f172 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -2217,19 +2217,6 @@ def MMAIntOverflow : I32EnumAttr<"MMAIntOverflow", "MMA overflow options",
 def MMAIntOverflowAttr : EnumAttr<NVVM_Dialect, MMAIntOverflow, "mma_int_overflow"> {
   let assemblyFormat = "`<` $value `>`";
 }
-
-/// Sparse MMA metadata types
-def MMASpMetadataStandard : I32EnumAttrCase<"standard", 0>;
-def MMASpMetadataOrdered : I32EnumAttrCase<"ordered", 1>;
-def MMASpMetadata : I32EnumAttr<"MMASpMetadata", "Sparse MMA metadata ordering",
-  [MMASpMetadataStandard, MMASpMetadataOrdered]> {
-  let genSpecializedAttr = 0;
-  let cppNamespace = "::mlir::NVVM";
-}
-def MMASpMetadataAttr : EnumAttr<NVVM_Dialect, MMASpMetadata, "mma_sp_metadata"> {
-  let assemblyFormat = "`<` $value `>`";
-}
-
 /// MMA kind types (for mixed-precision FP8 operations)
 def MMAKindF8F6F4 : I32EnumAttrCase<"f8f6f4", 0>;
 def MMAKind : I32EnumAttr<"MMAKind", "MMA operation kind",
@@ -2921,8 +2908,7 @@ class MMA_SP_SYNC_INTR {
                 # " && \"" # op[2].ptx_elt_type # "\" == eltypeC"
                 # " && \"" # op[3].ptx_elt_type # "\" == eltypeD"
                 # " && (satf.has_value()  ? " # satf # " == static_cast<int>(*satf) : true)"
-                # " && " # !if(!eq(metadata, "sp"), "!orderedMetadata", "orderedMetadata")
-                # " && " # !if(!eq(kind, ""), "!hasKind", "hasKind") # ")\n"
+                # " && " # !if(!eq(metadata, "sp"), "!orderedMetadata", "orderedMetadata") # ")\n"
                 # "  return " #
                 MMA_SP_SYNC_NAME<metadata, kind, satf, op[0], op[1], op[2], op[3]>.id # ";",
                 "") // if supported
@@ -2956,9 +2942,9 @@ def NVVM_MmaSpOp : NVVM_Op<"mma.sp.sync", [AttrSizedOperandSegments]> {
     controls how the indices are distributed among threads in the warp and
     should typically be 0 or 1.
 
-    The optional `metadataType` attribute specifies the metadata ordering:
-    - `standard` (default): Uses standard sparse metadata ordering
-    - `ordered`: Uses ordered metadata (PTX ISA 8.5+, sm_90+)
+    The optional `orderedMetadata` attribute specifies the metadata ordering:
+    - Absence (default): Uses standard sparse metadata ordering
+    - Presence: Uses ordered metadata (PTX ISA 8.5+, sm_90+)
     
     The optional `kind` attribute specifies mixed-precision modes for FP8 operations:
     - `f8f6f4`: Enables e3m2, e2m3, e2m1 FP8 types and f16 accumulator (PTX ISA 8.7+, sm_90+)
@@ -2978,8 +2964,7 @@ def NVVM_MmaSpOp : NVVM_Op<"mma.sp.sync", [AttrSizedOperandSegments]> {
     // With ordered metadata:
     %d = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                           sparseMetadata[%meta] selector[%sel]
-                          {metadataType = #nvvm.mma_sp_metadata<ordered>,
-                           shape = {k = 32 : i32, m = 16 : i32, n = 8 : i32}}
+                          {orderedMetadata, shape = {k = 32 : i32, m = 16 : i32, n = 8 : i32}}
         : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
     ```
   }];
@@ -2989,7 +2974,7 @@ def NVVM_MmaSpOp : NVVM_Op<"mma.sp.sync", [AttrSizedOperandSegments]> {
              OptionalAttr<MMAIntOverflowAttr>:$intOverflowBehavior,
              OptionalAttr<MMATypesAttr>:$multiplicandAPtxType,
              OptionalAttr<MMATypesAttr>:$multiplicandBPtxType,
-             OptionalAttr<MMASpMetadataAttr>:$metadataType,
+             UnitAttr:$orderedMetadata,
              OptionalAttr<MMAKindAttr>:$kind,
              Variadic<LLVM_Type>:$operandA,
              Variadic<LLVM_Type>:$operandB,
@@ -3001,7 +2986,7 @@ def NVVM_MmaSpOp : NVVM_Op<"mma.sp.sync", [AttrSizedOperandSegments]> {
       static llvm::Intrinsic::ID getIntrinsicID(
             int64_t m, int64_t n, uint64_t k,
             std::optional<MMAIntOverflow> satf,
-            std::optional<MMASpMetadata> metadata,
+            bool orderedMetadata,
             std::optional<MMAKind> kind,
             mlir::NVVM::MMATypes eltypeAEnum, mlir::NVVM::MMATypes eltypeBEnum,
             mlir::NVVM::MMATypes eltypeCEnum, mlir::NVVM::MMATypes eltypeDEnum) {
@@ -3009,9 +2994,6 @@ def NVVM_MmaSpOp : NVVM_Op<"mma.sp.sync", [AttrSizedOperandSegments]> {
         llvm::StringRef eltypeB = stringifyEnum(eltypeBEnum);
         llvm::StringRef eltypeC = stringifyEnum(eltypeCEnum);
         llvm::StringRef eltypeD = stringifyEnum(eltypeDEnum);
-        bool orderedMetadata = metadata.has_value() &&
-                               *metadata == MMASpMetadata::ordered;
-        bool hasKind = kind.has_value();
         }],
         MMA_SP_SYNC_INTR<>.id, [{
           return 0;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index cdb6fb0bde29e..d312278753171 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -968,7 +968,7 @@ MmaSpOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
   auto intId = MmaSpOp::getIntrinsicID(
       thisOp.getShape().getM(), thisOp.getShape().getN(),
       thisOp.getShape().getK(), thisOp.getIntOverflowBehavior(),
-      thisOp.getMetadataType(), thisOp.getKind(),
+      thisOp.getOrderedMetadata(), thisOp.getKind(),
       *thisOp.getMultiplicandAPtxType(), *thisOp.getMultiplicandBPtxType(),
       thisOp.accumPtxType(), thisOp.resultPtxType());
 
diff --git a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir
index b55486aadaaa5..ff3e91b89016d 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-kind.mlir
@@ -26,11 +26,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e4m3_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e4m3>,
                          multiplicandBPtxType = #nvvm.mma_type<e4m3>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -44,11 +44,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e4m3_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e4m3>,
                          multiplicandBPtxType = #nvvm.mma_type<e4m3>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -66,11 +66,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e5m2_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e5m2>,
                          multiplicandBPtxType = #nvvm.mma_type<e5m2>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -84,11 +84,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e5m2_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e5m2>,
                          multiplicandBPtxType = #nvvm.mma_type<e5m2>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -107,11 +107,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e3m2_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e3m2>, multiplicandBPtxType = #nvvm.mma_type<e3m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e3m2>, multiplicandBPtxType = #nvvm.mma_type<e3m2>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e3m2>,
                          multiplicandBPtxType = #nvvm.mma_type<e3m2>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -125,11 +125,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e3m2_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e3m2>, multiplicandBPtxType = #nvvm.mma_type<e3m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e3m2>, multiplicandBPtxType = #nvvm.mma_type<e3m2>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e3m2>,
                          multiplicandBPtxType = #nvvm.mma_type<e3m2>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -148,11 +148,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e2m3_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m3>, multiplicandBPtxType = #nvvm.mma_type<e2m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e2m3>, multiplicandBPtxType = #nvvm.mma_type<e2m3>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e2m3>,
                          multiplicandBPtxType = #nvvm.mma_type<e2m3>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -166,11 +166,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e2m3_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m3>, multiplicandBPtxType = #nvvm.mma_type<e2m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e2m3>, multiplicandBPtxType = #nvvm.mma_type<e2m3>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e2m3>,
                          multiplicandBPtxType = #nvvm.mma_type<e2m3>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -189,11 +189,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e2m1_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m1>, multiplicandBPtxType = #nvvm.mma_type<e2m1>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e2m1>, multiplicandBPtxType = #nvvm.mma_type<e2m1>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e2m1>,
                          multiplicandBPtxType = #nvvm.mma_type<e2m1>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -207,11 +207,11 @@ func.func @nvvm_mma_sp_kind_m16n8k64_e2m1_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e2m1>, multiplicandBPtxType = #nvvm.mma_type<e2m1>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {kind = #nvvm.mma_kind<f8f6f4>, multiplicandAPtxType = #nvvm.mma_type<e2m1>, multiplicandBPtxType = #nvvm.mma_type<e2m1>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
                         {kind = #nvvm.mma_kind<f8f6f4>,
-                         metadataType = #nvvm.mma_sp_metadata<ordered>,
+                         orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e2m1>,
                          multiplicandBPtxType = #nvvm.mma_type<e2m1>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
diff --git a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir
index ca84d19612af5..a4e2812e54c12 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm-mma-sp-ordered.mlir
@@ -19,10 +19,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k16_f16_f16(
     %b0 : vector<2xf16>, %b1 : vector<2xf16>,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          shape = #nvvm.shape<m = 16, n = 8, k = 16>}
       : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   return
@@ -34,10 +34,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k16_f16_f32(
     %b0 : vector<2xf16>, %b1 : vector<2xf16>,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          shape = #nvvm.shape<m = 16, n = 8, k = 16>}
       : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   return
@@ -53,10 +53,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k32_f16_f16(
     %b0 : vector<2xf16>, %b1 : vector<2xf16>, %b2 : vector<2xf16>, %b3 : vector<2xf16>,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          shape = #nvvm.shape<m = 16, n = 8, k = 32>}
       : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   return
@@ -68,10 +68,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k32_f16_f32(
     %b0 : vector<2xf16>, %b1 : vector<2xf16>, %b2 : vector<2xf16>, %b3 : vector<2xf16>,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          shape = #nvvm.shape<m = 16, n = 8, k = 32>}
       : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   return
@@ -87,10 +87,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k16_bf16_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<bf16>,
                          multiplicandBPtxType = #nvvm.mma_type<bf16>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 16>}
@@ -104,10 +104,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k32_bf16_f32(
     %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<bf16>, multiplicandBPtxType = #nvvm.mma_type<bf16>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<bf16>,
                          multiplicandBPtxType = #nvvm.mma_type<bf16>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 32>}
@@ -125,10 +125,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k8_tf32_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, shape = #nvvm.shape<m = 16, n = 8, k = 8>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 8>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<tf32>,
                          multiplicandBPtxType = #nvvm.mma_type<tf32>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 8>}
@@ -142,10 +142,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k16_tf32_f32(
     %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<tf32>, multiplicandBPtxType = #nvvm.mma_type<tf32>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<tf32>,
                          multiplicandBPtxType = #nvvm.mma_type<tf32>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 16>}
@@ -163,10 +163,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k32_s8_s32(
     %b0 : i32, %b1 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<s8>,
                          multiplicandBPtxType = #nvvm.mma_type<s8>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -181,10 +181,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k32_s8_s32_satfinite(
     %b0 : i32, %b1 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<s8>,
                          multiplicandBPtxType = #nvvm.mma_type<s8>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>,
@@ -199,10 +199,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_s8_s32(
     %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<s8>,
                          multiplicandBPtxType = #nvvm.mma_type<s8>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -221,10 +221,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k32_u8_s32(
     %b0 : i32, %b1 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 32>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<u8>,
                          multiplicandBPtxType = #nvvm.mma_type<u8>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -239,10 +239,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_u8_s32(
     %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u8>, multiplicandBPtxType = #nvvm.mma_type<u8>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<u8>,
                          multiplicandBPtxType = #nvvm.mma_type<u8>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -261,10 +261,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_s4_s32(
     %b0 : i32, %b1 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<s4>,
                          multiplicandBPtxType = #nvvm.mma_type<s4>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -279,10 +279,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k128_s4_s32(
     %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<s4>,
                          multiplicandBPtxType = #nvvm.mma_type<s4>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -301,10 +301,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_u4_s32(
     %b0 : i32, %b1 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<u4>,
                          multiplicandBPtxType = #nvvm.mma_type<u4>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -319,10 +319,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k128_u4_s32(
     %b0 : i32, %b1 : i32, %b2 : i32, %b3 : i32,
     %c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}, {{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, multiplicandAPtxType = #nvvm.mma_type<u4>, multiplicandBPtxType = #nvvm.mma_type<u4>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 128>} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1, %b2, %b3] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<u4>,
                          multiplicandBPtxType = #nvvm.mma_type<u4>,
                          intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>,
@@ -342,10 +342,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_e4m3_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e4m3>,
                          multiplicandBPtxType = #nvvm.mma_type<e4m3>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -359,10 +359,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_e4m3_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e4m3>, multiplicandBPtxType = #nvvm.mma_type<e4m3>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e4m3>,
                          multiplicandBPtxType = #nvvm.mma_type<e4m3>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -381,10 +381,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_e5m2_f16(
     %b0 : i32, %b1 : i32,
     %c0 : vector<2xf16>, %c1 : vector<2xf16>,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e5m2>,
                          multiplicandBPtxType = #nvvm.mma_type<e5m2>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}
@@ -398,10 +398,10 @@ func.func @nvvm_mma_sp_ordered_m16n8k64_e5m2_f32(
     %b0 : i32, %b1 : i32,
     %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
     %meta : i32, %sel : i32) {
-  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {metadataType = #nvvm.mma_sp_metadata<ordered>, multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
+  // CHECK: nvvm.mma.sp.sync A[{{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] sparseMetadata[{{.*}}] selector[{{.*}}] {multiplicandAPtxType = #nvvm.mma_type<e5m2>, multiplicandBPtxType = #nvvm.mma_type<e5m2>, orderedMetadata, shape = #nvvm.shape<m = 16, n = 8, k = 64>} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>
   %0 = nvvm.mma.sp.sync A[%a0, %a1] B[%b0, %b1] C[%c0, %c1, %c2, %c3]
                         sparseMetadata[%meta] selector[%sel]
-                        {metadataType = #nvvm.mma_sp_metadata<ordered>,
+                        {orderedMetadata,
                          multiplicandAPtxType = #nvvm.mma_type<e5m2>,
                          multiplicandBPtxType = #nvvm.mma_type<e5m2>,
                          shape = #nvvm.shape<m = 16, n = 8, k = 64>}



More information about the Mlir-commits mailing list