[Mlir-commits] [mlir] [MLIR][NVVM] Add tcgen05.mma MLIR Ops (PR #164356)
Durgadoss R
llvmlistbot at llvm.org
Fri Oct 24 03:16:43 PDT 2025
================
@@ -4586,6 +4586,661 @@ def NVVM_ClusterLaunchControlQueryCancelOp
}];
}
+//===----------------------------------------------------------------------===//
+// NVVM tcgen05.mma attributes
+//===----------------------------------------------------------------------===//
+
+def Tcgen05MMAKindF16 : I32EnumAttrCase<"F16", 0, "f16">;
+def Tcgen05MMAKindTF32 : I32EnumAttrCase<"TF32", 1, "tf32">;
+def Tcgen05MMAKindF8F6F4 : I32EnumAttrCase<"F8F6F4", 2, "f8f6f4">;
+def Tcgen05MMAKindINT8 : I32EnumAttrCase<"I8", 3, "i8">;
+
+def Tcgen05MMAKind : I32EnumAttr<
+ "Tcgen05MMAKind",
+ "tcgen05 MMA Supported Types",
+ [Tcgen05MMAKindF8F6F4, Tcgen05MMAKindINT8, Tcgen05MMAKindF16,
+ Tcgen05MMAKindTF32]> {
+ let cppNamespace = "::mlir::NVVM";
+ let genSpecializedAttr = 0;
+}
+
+def Tcgen05MMAKindAttr : EnumAttr<NVVM_Dialect, Tcgen05MMAKind, "tcgen05_mma_kind"> {
+ let description = [{
+ The Tcgen05MMAKind attribute describes the allowed set of types for matrix A and B in the tcgen05.mma.{sp} Op. The following are supported types for each kind:
+
+ ```
+ +--------+--------------------------------------------+
+ | Matrix | A / B |
+ +--------+--------------------------------------------+
+ | f16 | f16, bf16 |
+ | tf32 | tf32 |
+ | f8f6f4 | e4m3, e5m2, e2m3, e3m2, e2m1 |
+ | i8 | unsigned 8b, signed 8b |
+ +--------+--------------------------------------------+
+ ```
+ }];
+ let assemblyFormat = "`<` $value `>`";
+}
+
+def Tcgen05MMACollectorOpDiscard : I32EnumAttrCase<"DISCARD", 0, "discard">;
+def Tcgen05MMACollectorOpLastUse : I32EnumAttrCase<"LASTUSE", 1, "lastuse">;
+def Tcgen05MMACollectorOpFill : I32EnumAttrCase<"FILL", 2, "fill">;
+def Tcgen05MMACollectorOpUse : I32EnumAttrCase<"USE", 3, "use">;
+
+def Tcgen05MMACollectorOp : I32EnumAttr<
+ "Tcgen05MMACollectorOp",
+ "tcgen05.mma Collector Buffer Operation",
+ [Tcgen05MMACollectorOpDiscard,
+ Tcgen05MMACollectorOpLastUse,
+ Tcgen05MMACollectorOpFill,
+ Tcgen05MMACollectorOpUse]> {
+ let cppNamespace = "::mlir::NVVM";
+ let genSpecializedAttr = 0;
+}
+
+def Tcgen05MMACollectorOpAttr : EnumAttr<NVVM_Dialect, Tcgen05MMACollectorOp, "tcgen05_mma_collectorop"> {
+ let assemblyFormat = "`<` $value `>`";
+}
+
+//===----------------------------------------------------------------------===//
+// NVVM tcgen05.mma Ops.
+//===----------------------------------------------------------------------===//
+
+def NVVM_Tcgen05MMAOp : NVVM_Op<"tcgen05.mma",
+ [AttrSizedOperandSegments,
+ NVVMRequiresSMa<[100, 110]>]> {
+
+ let summary = "Performs MMA operation on 5th-gen tensor cores";
+
+ let arguments = (ins
+ Tcgen05MMAKindAttr:$kind,
+ CTAGroupKindAttr:$ctaGroup,
+ DefaultValuedAttr<Tcgen05MMACollectorOpAttr,
+ "Tcgen05MMACollectorOp::DISCARD">:$collectorOp,
+ UnitAttr:$ashift,
+ LLVM_PointerTensor:$matrixD,
+ AnyTypeOf<[LLVM_PointerTensor, I64]>:$matrixA,
+ I64:$matrixB,
+ I32:$idesc,
+ I1:$enableInputD,
+ Optional<I64>:$scaleInputD,
+ Optional<FixedVectorOfLengthAndType<[4, 8], [I32]>>:$disableOutputLane
+ );
+
+ let description = [{
+ The `tcgen05.mma` operation is an asynchronous tensor core instruction
+ that performs matrix multiplication and accumulation in a single fused
+ operation. It targets 5th-generation tensor cores, providing developers
+ with fine-grained control over execution and scheduling.
+
+ ```
+ D = A * B + (D * 2^-scaleInputD) // if `scaleInputD` is provided
+ D = A * B // if `enableInputD` is false
+ D = A * B + D // otherwise
+ ```
+
+ where:
+ - A is an `M x K` matrix in tensor memory or described using shared memory descriptor
+ - B is a `K x N` matrix described using shared memory descriptor
+ - D is an `M x N` accumulator matrix in tensor memory
+
+ The `shared memory descriptor` can be generated using `tcgen05.mma_smem_desc` Op
+
+ - idesc is a 32-bit value representing the [Instruction Descriptor](https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instruction-descriptor)
+
+ Optional Operands:
+ - `scaleInputD` is an Immediate value operand used for scaling D matrix by 2 ^ (-scaleInputD). The valid range is [0, 15]
+
+ - `disableOutputLane` is a vector mask for selective output
+ * vector<4 x i32> when ctaGroup is CTA_1
+ * vector<8 x i32> when ctaGroup is CTA_2
+
+ Required Attributes:
+ - `kind` specifies the computation data type and precision
+ * f16 : 16-bit floating point (half precision)
+ * tf32 : Tensor Float 32 (truncated 32-bit float)
+ * f8f6f4 : Mixed precision FP8/FP6/FP4
+ * i8 : 8-bit integer operations
+
+ - `ctaGroup` specifies CTA group configuration
+ * cta_1: MMA will be performed on the current thread's CTA
+ * cta_2: MMA will be performed on the current thread and its peer CTA
+
+ Default Attributes:
+ - collectorOp specifies the collector buffer operations for matrix A
+ * discard : Release buffer after use (default)
+ * lastuse : Mark buffer for last use
+ * fill : Fill buffer
+ * use : Use buffer without modification
+
+ - `ashift` shifts the rows of the A matrix down by one row
+
+ [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-mma-instructions-mma)
+ }];
+
+ let assemblyFormat = [{
+ $matrixD `,` $matrixA `,` $matrixB `,` $idesc `,` $enableInputD (`scale` `=` $scaleInputD^)?
+ (`mask` `=` $disableOutputLane^)? attr-dict `:` `(` type(operands) `)`
+ }];
+
+ let extraClassDeclaration = [{
+ static mlir::NVVM::IDArgPair getIntrinsicIDAndArgs(
+ Operation &op, LLVM::ModuleTranslation &mt,
+ llvm::IRBuilderBase &builder);
+ }];
+
+ let llvmBuilder = [{
----------------
durga4github wrote:
ok, thanks!
https://github.com/llvm/llvm-project/pull/164356
More information about the Mlir-commits
mailing list