[Mlir-commits] [mlir] [MLIR][NVVM] Add tcgen05.mma MLIR Ops (PR #164356)

Rajat Bajpai llvmlistbot at llvm.org
Fri Oct 24 00:10:39 PDT 2025


================
@@ -2694,6 +2706,591 @@ NVVM::IDArgPair ClusterLaunchControlQueryCancelOp::getIntrinsicIDAndArgs(
   return {intrinsicID, args};
 }
 
+//===----------------------------------------------------------------------===//
+// NVVM tcgen05.mma functions
+//===----------------------------------------------------------------------===//
+
+mlir::NVVM::IDArgPair
+Tcgen05MMAOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
+                                    llvm::IRBuilderBase &builder) {
+
+  auto thisOp = cast<NVVM::Tcgen05MMAOp>(op);
+  llvm::SmallVector<llvm::Value *> args;
+
+  args.push_back(mt.lookupValue(thisOp.getMatrixD()));
+  llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
+  args.push_back(A);
+  args.push_back(mt.lookupValue(thisOp.getMatrixB()));
+  args.push_back(mt.lookupValue(thisOp.getIdesc()));
+  args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
+
+  llvm::Value *ScaleInputD = mt.lookupValue(thisOp.getScaleInputD());
+  bool hasScaleInputD = ScaleInputD != nullptr;
+  llvm::Value *DisableOutputLane =
+      mt.lookupValue(thisOp.getDisableOutputLane());
+  bool hasDisableOutputLane = DisableOutputLane != nullptr;
+
+  const unsigned ctaGroup =
+      static_cast<unsigned>(getNVVMCtaGroupKind(thisOp.getCtaGroup()));
+
+  const bool isATensor = isa<llvm::PointerType>(A->getType());
+  const bool enableAshift = thisOp.getAshift();
+
+  // [hasDisableOutputLane][hasScaleInputD][isATensor][CtaGroup][EnableAShift]
+  static constexpr llvm::Intrinsic::ID tcgen05MMAIDs[2][2][2][2][2] = {
+      // without disable output lane
+      {// without scale input D
+       {
+           // shared
+           {// cg1
+            {llvm::Intrinsic::nvvm_tcgen05_mma_shared, notIntrinsic},
+            // cg2
+            {llvm::Intrinsic::nvvm_tcgen05_mma_shared, notIntrinsic}},
+           {// tensor
+            {
+                // cg1
+                llvm::Intrinsic::nvvm_tcgen05_mma_tensor,
+                llvm::Intrinsic::nvvm_tcgen05_mma_tensor_ashift,
+            },
+            {
+                // cg2
+                llvm::Intrinsic::nvvm_tcgen05_mma_tensor,
+                llvm::Intrinsic::nvvm_tcgen05_mma_tensor_ashift,
+            }},
+       },
+       // with scale input D
+       { // shared
+        {// cg1
+         {llvm::Intrinsic::nvvm_tcgen05_mma_shared_scale_d, notIntrinsic},
+         // cg2
+         {llvm::Intrinsic::nvvm_tcgen05_mma_shared_scale_d, notIntrinsic}},
+        {// tensor
+         {
+             // cg1
+             llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d,
+             llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_ashift,
+         },
+         {
+             // cg2
+             llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d,
+             llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_ashift,
+         }}}},
+      // with disable output lane
+      {  // without scale input D
+       { // shared
+        {// cg1
+         {llvm::Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1,
+          notIntrinsic},
+         // cg2
+         {llvm::Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2,
+          notIntrinsic}},
+        // tensor
+        {// cg1
+         {
+             llvm::Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1,
+             llvm::Intrinsic::
+                 nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift,
+         },
+         // cg2
+         {
+             llvm::Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2,
+             llvm::Intrinsic::
+                 nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift,
+         }}},
+       // with scale input D
+       { // shared
+        {// cg1
+         {llvm::Intrinsic::
+              nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1,
+          notIntrinsic},
+         // cg2
+         {llvm::Intrinsic::
+              nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2,
+          notIntrinsic}},
+        // tensor
+        {// cg1
+         {llvm::Intrinsic::
+              nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1,
+          llvm::Intrinsic::
+              nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift},
+         // cg2
+         {
+             llvm::Intrinsic::
+                 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2,
+             llvm::Intrinsic::
+                 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift,
+         }}}}};
+
+  if (hasDisableOutputLane) {
+    if (hasScaleInputD) {
+      args.push_back(ScaleInputD);
+    }
+    args.push_back(DisableOutputLane);
+    args.push_back(builder.getInt32(static_cast<unsigned>(thisOp.getKind())));
+  } else {
+    if (hasScaleInputD) {
----------------
rajatbajpai wrote:

nit: you can move this out of both branches.
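
A minimal sketch of what that hoist could look like (not the PR's actual code):
the ScaleInputD push moves ahead of the branch on hasDisableOutputLane. This
assumes the truncated else-branch also begins with the same push; variable
names are taken from the hunk above, and the rest of each branch is elided.

  // Push the optional scale-d operand once, instead of in both branches.
  if (hasScaleInputD)
    args.push_back(ScaleInputD);

  if (hasDisableOutputLane) {
    args.push_back(DisableOutputLane);
    args.push_back(builder.getInt32(static_cast<unsigned>(thisOp.getKind())));
  } else {
    // remaining else-branch arguments, as in the unquoted part of the hunk
  }

Since the tcgen05MMAIDs table is indexed only by the boolean flags, the cta
group, and ashift, hoisting the push should not change which intrinsic is
selected, only where the argument gets appended.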

https://github.com/llvm/llvm-project/pull/164356

