[Mlir-commits] [mlir] 4a5b051 - [MLIR][NVVM] Update TMA Store Op (#155435)
llvmlistbot at llvm.org
Thu Aug 28 22:57:22 PDT 2025
Author: Durgadoss R
Date: 2025-08-29T11:27:17+05:30
New Revision: 4a5b051d53ec0fcbce6b86f6d735fa343a99b3f3
URL: https://github.com/llvm/llvm-project/commit/4a5b051d53ec0fcbce6b86f6d735fa343a99b3f3
DIFF: https://github.com/llvm/llvm-project/commit/4a5b051d53ec0fcbce6b86f6d735fa343a99b3f3.diff
LOG: [MLIR][NVVM] Update TMA Store Op (#155435)
This patch adds im2col and scatter mode support
to the TMA Store Op. The lowering is also updated
to use intrinsics, except when a predicate is
given. This completes the Blackwell additions to
this Op.
* lit tests are added for all combinations.
* The TMA reduce invalid tests are moved to their own file.
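For reference, the updated assembly now takes an optional l2_cache_hint operand
and a tma_store_mode attribute. The snippets below mirror the lit tests added in
this patch (the SSA value names are illustrative):

  nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src,
      box[%crd0, %crd1, %crd2] l2_cache_hint = %ch
      {mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>

  nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src,
      box[%crd0, %crd1, %crd2, %crd3, %crd4]
      {mode = #nvvm.tma_store_mode<tile_scatter4>} : !llvm.ptr, !llvm.ptr<3>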
Signed-off-by: Durgadoss R <durgadossr at nvidia.com>
Added:
mlir/test/Target/LLVMIR/nvvm/tma_store.mlir
mlir/test/Target/LLVMIR/nvvm/tma_store_invalid.mlir
mlir/test/Target/LLVMIR/nvvm/tma_store_reduce_invalid.mlir
Modified:
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 85e6e064f22a9..9528da05c9fd6 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -2353,6 +2353,20 @@ def TMALoadModeAttr : EnumAttr<NVVM_Dialect, TMALoadMode, "tma_load_mode"> {
let assemblyFormat = "`<` $value `>`";
}
+// List of modes supported for TMA Store and Reduction Ops
+def TMAStoreModeTile : I32EnumAttrCase<"TILE", 0, "tile">;
+def TMAStoreModeIm2Col : I32EnumAttrCase<"IM2COL", 1, "im2col">;
+def TMAStoreModeTileScatter4 : I32EnumAttrCase<"TILE_SCATTER4", 2, "tile_scatter4">;
+
+def TMAStoreMode : I32EnumAttr<"TMAStoreMode", "NVVM TMA Store Mode",
+ [TMAStoreModeTile, TMAStoreModeIm2Col, TMAStoreModeTileScatter4]> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::NVVM";
+}
+def TMAStoreModeAttr : EnumAttr<NVVM_Dialect, TMAStoreMode, "tma_store_mode"> {
+ let assemblyFormat = "`<` $value `>`";
+}
+
def NVVM_CpAsyncBulkCommitGroupOp : NVVM_Op<"cp.async.bulk.commit.group">,
Arguments<(ins )> {
let assemblyFormat = "attr-dict";
@@ -2479,20 +2493,43 @@ def NVVM_CpAsyncBulkTensorGlobalToSharedClusterOp :
}
def NVVM_CpAsyncBulkTensorSharedCTAToGlobalOp :
- NVVM_Op<"cp.async.bulk.tensor.global.shared.cta",
- [DeclareOpInterfaceMethods<BasicPtxBuilderOpInterface>,
+ NVVM_PTXBuilder_Op<"cp.async.bulk.tensor.global.shared.cta",
+ [DeclareOpInterfaceMethods<BasicPtxBuilderOpInterface>,
AttrSizedOperandSegments]>,
- Arguments<(ins LLVM_AnyPointer:$tmaDescriptor,
- LLVM_PointerShared:$srcMem,
- Variadic<I32>:$coordinates,
- PtxPredicate:$predicate)> {
+ Arguments<(ins LLVM_PointerGeneric:$tmaDescriptor,
+ LLVM_PointerShared:$srcMem,
+ Variadic<I32>:$coordinates,
+ Optional<I64>:$l2CacheHint,
+ DefaultValuedAttr<TMAStoreModeAttr, "TMAStoreMode::TILE">:$mode,
+ PtxPredicate:$predicate)> {
+ let description = [{
+ Initiates an asynchronous copy of the tensor data from shared::cta
+ memory to global memory. This Op supports all the store modes specified in
+ `TMAStoreMode`.
+
+ The `l2CacheHint` operand is optional; it specifies the cache eviction
+ policy that may be used during the memory access.
+
+ [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-cp-async-bulk-tensor)
+ }];
+
let assemblyFormat = [{
$tmaDescriptor `,`
$srcMem `,`
`box` `[`$coordinates `]`
- (`,` `predicate` `=` $predicate^)?
- attr-dict `:` type(operands)
+ (`l2_cache_hint` `=` $l2CacheHint^ )?
+ (`,` `predicate` `=` $predicate^)?
+ attr-dict `:` type($tmaDescriptor) `,` type($srcMem)
+ }];
+
+ let extraClassDeclaration = [{
+ bool hasIntrinsic() { return !getPredicate(); }
+
+ static mlir::NVVM::IDArgPair
+ getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
+ llvm::IRBuilderBase& builder);
}];
+
let extraClassDefinition = [{
std::string $cppClass::getPtx() {
int dim = getCoordinates().size();
@@ -2508,6 +2545,12 @@ def NVVM_CpAsyncBulkTensorSharedCTAToGlobalOp :
}
}];
let hasVerifier = 1;
+
+ string llvmBuilder = [{
+ auto [id, args] = NVVM::CpAsyncBulkTensorSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
+ *op, moduleTranslation, builder);
+ createIntrinsicCall(builder, id, args);
+ }];
}
//===----------------------------------------------------------------------===//
@@ -2661,19 +2704,6 @@ def NVVM_CpAsyncBulkTensorPrefetchOp :
}];
}
-// List of modes supported for TMA Store and Reduction Ops
-def TMAStoreModeTile : I32EnumAttrCase<"TILE", 0, "tile">;
-def TMAStoreModeIm2Col : I32EnumAttrCase<"IM2COL", 1, "im2col">;
-
-def TMAStoreMode : I32EnumAttr<"TMAStoreMode", "NVVM TMA Store Mode",
- [TMAStoreModeTile, TMAStoreModeIm2Col]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::NVVM";
-}
-def TMAStoreModeAttr : EnumAttr<NVVM_Dialect, TMAStoreMode, "tma_store_mode"> {
- let assemblyFormat = "`<` $value `>`";
-}
-
// List of Reduction Ops supported with TMA Store
def TMAReduxKindAdd : I32EnumAttrCase<"ADD", 0, "add">;
def TMAReduxKindMin : I32EnumAttrCase<"MIN", 1, "min">;
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 09f506ad40b17..ab1666a0e8e75 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -1026,8 +1026,10 @@ struct NVGPUTmaAsyncStoreOpLowering
coords[index] = truncToI32(b, value);
}
+ // TODO: Enhance the NVGPU Op for other modes too
rewriter.replaceOpWithNewOp<NVVM::CpAsyncBulkTensorSharedCTAToGlobalOp>(
- op, adaptor.getTensorMapDescriptor(), dest, coords,
+ op, adaptor.getTensorMapDescriptor(), dest, coords, Value{},
+ NVVM::TMAStoreMode::TILE, // default is TILE mode
adaptor.getPredicate());
return success();
}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index ec1e6edc9bcb4..ff6ccbaac2b35 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -81,8 +81,27 @@ LogicalResult CpAsyncBulkTensorGlobalToSharedClusterOp::verify() {
}
LogicalResult CpAsyncBulkTensorSharedCTAToGlobalOp::verify() {
- if (getCoordinates().size() > 5)
- return emitError("Maximum 5 coordinates and dimension is supported.");
+ TMAStoreMode mode = getMode();
+ // We lower through inline-ptx when getPredicate() is true.
+ // a) Only TILE mode is supported
+ // b) Cache-hint is not supported
+ if (getPredicate()) {
+ if (mode != TMAStoreMode::TILE)
+ return emitError("Inline-ptx lowering supported only for Tile mode.");
+ if (getL2CacheHint())
+ return emitError("Inline-ptx lowering unsupported with L2 cache-hint.");
+ }
+
+ size_t dims = getCoordinates().size();
+ switch (mode) {
+ case TMAStoreMode::TILE:
+ return cpAsyncBulkTensorCommonVerifier(dims, false, 0, getLoc());
+ case TMAStoreMode::IM2COL:
+ return cpAsyncBulkTensorCommonVerifier(dims, true, 0, getLoc());
+ case TMAStoreMode::TILE_SCATTER4:
+ if (dims != 5)
+ return emitError("Scatter4 mode expects 5 coordinates");
+ }
return success();
}
@@ -139,9 +158,17 @@ LogicalResult CpAsyncBulkTensorPrefetchOp::verify() {
}
LogicalResult CpAsyncBulkTensorReduceOp::verify() {
- bool isIm2Col = (getMode() == TMAStoreMode::IM2COL);
- return cpAsyncBulkTensorCommonVerifier(getCoordinates().size(), isIm2Col, 0,
- getLoc());
+ TMAStoreMode mode = getMode();
+ size_t dims = getCoordinates().size();
+ switch (mode) {
+ case TMAStoreMode::TILE:
+ return cpAsyncBulkTensorCommonVerifier(dims, false, 0, getLoc());
+ case TMAStoreMode::IM2COL:
+ return cpAsyncBulkTensorCommonVerifier(dims, true, 0, getLoc());
+ case TMAStoreMode::TILE_SCATTER4:
+ return emitError("Scatter mode unsupported for CpAsyncBulkTensorReduceOp");
+ }
+ return success();
}
LogicalResult ConvertFloatToTF32Op::verify() {
@@ -1521,6 +1548,51 @@ mlir::NVVM::IDArgPair CpAsyncBulkTensorPrefetchOp::getIntrinsicIDAndArgs(
return {id, std::move(args)};
}
+mlir::NVVM::IDArgPair
+CpAsyncBulkTensorSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
+ Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
+ auto thisOp = cast<NVVM::CpAsyncBulkTensorSharedCTAToGlobalOp>(op);
+ llvm::SmallVector<llvm::Value *> args;
+
+ // Fill the Intrinsic Args
+ args.push_back(mt.lookupValue(thisOp.getSrcMem()));
+ args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
+
+ for (auto v : thisOp.getCoordinates())
+ args.push_back(mt.lookupValue(v));
+
+ mlir::Value cacheHint = thisOp.getL2CacheHint();
+ const bool hasCacheHint = static_cast<bool>(cacheHint);
+ llvm::Value *i64Unused =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
+ args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
+ args.push_back(builder.getInt1(hasCacheHint));
+
+ const unsigned NI = llvm::Intrinsic::not_intrinsic;
+ static constexpr llvm::Intrinsic::ID IDTable[][6] = {
+ {NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_5d},
+ {NI, NI, NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_im2col_5d},
+ {NI, NI, NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_scatter4_2d}};
+
+ static_assert(getMaxEnumValForTMAStoreMode() == std::size(IDTable) - 1,
+ "TMAStoreModes must match number of rows in IDTable");
+ size_t mode = static_cast<size_t>(thisOp.getMode());
+ size_t dim = thisOp.getCoordinates().size();
+ llvm::Intrinsic::ID id = IDTable[mode][dim];
+ if (id == llvm::Intrinsic::not_intrinsic)
+ llvm_unreachable(
+ "Invalid intrinsic for CpAsyncBulkTensorSharedCTAToGlobalOp.");
+
+ return {id, std::move(args)};
+}
+
#define CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, mode) \
llvm::Intrinsic::nvvm_cp_async_bulk_tensor_##op##_##mode##_##dim##d
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 2a19c72ab0840..89075120d16ea 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -214,47 +214,36 @@ func.func @tma_load_multicast5d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>,
// CHECK-LABEL: @tma_store_1d
func.func @tma_store_1d(%tmaDescriptor: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %p : i1) {
- // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.1d.global.shared::cta.bulk_group [$0, {$2} ], [$1];", "l,r,r"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0] : !llvm.ptr, !llvm.ptr<3>, i32
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$3 cp.async.bulk.tensor.1d.global.shared::cta.bulk_group [$0, {$2} ], [$1];", "l,r,r,b"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0], predicate=%p : !llvm.ptr, !llvm.ptr<3>, i32, i1
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0], predicate=%p : !llvm.ptr, !llvm.ptr<3>
return
}
// CHECK-LABEL: @tma_store_2d
func.func @tma_store_2d(%tmaDescriptor: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %p : i1) {
- // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.2d.global.shared::cta.bulk_group [$0, {$2, $3} ], [$1];", "l,r,r,r"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1] : !llvm.ptr, !llvm.ptr<3>, i32, i32
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$4 cp.async.bulk.tensor.2d.global.shared::cta.bulk_group [$0, {$2, $3} ], [$1];", "l,r,r,r,b"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1], predicate=%p : !llvm.ptr, !llvm.ptr<3>, i32, i32, i1
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1], predicate=%p : !llvm.ptr, !llvm.ptr<3>
return
}
// CHECK-LABEL: @tma_store_3d
func.func @tma_store_3d(%tmaDescriptor: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %p : i1) {
- // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.3d.global.shared::cta.bulk_group [$0, {$2, $3, $4} ], [$1];", "l,r,r,r,r"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2] : !llvm.ptr, !llvm.ptr<3>, i32, i32, i32
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.3d.global.shared::cta.bulk_group [$0, {$2, $3, $4} ], [$1];", "l,r,r,r,r,b"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2], predicate=%p : !llvm.ptr, !llvm.ptr<3>, i32, i32, i32, i1
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2], predicate=%p : !llvm.ptr, !llvm.ptr<3>
return
}
// CHECK-LABEL: @tma_store_4d
func.func @tma_store_4d(%tmaDescriptor: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %p : i1) {
- // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.4d.global.shared::cta.bulk_group [$0, {$2, $3, $4, $5} ], [$1];", "l,r,r,r,r,r"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2,%crd3] : !llvm.ptr, !llvm.ptr<3>, i32, i32, i32, i32
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.4d.global.shared::cta.bulk_group [$0, {$2, $3, $4, $5} ], [$1];", "l,r,r,r,r,r,b"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2,%crd3], predicate=%p : !llvm.ptr, !llvm.ptr<3>, i32, i32, i32, i32, i1
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2,%crd3], predicate=%p : !llvm.ptr, !llvm.ptr<3>
return
}
// CHECK-LABEL: @tma_store_5d
func.func @tma_store_5d(%tmaDescriptor: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %p : i1) {
- // CHECK-NEXT: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.5d.global.shared::cta.bulk_group [$0, {$2, $3, $4, $5, $6} ], [$1];", "l,r,r,r,r,r,r"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] : !llvm.ptr, !llvm.ptr<3>, i32, i32, i32, i32, i32
-
// CHECK-NEXT: llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.5d.global.shared::cta.bulk_group [$0, {$2, $3, $4, $5, $6} ], [$1];", "l,r,r,r,r,r,r,b"
- nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4], predicate=%p : !llvm.ptr, !llvm.ptr<3>, i32, i32, i32, i32, i32, i1
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tmaDescriptor, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4], predicate=%p : !llvm.ptr, !llvm.ptr<3>
return
}
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_store.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_store.mlir
new file mode 100644
index 0000000000000..b77927fcfb47b
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_store.mlir
@@ -0,0 +1,94 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+llvm.func @tma_store_1d(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %ch : i64) {
+ // CHECK-LABEL: define void @tma_store_1d(ptr %0, ptr addrspace(3) %1, i32 %2, i64 %3) {
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.1d(ptr addrspace(3) %1, ptr %0, i32 %2, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.1d(ptr addrspace(3) %1, ptr %0, i32 %2, i64 %3, i1 true)
+ // CHECK-NEXT: ret void
+ // CHECK-NEXT: }
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0] : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0] l2_cache_hint=%ch : !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
+
+llvm.func @tma_store_2d(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %ch : i64) {
+ // CHECK-LABEL: define void @tma_store_2d(ptr %0, ptr addrspace(3) %1, i32 %2, i32 %3, i64 %4) {
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.2d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.2d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i64 %4, i1 true)
+ // CHECK-NEXT: ret void
+ // CHECK-NEXT: }
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1] : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1] l2_cache_hint=%ch : !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
+
+llvm.func @tma_store_3d(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %ch : i64) {
+ // CHECK-LABEL: define void @tma_store_3d(ptr %0, ptr addrspace(3) %1, i32 %2, i32 %3, i32 %4, i64 %5) {
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.3d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.3d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i64 %5, i1 true)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.3d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.3d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i64 %5, i1 true)
+ // CHECK-NEXT: ret void
+ // CHECK-NEXT: }
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2] : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2] l2_cache_hint=%ch : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2] {mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2] l2_cache_hint=%ch {mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+ llvm.return
+}
+
+llvm.func @tma_store_4d(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %ch : i64) {
+ // CHECK-LABEL: define void @tma_store_4d(ptr %0, ptr addrspace(3) %1, i32 %2, i32 %3, i32 %4, i32 %5, i64 %6) {
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.4d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.4d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i64 %6, i1 true)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.4d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.4d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i64 %6, i1 true)
+ // CHECK-NEXT: ret void
+ // CHECK-NEXT: }
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3] : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3] l2_cache_hint=%ch : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3] {mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3] l2_cache_hint=%ch {mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+ llvm.return
+}
+
+llvm.func @tma_store_5d(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %ch : i64) {
+ // CHECK-LABEL: define void @tma_store_5d(ptr %0, ptr addrspace(3) %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7) {
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.5d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.5d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7, i1 true)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.5d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.5d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7, i1 true)
+ // CHECK-NEXT: ret void
+ // CHECK-NEXT: }
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] l2_cache_hint=%ch : !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] {mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] l2_cache_hint=%ch {mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+ llvm.return
+}
+
+llvm.func @tma_store_scatter(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %ch : i64) {
+ // CHECK-LABEL: define void @tma_store_scatter(ptr %0, ptr addrspace(3) %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7) {
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.scatter4.2d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 0, i1 false)
+ // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.scatter4.2d(ptr addrspace(3) %1, ptr %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7, i1 true)
+ // CHECK-NEXT: ret void
+ // CHECK-NEXT: }
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] {mode = #nvvm.tma_store_mode<tile_scatter4>}: !llvm.ptr, !llvm.ptr<3>
+
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3,%crd4] l2_cache_hint=%ch {mode = #nvvm.tma_store_mode<tile_scatter4>}: !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_store_invalid.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_store_invalid.mlir
new file mode 100644
index 0000000000000..9d9dc8e35b5db
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_store_invalid.mlir
@@ -0,0 +1,46 @@
+// RUN: mlir-translate -verify-diagnostics -split-input-file -mlir-to-llvmir %s
+
+// -----
+
+llvm.func @tma_store_1d_im2col(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %ch : i64) {
+ // expected-error @below {{to use im2col mode, the tensor has to be at least 3-dimensional}}
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0] {mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
+
+// -----
+
+llvm.func @tma_store_0d(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>) {
+ // expected-error @below {{expects coordinates between 1 to 5 dimension}}
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[] : !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
+
+// -----
+
+llvm.func @tma_store_scatter(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %ch : i64) {
+ // expected-error @below {{Scatter4 mode expects 5 coordinates}}
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0,%crd1,%crd2,%crd3] l2_cache_hint=%ch {mode = #nvvm.tma_store_mode<tile_scatter4>}: !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
+
+// -----
+
+llvm.func @tma_store_asm_ch(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %ch : i64, %p : i1) {
+ // expected-error @below {{Inline-ptx lowering unsupported with L2 cache-hint.}}
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0] l2_cache_hint=%ch, predicate=%p : !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
+
+// -----
+
+llvm.func @tma_store_asm_im2col(%tma_desc: !llvm.ptr, %src : !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %ch : i64, %p : i1) {
+ // expected-error @below {{Inline-ptx lowering supported only for Tile mode.}}
+ nvvm.cp.async.bulk.tensor.global.shared.cta %tma_desc, %src, box[%crd0, %crd1, %crd2], predicate=%p {mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
+
+ llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce_invalid.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce_invalid.mlir
new file mode 100644
index 0000000000000..2fcf00fa3b670
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce_invalid.mlir
@@ -0,0 +1,25 @@
+// RUN: mlir-translate -verify-diagnostics -split-input-file -mlir-to-llvmir %s
+
+// -----
+
+llvm.func @tma_reduce_0d(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %ch : i64) {
+ // expected-error @below {{expects coordinates between 1 to 5 dimension}}
+ nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[] {redKind = #nvvm.tma_redux_kind<add>}: !llvm.ptr, !llvm.ptr<3>
+ llvm.return
+}
+
+// -----
+
+llvm.func @tma_reduce_2d_im2col(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %d0 : i32, %d1 : i32, %ch : i64) {
+ // expected-error @below {{to use im2col mode, the tensor has to be at least 3-dimensional}}
+ nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] {redKind = #nvvm.tma_redux_kind<and>, mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
+ llvm.return
+}
+
+// -----
+
+llvm.func @tma_store_reduce_scatter(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %d0 : i32, %ch : i64) {
+ // expected-error @below {{Scatter mode unsupported for CpAsyncBulkTensorReduceOp}}
+ nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0] {redKind = #nvvm.tma_redux_kind<add>, mode = #nvvm.tma_store_mode<tile_scatter4>} : !llvm.ptr, !llvm.ptr<3>
+ llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir b/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
index db36afd6f22dd..863118cd8dd71 100644
--- a/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
@@ -104,22 +104,6 @@ llvm.func @nvvm_fence_proxy_release() {
// -----
-llvm.func @tma_reduce_0d(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %ch : i64) {
- // expected-error @below {{expects coordinates between 1 to 5 dimension}}
- nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[] {redKind = #nvvm.tma_redux_kind<add>}: !llvm.ptr, !llvm.ptr<3>
- llvm.return
-}
-
-// -----
-
-llvm.func @tma_reduce_2d_im2col(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %d0 : i32, %d1 : i32, %ch : i64) {
- // expected-error @below {{to use im2col mode, the tensor has to be at least 3-dimensional}}
- nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] {redKind = #nvvm.tma_redux_kind<and>, mode = #nvvm.tma_store_mode<im2col>}: !llvm.ptr, !llvm.ptr<3>
- llvm.return
-}
-
-// -----
-
llvm.func @convert_float_to_tf32_rna_relu(%src : f32) -> i32 {
// expected-error @below {{Relu not supported with rna rounding mode.}}
%res = nvvm.convert.float.to.tf32 %src {rnd = #nvvm.fp_rnd_mode<rna>, relu=true}