[Mlir-commits] [mlir] [mlir][nvvm] Add prefetch.tensormap (PR #67564)
Guray Ozen
llvmlistbot at llvm.org
Wed Sep 27 23:32:11 PDT 2023
https://github.com/grypp updated https://github.com/llvm/llvm-project/pull/67564
From e9ba8ade6a5f3ed27207fae53bc1880517c2202f Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Wed, 27 Sep 2023 16:51:38 +0200
Subject: [PATCH 1/4] [mlir][nvvm] Add prefetch.tensormap
This PR adds the `prefetch.tensormap` Op. It prefetches the cache line containing the given TMA descriptor, for subsequent use by the `cp.async.bulk.tensor` instruction.
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prefetch-prefetchu
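A minimal usage sketch of the new Op (the `%desc` and `%pred` values are
placeholders; both forms also appear in the lowering test added below):

    llvm.func @tma_prefetch(%desc : !llvm.ptr, %pred : i1) {
      nvvm.prefetch.tensormap %desc : !llvm.ptr
      nvvm.prefetch.tensormap %desc, predicate = %pred : !llvm.ptr, i1
      llvm.return
    }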
---
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 15 +++++++++++++++
mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir | 11 +++++++++++
2 files changed, 26 insertions(+)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 0d4d734edd2b69b..e9c52c06ed27ebd 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -1512,6 +1512,21 @@ def NVVM_CpAsyncBulkTensorSharedCTAToGlobalOp : NVVM_Op<"cp.async.bulk.tensor.gl
let hasVerifier = 1;
}
+def NVVM_PrefetchTensorMapOp : NVVM_Op<"prefetch.tensormap",
+ [DeclareOpInterfaceMethods<BasicPtxBuilderOpInterface>]>,
+ Arguments<(ins LLVM_i64ptr_any:$tmaDescriptor, PtxPredicate:$predicate)> {
+ let description = [{
+ The Op brings the cache line containing the given $tmaDescriptor for
+ subsequent use by the `cp.async.bulk.tensor` instruction.
+ }];
+ let assemblyFormat = "$tmaDescriptor (`,` `predicate` `=` $predicate^)? attr-dict `:` type(operands)";
+ let extraClassDefinition = [{
+ std::string $cppClass::getPtx() {
+ return std::string("prefetch.tensormap [%0];");
+ }
+ }];
+}
+
//===----------------------------------------------------------------------===//
// NVVM Wgmma Ops
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 7ffe1ad2bb2b111..8ff8868e96ace11 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -363,3 +363,14 @@ func.func @wgmma_f32_e5m2_e4m3(%descA : i64, %descB : i64) -> !mat32f32 {
: !mat32f32 -> !mat32f32
return %result2 : !mat32f32
}
+
+// -----
+
+// CHECK-LABEL: @tma_prefetch
+llvm.func @tma_prefetch(%desc : !llvm.ptr, %pred : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "prefetch.tensormap [$0];", "l"
+  nvvm.prefetch.tensormap %desc : !llvm.ptr
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$1 prefetch.tensormap [$0];", "l,b"
+  nvvm.prefetch.tensormap %desc, predicate = %pred : !llvm.ptr, i1
+ llvm.return
+}
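A note on the lowering above: the BasicPtxBuilder interface expands the string
returned by `getPtx()` into an `llvm.inline_asm` op, substituting `%0` with the
Op's first operand and, when the optional predicate is present, guarding the
instruction with `@`. Roughly the IR produced for the two forms in the test
(a sketch based on the CHECK lines; operand lists and trailing types elided):

    llvm.inline_asm has_side_effects asm_dialect = att "prefetch.tensormap [$0];", "l" %desc ...
    llvm.inline_asm has_side_effects asm_dialect = att "@$1 prefetch.tensormap [$0];", "l,b" %desc, %pred ...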
From 99d9bc0079be2be2cc995aef8329f9a90dac4042 Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Wed, 27 Sep 2023 16:58:53 +0200
Subject: [PATCH 2/4] implement it in nvgpu dialect as well
---
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 4 ----
mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td | 12 ++++++++++++
mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp | 13 +++++++++++++
3 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index e9c52c06ed27ebd..d1624b44f0a79c2 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -1515,10 +1515,6 @@ def NVVM_CpAsyncBulkTensorSharedCTAToGlobalOp : NVVM_Op<"cp.async.bulk.tensor.gl
def NVVM_PrefetchTensorMapOp : NVVM_Op<"prefetch.tensormap",
[DeclareOpInterfaceMethods<BasicPtxBuilderOpInterface>]>,
Arguments<(ins LLVM_i64ptr_any:$tmaDescriptor, PtxPredicate:$predicate)> {
- let description = [{
- The Op brings the cache line containing the given $tmaDescriptor for
- subsequent use by the `cp.async.bulk.tensor` instruction.
- }];
let assemblyFormat = "$tmaDescriptor (`,` `predicate` `=` $predicate^)? attr-dict `:` type(operands)";
let extraClassDefinition = [{
std::string $cppClass::getPtx() {
diff --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
index 31b137160545772..c4736cb1b675b68 100644
--- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
+++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
@@ -619,6 +619,18 @@ def NVGPU_MBarrierTryWaitParityOp : NVGPU_Op<"mbarrier.try_wait.parity", []> {
let assemblyFormat = "$barriers `[` $mbarId `]` `,` $phase `,` $ticks attr-dict `:` type($barriers)";
}
+def NVGPU_TmaPrefetchOp : NVGPU_Op<"tma.prefetch.descriptor", []> {
+  let summary = "Prefetch the given `nvgpu.tensormap.descriptor`";
+  let description = [{
+    The Op brings the cache line containing the given `$tensorMapDescriptor`
+    for subsequent use by the `tma.async.load` Op.
+ }];
+ let arguments = (ins NVGPU_TensorMapDescriptor:$tensorMapDescriptor, Optional<I1>:$predicate);
+ let assemblyFormat = [{
+ $tensorMapDescriptor (`,` $predicate^)? attr-dict `:` type($tensorMapDescriptor)
+ }];
+}
+
def NVGPU_TmaAsyncLoadOp : NVGPU_Op<"tma.async.load", []> {
let summary = "TMA asynchronous load";
let description = [{
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 4d1f6641af6dca3..e9d0b231f2c8e17 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -1306,6 +1306,18 @@ struct NVGPUWarpgroupMmaOpLowering
}
};
+struct NVGPUTmaPrefetchOpLowering
+ : public ConvertOpToLLVMPattern<nvgpu::TmaPrefetchOp> {
+ using ConvertOpToLLVMPattern<nvgpu::TmaPrefetchOp>::ConvertOpToLLVMPattern;
+ LogicalResult
+ matchAndRewrite(nvgpu::TmaPrefetchOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ rewriter.replaceOpWithNewOp<NVVM::PrefetchTensorMapOp>(
+ op, adaptor.getTensorMapDescriptor(), adaptor.getPredicate());
+ return success();
+ }
+};
+
} // namespace
void mlir::populateNVGPUToNVVMConversionPatterns(LLVMTypeConverter &converter,
@@ -1322,6 +1334,7 @@ void mlir::populateNVGPUToNVVMConversionPatterns(LLVMTypeConverter &converter,
NVGPUMBarrierArriveExpectTxLowering, // nvgpu.mbarrier.arrive.expect_tx
NVGPUGenerateGmmaDescriptorLowering, // nvgpu.wgmma.generate.descriptor
NVGPUWarpgroupMmaOpLowering, // nvgpu.warpgroup.mma
+ NVGPUTmaPrefetchOpLowering, // nvgpu.tma.prefetch.descriptor
MmaSyncOptoNVVM, MmaLdMatrixOpToNVVM, NVGPUAsyncCopyLowering,
NVGPUAsyncCreateGroupLowering, NVGPUAsyncWaitLowering,
NVGPUMmaSparseSyncLowering>(converter);
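At the NVGPU level the Op takes the `!nvgpu.tensormap.descriptor` value
directly; a minimal sketch of its use, mirroring the test added in the next
patch (the `!tensorMap1d` alias and `%p` predicate are placeholders):

    nvgpu.tma.prefetch.descriptor %tensorMap1d : !tensorMap1d
    nvgpu.tma.prefetch.descriptor %tensorMap1d, %p : !tensorMap1d

Each form lowers one-to-one to `nvvm.prefetch.tensormap` through the
`NVGPUTmaPrefetchOpLowering` pattern above.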
From d724d1f7d2ee9aab85477028a2e0688b9ac86d50 Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Wed, 27 Sep 2023 17:05:26 +0200
Subject: [PATCH 3/4] test
---
mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index 8c2f8dbbd5ad9a3..ee333e119a295b2 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -643,6 +643,13 @@ func.func @create_tensor_map(%devicePtr2d : memref<64x128xf32>, %devicePtr1d : m
func.return
}
+func.func @prefetch(%tensorMap1d: !tensorMap1d, %p : i1) {
+ // CHECK: nvvm.prefetch.tensormap
+ nvgpu.tma.prefetch.descriptor %tensorMap1d: !tensorMap1d
+ // CHECK: nvvm.prefetch.tensormap
+ nvgpu.tma.prefetch.descriptor %tensorMap1d, %p: !tensorMap1d
+  func.return
+}
+
!lhsTensorMap = !nvgpu.tensormap.descriptor<tensor = memref<128x64xf16, 3>, swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>
!rhsTensorMap = !nvgpu.tensormap.descriptor<tensor = memref<64x128xf16, strided<[128, 1], offset: 8192>, 3>, swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>
From 8fdcdb687ab67caaa1f79ebadfc77d2530c2823d Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Thu, 28 Sep 2023 08:31:55 +0200
Subject: [PATCH 4/4] check the arguments in the test
---
mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index ee333e119a295b2..c6e9cb3673fd742 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -643,10 +643,12 @@ func.func @create_tensor_map(%devicePtr2d : memref<64x128xf32>, %devicePtr1d : m
func.return
}
-func.func @prefetch(%tensorMap1d: !tensorMap1d, %p : i1) {
- // CHECK: nvvm.prefetch.tensormap
+// CHECK-LABEL: @tma_prefetch(
+// CHECK-SAME: %[[arg0:[a-zA-Z0-9_]+]]: !nvgpu.tensormap.descriptor<tensor = memref<128xf32,3>, swizzle=none, l2promo = none, oob = nan, interleave = none>, %[[arg1:[a-zA-Z0-9_]+]]: i1
+func.func @tma_prefetch(%tensorMap1d: !tensorMap1d, %p : i1) {
+ // CHECK: nvvm.prefetch.tensormap %[[arg0]] : !llvm.ptr
nvgpu.tma.prefetch.descriptor %tensorMap1d: !tensorMap1d
- // CHECK: nvvm.prefetch.tensormap
+ // CHECK: nvvm.prefetch.tensormap %[[arg0]], predicate = %[[arg1]] : !llvm.ptr, i1
nvgpu.tma.prefetch.descriptor %tensorMap1d, %p: !tensorMap1d
  func.return
}