[Mlir-commits] [mlir] 38d9a44 - [MLIR][NVGPU] Add `tma.fence.descriptor` OP (#133218)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Thu Mar 27 07:20:23 PDT 2025


Author: Guray Ozen
Date: 2025-03-27T15:20:19+01:00
New Revision: 38d9a445106cba09854d9d00050a20f6faa4dd0b

URL: https://github.com/llvm/llvm-project/commit/38d9a445106cba09854d9d00050a20f6faa4dd0b
DIFF: https://github.com/llvm/llvm-project/commit/38d9a445106cba09854d9d00050a20f6faa4dd0b.diff

LOG: [MLIR][NVGPU] Add `tma.fence.descriptor` OP (#133218)

When the TMA descriptor is transferred from host memory to global memory
using cudaMemcpy, each thread block must insert a fence before any
thread accesses the updated tensor map in global memory. Once the tensor
map has been accessed, no additional fences are needed by that block
unless the map is modified again.

[Example from cuda programming
guide](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#using-tma-to-transfer-multi-dimensional-arrays).
The `tma.fence.descriptor` basically implements
`ptx::fence_proxy_tensormap_generic`.
```
#include <cuda.h>
#include <cuda/ptx>
namespace ptx = cuda::ptx;

__device__ CUtensorMap global_tensor_map;
__global__ void kernel(CUtensorMap *tensor_map)
{
  // Fence acquire tensor map:
  ptx::n32_t<128> size_bytes;
  // Since the tensor map was modified from the host using cudaMemcpy,
  // the scope should be .sys.
  ptx::fence_proxy_tensormap_generic(
     ptx::sem_acquire, ptx::scope_sys, tensor_map, size_bytes
 );
 // Safe to use tensor_map after fence inside this thread.
}
int main() {
  CUtensorMap local_tensor_map;
  // [ ..Initialize map.. ]
  cudaMemcpy(&global_tensor_map, &local_tensor_map, sizeof(CUtensorMap), cudaMemcpyHostToDevice);
  // The kernel takes a CUtensorMap*, so pass the address of the __device__ symbol.
  kernel<<<1, 1>>>(&global_tensor_map);
}
```

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/NVGPU/IR/NVGPUOps.td
    mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
    mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPUOps.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPUOps.td
index ec68364d47e4f..73d86283a5940 100644
--- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPUOps.td
+++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPUOps.td
@@ -452,6 +452,20 @@ def NVGPU_MBarrierTryWaitParityOp : NVGPU_Op<"mbarrier.try_wait.parity", []> {
   let assemblyFormat = "$barriers `[` $mbarId `]` `,` $phaseParity `,` $ticks attr-dict `:` type($barriers)";  
 }
 
+def NVGPU_TmaFenceOp : NVGPU_Op<"tma.fence.descriptor", []> {
+  let summary = "Insert fence given `nvgpu.tensormap.descriptor` ";
+  let description = [{
+    The Op fences the given `$tmaDescriptor`. This is necessary if the tensor map
+    descriptor was modified from the host using cudaMemcpy. In this case, the
+    kernel needs a fence after which it is safe to use `tensor.map`.
+  }];
+  let arguments = (ins NVGPU_TensorMapDescriptor:$tensorMapDescriptor);
+  let assemblyFormat = [{
+    $tensorMapDescriptor attr-dict `:` type($tensorMapDescriptor)
+  }];
+}
+
+
 def NVGPU_TmaPrefetchOp : NVGPU_Op<"tma.prefetch.descriptor", []> {
   let summary = "Prefetch given `nvgpu.tensormap.descriptor` ";
   let description = [{

diff  --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 31c28a6008a22..51507c6507b69 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -1671,6 +1671,28 @@ struct NVGPUWarpgroupMmaInitAccumulatorOpLowering
   }
 };
 
+struct NVGPUTmaFenceOpLowering
+    : public ConvertOpToLLVMPattern<nvgpu::TmaFenceOp> {
+  using ConvertOpToLLVMPattern<nvgpu::TmaFenceOp>::ConvertOpToLLVMPattern;
+  LogicalResult
+  matchAndRewrite(nvgpu::TmaFenceOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    MLIRContext *ctx = op.getContext();
+    ImplicitLocOpBuilder b(op->getLoc(), rewriter);
+    auto i32Ty = b.getI32Type();
+    Value tensormapSize =
+        b.create<LLVM::ConstantOp>(i32Ty, rewriter.getI32IntegerAttr(128));
+
+    auto memscope =
+        NVVM::MemScopeKindAttr::get(ctx, ::mlir::NVVM::MemScopeKind::SYS);
+
+    rewriter.replaceOpWithNewOp<NVVM::FenceProxyAcquireOp>(
+        op, memscope, adaptor.getTensorMapDescriptor(), tensormapSize);
+
+    return success();
+  }
+};
+
 struct NVGPUTmaPrefetchOpLowering
     : public ConvertOpToLLVMPattern<nvgpu::TmaPrefetchOp> {
   using ConvertOpToLLVMPattern<nvgpu::TmaPrefetchOp>::ConvertOpToLLVMPattern;
@@ -1733,6 +1755,7 @@ void mlir::populateNVGPUToNVVMConversionPatterns(
       NVGPUTmaAsyncStoreOpLowering,          // nvgpu.tma.async.store
       NVGPUTmaCreateDescriptorOpLowering,    // nvgpu.tma.create.descriptor
       NVGPUTmaPrefetchOpLowering,            // nvgpu.tma.prefetch.descriptor
+      NVGPUTmaFenceOpLowering,               // nvgpu.tma.fence.descriptor
       NVGPUMBarrierArriveExpectTxLowering,   // nvgpu.mbarrier.arrive.expect_tx
       NVGPUGenerateWarpgroupDescriptorLowering, // nvgpu.warpgroup.generate.descriptor
       NVGPUWarpgroupMmaOpLowering,              // nvgpu.warpgroup.mma

diff  --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index cb4dd7d3b2961..d0bc806e0aa8c 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -824,6 +824,17 @@ func.func @tma_prefetch(%tensorMap1d: !tensorMap1d, %p : i1) {
   func.return
 }
 
+
+// CHECK-LABEL: @tma_fence(
+// CHECK-SAME: %[[arg0:[a-zA-Z0-9_]+]]: !nvgpu.tensormap.descriptor<tensor = memref<128xf32, 3>, swizzle = none, l2promo = none, oob = nan, interleave = none>
+func.func @tma_fence(%tensorMap1d: !tensorMap1d) {
+  // CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[arg0]] : !nvgpu.tensormap.descriptor<tensor = memref<128xf32, 3>, swizzle = none, l2promo = none, oob = nan, interleave = none> to !llvm.ptr
+  // CHECK: %[[S1:.+]] = llvm.mlir.constant(128 : i32) : i32
+  // CHECK: nvvm.fence.proxy.acquire <sys> %[[S0]], %[[S1]]
+  nvgpu.tma.fence.descriptor %tensorMap1d: !tensorMap1d
+  func.return
+}
+
 !lhsTensorMap = !nvgpu.tensormap.descriptor<tensor = memref<128x64xf16, 3>, swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>
 !rhsTensorMap = !nvgpu.tensormap.descriptor<tensor = memref<64x64xf16, strided<[64, 1], offset: 8192>, 3>, swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>
 


        


More information about the Mlir-commits mailing list