[Mlir-commits] [mlir] b4d7d3f - [mlir][NVVM] Add nvvm.membar operation (#166698)
llvmlistbot at llvm.org
Fri Nov 7 10:39:05 PST 2025
Author: Valentin Clement (バレンタイン クレメン)
Date: 2025-11-07T10:39:01-08:00
New Revision: b4d7d3f745aa7647cd7085b3a03ba15e7c21aa8b
URL: https://github.com/llvm/llvm-project/commit/b4d7d3f745aa7647cd7085b3a03ba15e7c21aa8b
DIFF: https://github.com/llvm/llvm-project/commit/b4d7d3f745aa7647cd7085b3a03ba15e7c21aa8b.diff
LOG: [mlir][NVVM] Add nvvm.membar operation (#166698)
Add an nvvm.membar operation with a level as defined in
https://docs.nvidia.com/cuda/parallel-thread-execution/#parallel-synchronization-and-communication-instructions-membar
This will be used to replace the direct intrinsic calls in CUDA Fortran for
`threadfence()`, `threadfence_block()` and `threadfence_system()`,
currently lowered here:
https://github.com/llvm/llvm-project/blob/e700f157026bf8b4d58f936c5db8f152e269d77f/flang/lib/Optimizer/Builder/CUDAIntrinsicCall.cpp#L1310
The nvvm membar intrinsics are also used in CUDA C/C++
(https://github.com/llvm/llvm-project/blob/49f55f4991227f3c7a2b8161bbf45c74b7023944/clang/lib/Headers/__clang_cuda_device_functions.h#L528)
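For illustration, a minimal sketch of what that lowering could emit with the
new operation (the scope chosen for each Fortran fence intrinsic is an
assumption here, following the usual CUDA threadfence-to-membar mapping):

  llvm.func @fences() {
    // threadfence_block(): fence at block (CTA) scope.
    nvvm.memory.barrier #nvvm.mem_scope<cta>
    // threadfence(): fence at device (GPU) scope.
    nvvm.memory.barrier #nvvm.mem_scope<gpu>
    // threadfence_system(): fence at system scope.
    nvvm.memory.barrier #nvvm.mem_scope<sys>
    llvm.return
  }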
Added:
mlir/test/Target/LLVMIR/nvvm/membar.mlir
Modified:
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 80bc0e5986e51..1cc5b74a3cb67 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -1236,6 +1236,23 @@ def NVVM_FenceProxyAcquireOp : NVVM_Op<"fence.proxy.acquire">,
let hasVerifier = 1;
}
+def NVVM_MembarOp : NVVM_Op<"memory.barrier">,
+ Arguments<(ins MemScopeKindAttr:$scope)> {
+ let summary = "Memory barrier operation";
+ let description = [{
+ The `membar` operation guarantees that prior memory accesses requested by this
+ thread are performed at the specified `scope` before later memory operations
+ requested by this thread that follow the membar instruction.
+
+ [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/#parallel-synchronization-and-communication-instructions-membar)
+ }];
+
+ let assemblyFormat = "$scope attr-dict";
+ let llvmBuilder = [{
+ createIntrinsicCall(builder, getMembarIntrinsicID($scope), {});
+ }];
+}
+
def NVVM_FenceProxyReleaseOp : NVVM_Op<"fence.proxy.release">,
Arguments<(ins MemScopeKindAttr:$scope,
DefaultValuedAttr<ProxyKindAttr,
diff --git a/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
index 0964e1b8c5ef3..cecff51e637a5 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
@@ -291,6 +291,20 @@ static unsigned getUnidirectionalFenceProxyID(NVVM::ProxyKind fromProxy,
llvm_unreachable("Unsupported proxy kinds");
}
+static unsigned getMembarIntrinsicID(NVVM::MemScopeKind scope) {
+ switch (scope) {
+ case NVVM::MemScopeKind::CTA:
+ return llvm::Intrinsic::nvvm_membar_cta;
+ case NVVM::MemScopeKind::CLUSTER:
+ return llvm::Intrinsic::nvvm_fence_sc_cluster;
+ case NVVM::MemScopeKind::GPU:
+ return llvm::Intrinsic::nvvm_membar_gl;
+ case NVVM::MemScopeKind::SYS:
+ return llvm::Intrinsic::nvvm_membar_sys;
+ }
+ llvm_unreachable("Unknown scope for memory barrier");
+}
+
#define TCGEN05LD(SHAPE, NUM) llvm::Intrinsic::nvvm_tcgen05_ld_##SHAPE##_##NUM
static llvm::Intrinsic::ID
diff --git a/mlir/test/Target/LLVMIR/nvvm/membar.mlir b/mlir/test/Target/LLVMIR/nvvm/membar.mlir
new file mode 100644
index 0000000000000..1b794f663b573
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/membar.mlir
@@ -0,0 +1,14 @@
+// RUN: mlir-translate -mlir-to-llvmir %s -split-input-file --verify-diagnostics | FileCheck %s
+
+// CHECK-LABEL: @memorybarrier()
+llvm.func @memorybarrier() {
+ // CHECK: call void @llvm.nvvm.membar.cta()
+ nvvm.memory.barrier #nvvm.mem_scope<cta>
+ // CHECK: call void @llvm.nvvm.fence.sc.cluster()
+ nvvm.memory.barrier #nvvm.mem_scope<cluster>
+ // CHECK: call void @llvm.nvvm.membar.gl()
+ nvvm.memory.barrier #nvvm.mem_scope<gpu>
+ // CHECK: call void @llvm.nvvm.membar.sys()
+ nvvm.memory.barrier #nvvm.mem_scope<sys>
+ llvm.return
+}