[Mlir-commits] [mlir] [llvm][mlir] Atomic Control Options Support (PR #140932)
Anchu Rajendran S
llvmlistbot at llvm.org
Wed May 21 10:52:23 PDT 2025
https://github.com/anchuraj updated https://github.com/llvm/llvm-project/pull/140932
From b0ceb7a995f7e69bfc89d3ce4742563874dd9cd5 Mon Sep 17 00:00:00 2001
From: Anchu Rajendran <asudhaku at amd.com>
Date: Tue, 20 May 2025 16:27:15 -0500
Subject: [PATCH] Atomic Options Support
---
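For reviewers, a minimal sketch of the new surface syntax and the metadata it
lowers to, adapted from the tests below (the value names and the !0 node
numbering are illustrative only; the metadata kind names and the empty-node
form are the ones emitted by this patch):

  // MLIR: the new unit attributes print before the binary op keyword.
  %rmw = llvm.atomicrmw amdgpu_no_fine_grained_memory fadd %ptr, %val monotonic : !llvm.ptr, f32

  ; LLVM IR after translation: the attribute becomes an empty metadata node
  ; attached to the atomicrmw instruction.
  %rmw = atomicrmw fadd ptr %ptr, float %val monotonic, !amdgpu.no.fine.grained.memory !0
  !0 = !{}

Each attribute is an independent optional group in the assembly format, so any
combination of the three, including together with volatile, can appear on a
single atomicrmw.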
mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td | 33 +++++++++++++++++----
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 7 ++++-
mlir/test/Dialect/LLVMIR/roundtrip.mlir | 6 ++++
mlir/test/Target/LLVMIR/llvmir.mlir | 9 ++++++
4 files changed, 49 insertions(+), 6 deletions(-)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index f19f9d5a3083c..8efb3c0ee4520 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -2164,12 +2164,18 @@ def LLVM_AtomicRMWOp : LLVM_MemAccessOpBase<"atomicrmw", [
LLVM_AtomicRMWType:$val, AtomicOrdering:$ordering,
OptionalAttr<StrAttr>:$syncscope,
OptionalAttr<I64Attr>:$alignment,
- UnitAttr:$volatile_);
+ UnitAttr:$volatile_,
+ UnitAttr:$amdgpu_no_fine_grained_memory,
+ UnitAttr:$amdgpu_ignore_denormal_mode,
+ UnitAttr:$amdgpu_no_remote_memory);
// Append the aliasing related attributes defined in LLVM_MemAccessOpBase.
let arguments = !con(args, aliasAttrs);
let results = (outs LLVM_AtomicRMWType:$res);
let assemblyFormat = [{
- (`volatile` $volatile_^)? $bin_op $ptr `,` $val
+ (`volatile` $volatile_^)?
+ (`amdgpu_ignore_denormal_mode` $amdgpu_ignore_denormal_mode^)?
+ (`amdgpu_no_fine_grained_memory` $amdgpu_no_fine_grained_memory^)?
+ (`amdgpu_no_remote_memory` $amdgpu_no_remote_memory^)? $bin_op $ptr `,` $val
(`syncscope` `(` $syncscope^ `)`)? $ordering attr-dict `:`
qualified(type($ptr)) `,` type($val)
}];
@@ -2179,6 +2185,19 @@ def LLVM_AtomicRMWOp : LLVM_MemAccessOpBase<"atomicrmw", [
convertAtomicBinOpToLLVM($bin_op), $ptr, $val, llvm::MaybeAlign(),
convertAtomicOrderingToLLVM($ordering));
$res = inst;
+    auto &llvmContext = inst->getContext();
+    if ($amdgpu_ignore_denormal_mode) {
+      llvm::MDNode *metadata = llvm::MDNode::get(llvmContext, std::nullopt);
+      inst->setMetadata(llvmContext.getMDKindID("amdgpu.ignore.denormal.mode"), metadata);
+    }
+    if ($amdgpu_no_fine_grained_memory) {
+      llvm::MDNode *metadata = llvm::MDNode::get(llvmContext, std::nullopt);
+      inst->setMetadata(llvmContext.getMDKindID("amdgpu.no.fine.grained.memory"), metadata);
+    }
+    if ($amdgpu_no_remote_memory) {
+      llvm::MDNode *metadata = llvm::MDNode::get(llvmContext, std::nullopt);
+      inst->setMetadata(llvmContext.getMDKindID("amdgpu.no.remote.memory"), metadata);
+    }
}] # setVolatileCode
# setSyncScopeCode
# setAlignmentCode
@@ -2192,12 +2211,16 @@ def LLVM_AtomicRMWOp : LLVM_MemAccessOpBase<"atomicrmw", [
convertAtomicOrderingFromLLVM(atomicInst->getOrdering()),
getLLVMSyncScope(atomicInst), alignment, atomicInst->isVolatile());
}];
- list<int> llvmArgIndices = [-1, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1];
+ list<int> llvmArgIndices = [-1, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1];
let builders = [
- OpBuilder<(ins "LLVM::AtomicBinOp":$binOp, "Value":$ptr, "Value":$val,
+    OpBuilder<(ins "LLVM::AtomicBinOp":$binOp, "Value":$ptr, "Value":$val,
"LLVM::AtomicOrdering":$ordering,
CArg<"StringRef", "StringRef()">:$syncscope,
- CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
+ CArg<"bool", "false">:$isAmdgpuIgnoreDenormalMode,
+ CArg<"bool", "false">:$isAmdgpuNoFineGrainedMemory,
+ CArg<"bool", "false">:$isAmdgpuNoRemoteMemory
)>
];
let hasVerifier = 1;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index e17b9fd6eb8d3..90f7ba55b5b2f 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -3274,10 +3274,15 @@ OpFoldResult LLVM::ConstantOp::fold(FoldAdaptor) { return getValue(); }
void AtomicRMWOp::build(OpBuilder &builder, OperationState &state,
AtomicBinOp binOp, Value ptr, Value val,
AtomicOrdering ordering, StringRef syncscope,
- unsigned alignment, bool isVolatile) {
+ unsigned alignment, bool isVolatile,
+ bool isAmdgpuIgnoreDenormalMode,
+ bool isAmdgpuNoFineGrainedMemory,
+ bool isAmdgpuNoRemoteMemory) {
build(builder, state, val.getType(), binOp, ptr, val, ordering,
!syncscope.empty() ? builder.getStringAttr(syncscope) : nullptr,
alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
+ isAmdgpuIgnoreDenormalMode, isAmdgpuNoFineGrainedMemory,
+ isAmdgpuNoRemoteMemory,
/*access_groups=*/nullptr,
/*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr);
}
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 1fd24f3f58e44..edaebdf39ca42 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -483,6 +483,12 @@ func.func @atomicrmw(%ptr : !llvm.ptr, %f32 : f32, %f16_vec : vector<2xf16>) {
%1 = llvm.atomicrmw volatile fsub %ptr, %f32 syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
// CHECK: llvm.atomicrmw fmin %{{.*}}, %{{.*}} monotonic : !llvm.ptr, vector<2xf16>
%2 = llvm.atomicrmw fmin %ptr, %f16_vec monotonic : !llvm.ptr, vector<2xf16>
+ // CHECK: llvm.atomicrmw amdgpu_ignore_denormal_mode fmin %{{.*}}, %{{.*}} monotonic : !llvm.ptr, vector<2xf16>
+ %3 = llvm.atomicrmw amdgpu_ignore_denormal_mode fmin %ptr, %f16_vec monotonic : !llvm.ptr, vector<2xf16>
+ // CHECK: llvm.atomicrmw amdgpu_no_fine_grained_memory fmin %{{.*}}, %{{.*}} monotonic : !llvm.ptr, vector<2xf16>
+ %4 = llvm.atomicrmw amdgpu_no_fine_grained_memory fmin %ptr, %f16_vec monotonic : !llvm.ptr, vector<2xf16>
+ // CHECK: llvm.atomicrmw amdgpu_no_remote_memory fmin %{{.*}}, %{{.*}} monotonic : !llvm.ptr, vector<2xf16>
+ %5 = llvm.atomicrmw amdgpu_no_remote_memory fmin %ptr, %f16_vec monotonic : !llvm.ptr, vector<2xf16>
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index 4ef68fa83a70d..b4979fc11914c 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1568,6 +1568,15 @@ llvm.func @atomicrmw(
// CHECK-SAME: syncscope("singlethread")
// CHECK-SAME: align 8
%27 = llvm.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr, i32
+ // CHECK: atomicrmw
+ // CHECK-SAME: !amdgpu.ignore.denormal.mode
+ %28 = llvm.atomicrmw amdgpu_ignore_denormal_mode udec_wrap %i32_ptr, %i32 monotonic {alignment = 8 : i64} : !llvm.ptr, i32
+ // CHECK: atomicrmw
+ // CHECK-SAME: !amdgpu.no.fine.grained.memory
+ %29 = llvm.atomicrmw amdgpu_no_fine_grained_memory udec_wrap %i32_ptr, %i32 monotonic {alignment = 8 : i64} : !llvm.ptr, i32
+ // CHECK: atomicrmw
+ // CHECK-SAME: !amdgpu.no.remote.memory
+ %30 = llvm.atomicrmw amdgpu_no_remote_memory udec_wrap %i32_ptr, %i32 monotonic {alignment = 8 : i64} : !llvm.ptr, i32
llvm.return
}