[clang] [CIR] Scoped atomic store (PR #171627)
Sirui Mu via cfe-commits
cfe-commits at lists.llvm.org
Wed Dec 10 06:48:34 PST 2025
https://github.com/Lancern created https://github.com/llvm/llvm-project/pull/171627
This patch adds support for `__scoped_atomic_store` and `__scoped_atomic_store_n`.
From b31238d8af0cfc97c94a87dd0c2aa58a1d3b0c16 Mon Sep 17 00:00:00 2001
From: Sirui Mu <msrlancern at gmail.com>
Date: Wed, 10 Dec 2025 22:47:09 +0800
Subject: [PATCH] [CIR] Scoped atomic store
---
.../CIR/Dialect/Builder/CIRBaseBuilder.h | 4 ++-
clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 ++
clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 10 +++---
clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +-
clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 1 +
.../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 7 ++--
clang/test/CIR/CodeGen/atomic-scoped.c | 32 +++++++++++++++++++
7 files changed, 51 insertions(+), 8 deletions(-)
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 8edb796884b5c..b4b02e24f85cc 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -330,8 +330,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst,
bool isVolatile = false,
mlir::IntegerAttr align = {},
+ cir::SyncScopeKindAttr scope = {},
cir::MemOrderAttr order = {}) {
- return cir::StoreOp::create(*this, loc, val, dst, isVolatile, align, order);
+ return cir::StoreOp::create(*this, loc, val, dst, isVolatile, align, scope,
+ order);
}
/// Emit a load from an boolean flag variable.
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 635809afdf2cc..12c5e399f02da 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -649,11 +649,13 @@ def CIR_StoreOp : CIR_Op<"store", [
[MemWrite]>:$addr,
UnitAttr:$is_volatile,
OptionalAttr<I64Attr>:$alignment,
+ OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
OptionalAttr<CIR_MemOrder>:$mem_order);
let assemblyFormat = [{
(`volatile` $is_volatile^)?
(`align` `(` $alignment^ `)`)?
+ (`syncscope` `(` $sync_scope^ `)`)?
(`atomic` `(` $mem_order^ `)`)?
$value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr))
}];
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 700e5f401a18f..0b8cded35fee9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -455,13 +455,15 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__atomic_store_n:
- case AtomicExpr::AO__atomic_store: {
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n: {
cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
assert(!cir::MissingFeatures::atomicSyncScopeID());
builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
- /*align=*/mlir::IntegerAttr{}, orderAttr);
+ /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
return;
}
@@ -584,8 +586,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
- case AtomicExpr::AO__scoped_atomic_store:
- case AtomicExpr::AO__scoped_atomic_store_n:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
@@ -849,6 +849,7 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
break;
case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store:
val1 = emitPointerWithAlignment(e->getVal1());
break;
@@ -912,6 +913,7 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n:
val1 = emitValToTemp(*this, e->getVal1());
break;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 6e8c5d369dbc5..a9f7fe1386fa0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -492,11 +492,12 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst,
bool isVolatile = false,
mlir::IntegerAttr align = {},
+ cir::SyncScopeKindAttr scope = {},
cir::MemOrderAttr order = {}) {
if (!align)
align = getAlignmentAttr(dst.getAlignment());
return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), isVolatile,
- align, order);
+ align, scope, order);
}
/// Create a cir.complex.real_ptr operation that derives a pointer to the real
diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp
index 66469e208d7b0..f79a52e2fb9b3 100644
--- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp
@@ -143,6 +143,7 @@ DeletionKind cir::CopyOp::removeBlockingUses(
cir::StoreOp::create(builder, getLoc(), reachingDefinition, getDst(),
/*isVolatile=*/false,
/*alignment=*/mlir::IntegerAttr{},
+ /*sync_scope=*/cir::SyncScopeKindAttr(),
/*mem-order=*/cir::MemOrderAttr());
return DeletionKind::Delete;
}
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 8a3b0a1448d2e..be7724317b21c 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1684,13 +1684,16 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
// Convert adapted value to its memory type if needed.
mlir::Value value = emitToMemory(rewriter, dataLayout,
op.getValue().getType(), adaptor.getValue());
- // TODO: nontemporal, syncscope.
+ // TODO: nontemporal.
assert(!cir::MissingFeatures::opLoadStoreNontemporal());
assert(!cir::MissingFeatures::opLoadStoreTbaa());
+ std::optional<llvm::StringRef> syncScope =
+ getLLVMSyncScope(op.getSyncScope());
mlir::LLVM::StoreOp storeOp = mlir::LLVM::StoreOp::create(
rewriter, op->getLoc(), value, adaptor.getAddr(), alignment,
op.getIsVolatile(),
- /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder);
+ /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder,
+ syncScope.value_or(llvm::StringRef()));
rewriter.replaceOp(op, storeOp);
assert(!cir::MissingFeatures::opLoadStoreTbaa());
return mlir::LogicalResult::success();
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c b/clang/test/CIR/CodeGen/atomic-scoped.c
index 04989589bee26..5b8c868d6c9d6 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -38,3 +38,35 @@ void scoped_atomic_load_n(int *ptr) {
// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
}
+
+void scoped_atomic_store(int *ptr, int value) {
+ // CIR-LABEL: @scoped_atomic_store
+ // LLVM-LABEL: @scoped_atomic_store
+ // OGCG-LABEL: @scoped_atomic_store
+
+ __scoped_atomic_store(ptr, &value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+ // CIR: cir.store align(4) syncscope(single_thread) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+ // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+ // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+
+ __scoped_atomic_store(ptr, &value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+ // CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+ // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+ // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+}
+
+void scoped_atomic_store_n(int *ptr, int value) {
+ // CIR-LABEL: @scoped_atomic_store_n
+ // LLVM-LABEL: @scoped_atomic_store_n
+ // OGCG-LABEL: @scoped_atomic_store_n
+
+ __scoped_atomic_store_n(ptr, value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+ // CIR: cir.store align(4) syncscope(single_thread) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+ // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+ // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+
+ __scoped_atomic_store_n(ptr, value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+ // CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+ // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+ // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+}
More information about the cfe-commits
mailing list