[clang] [CIR] Add scoped atomic compare-and-exchange (PR #180412)
via cfe-commits
cfe-commits at lists.llvm.org
Sun Feb 8 07:16:54 PST 2026
llvmbot wrote:
@llvm/pr-subscribers-clang
@llvm/pr-subscribers-clangir
Author: Sirui Mu (Lancern)
This patch adds synchronization scope support to the atomic compare-and-exchange operation: `cir.atomic.cmpxchg` gains a `sync_scope` attribute, CIR codegen threads the scope through `emitAtomicCmpXchg`, the `__scoped_atomic_compare_exchange` and `__scoped_atomic_compare_exchange_n` builtins are now handled, and the scope is forwarded to the LLVM `cmpxchg` during lowering.
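
For reference, a minimal caller of the builtin covered here might look like the sketch below; the wrapper function is illustrative, while the builtin, ordering macros, and scope macro are the ones exercised by the new `atomic-scoped.c` test:

```c
#include <stdbool.h>

// Illustrative wrapper (not part of the patch) around the scoped
// compare-and-exchange builtin this change handles in CIR codegen.
// The builtin, ordering, and scope macros match the new test file.
bool try_update(int *ptr, int *expected, int desired) {
  return __scoped_atomic_compare_exchange_n(ptr, expected, desired,
                                            /*weak=*/0, __ATOMIC_SEQ_CST,
                                            __ATOMIC_ACQUIRE,
                                            __MEMORY_SCOPE_SINGLE);
}
```

The last argument is what ends up in the op's new `sync_scope` attribute and, after lowering, in the sync scope of the LLVM `cmpxchg` instruction.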
---
Patch is 21.03 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/180412.diff
6 Files Affected:
- (modified) clang/include/clang/CIR/Dialect/IR/CIROps.td (+6-1)
- (modified) clang/lib/CIR/CodeGen/CIRGenAtomic.cpp (+14-10)
- (modified) clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp (+3-2)
- (modified) clang/test/CIR/CodeGen/atomic-scoped.c (+68)
- (modified) clang/test/CIR/CodeGen/atomic.c (+6-6)
- (modified) clang/test/CIR/IR/atomic.cir (+8-8)
``````````diff
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 906bd247f60ef..64118c677f4d3 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -6467,6 +6467,9 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
when the exchange takes place. The `fail_order` attribute gives the memory
order of this atomic operation when the exchange does not take place.
+ The `sync_scope` attribute specifies the synchronization scope for this
+ atomic operation.
+
The `weak` attribute is a boolean flag that indicates whether this is a
"weak" compare-and-exchange operation. A weak compare-and-exchange operation
allows "spurious failures", meaning that be treated as if the comparison
@@ -6485,7 +6488,7 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
```mlir
%old, %success = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire)
- %ptr, %expected, %desired
+ syncscope(system) %ptr, %expected, %desired
: (!cir.ptr<!u64i>, !u64i, !u64i) -> (!u64i, !cir.bool)
```
}];
@@ -6495,6 +6498,7 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
CIR_AnyType:$desired,
Arg<CIR_MemOrder, "success memory order">:$succ_order,
Arg<CIR_MemOrder, "failure memory order">:$fail_order,
+ CIR_SyncScopeKind:$sync_scope,
OptionalAttr<I64Attr>:$alignment,
UnitAttr:$weak,
UnitAttr:$is_volatile);
@@ -6502,6 +6506,7 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
let assemblyFormat = [{
(`weak` $weak^)?
`success` `(` $succ_order `)` `failure` `(` $fail_order `)`
+ `syncscope` `(` $sync_scope `)`
$ptr `,` $expected `,` $desired
(`align` `(` $alignment^ `)`)?
(`volatile` $is_volatile^)?
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index a78b15511dd82..60dc34c9a930d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -315,7 +315,8 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
Address dest, Address ptr, Address val1,
Address val2, uint64_t size,
cir::MemOrder successOrder,
- cir::MemOrder failureOrder) {
+ cir::MemOrder failureOrder,
+ cir::SyncScopeKind scope) {
mlir::Location loc = cgf.getLoc(e->getSourceRange());
CIRGenBuilderTy &builder = cgf.getBuilder();
@@ -327,6 +328,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
expected, desired,
cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
+ cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), scope),
builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
cmpxchg.setIsVolatile(e->isVolatile());
@@ -355,7 +357,8 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
bool isWeak, Address dest, Address ptr,
Address val1, Address val2,
Expr *failureOrderExpr, uint64_t size,
- cir::MemOrder successOrder) {
+ cir::MemOrder successOrder,
+ cir::SyncScopeKind scope) {
Expr::EvalResult failureOrderEval;
if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
@@ -387,7 +390,7 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
// precondition is 31.7.2.18. Effectively treat this as a DR and skip
// language version checks.
emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
- failureOrder);
+ failureOrder, scope);
return;
}
@@ -416,20 +419,22 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
- val2, failureOrderExpr, size, order);
+ val2, failureOrderExpr, size, order, scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
- val2, failureOrderExpr, size, order);
+ val2, failureOrderExpr, size, order, scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
- case AtomicExpr::AO__atomic_compare_exchange_n: {
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
bool isWeak = false;
if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
- failureOrderExpr, size, order);
+ failureOrderExpr, size, order, scope);
} else {
assert(!cir::MissingFeatures::atomicExpr());
cgf.cgm.errorNYI(expr->getSourceRange(),
@@ -580,9 +585,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
- case AtomicExpr::AO__scoped_atomic_compare_exchange:
- case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
-
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
@@ -895,6 +897,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
val1 = emitPointerWithAlignment(e->getVal1());
if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 6ac32cade4576..b73bd0d35a635 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -832,8 +832,9 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgOpLowering::matchAndRewrite(
auto cmpxchg = mlir::LLVM::AtomicCmpXchgOp::create(
rewriter, op.getLoc(), adaptor.getPtr(), expected, desired,
getLLVMMemOrder(adaptor.getSuccOrder()),
- getLLVMMemOrder(adaptor.getFailOrder()));
- assert(!cir::MissingFeatures::atomicScope());
+ getLLVMMemOrder(adaptor.getFailOrder()),
+ getLLVMSyncScope(op.getSyncScope()));
+
cmpxchg.setAlignment(adaptor.getAlignment());
cmpxchg.setWeak(adaptor.getWeak());
cmpxchg.setVolatile_(adaptor.getIsVolatile());
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c b/clang/test/CIR/CodeGen/atomic-scoped.c
index d34b95b9a305a..be9322c304840 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -112,3 +112,71 @@ void scoped_atomic_exchange_n(int *ptr, int value) {
// LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
// OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
}
+
+void scoped_atomic_cmpxchg(int *ptr, int *expected, int *desired) {
+ // CIR-LABEL: @scoped_atomic_cmpxchg
+ // LLVM-LABEL: @scoped_atomic_cmpxchg
+ // OGCG-LABEL: @scoped_atomic_cmpxchg
+
+ __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/0,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SINGLE);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+
+ __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/1,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SINGLE);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+
+ __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/0,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SYSTEM);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+
+ __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/1,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SYSTEM);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+}
+
+void scoped_atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
+ // CIR-LABEL: @scoped_atomic_cmpxchg_n
+ // LLVM-LABEL: @scoped_atomic_cmpxchg_n
+ // OGCG-LABEL: @scoped_atomic_cmpxchg_n
+
+ __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SINGLE);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+
+ __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SINGLE);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+
+ __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SYSTEM);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+
+ __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1,
+ __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+ __MEMORY_SCOPE_SYSTEM);
+ // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+ // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+}
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 631ab6174c937..7a6c7e923f058 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -264,7 +264,7 @@ void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int *expected, int desired) {
__c11_atomic_compare_exchange_strong(ptr, expected, desired,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
// CIR-NEXT: %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
// CIR-NEXT: cir.if %[[FAILED]] {
// CIR-NEXT: cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
@@ -302,7 +302,7 @@ void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int *expected, int desired) {
__c11_atomic_compare_exchange_weak(ptr, expected, desired,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
// CIR-NEXT: %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
// CIR-NEXT: cir.if %[[FAILED]] {
// CIR-NEXT: cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
@@ -339,7 +339,7 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
// OGCG-LABEL: @atomic_cmpxchg
__atomic_compare_exchange(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
// CIR-NEXT: %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
// CIR-NEXT: cir.if %[[FAILED]] {
// CIR-NEXT: cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
@@ -370,7 +370,7 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
// OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
__atomic_compare_exchange(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
// CIR-NEXT: %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
// CIR-NEXT: cir.if %[[FAILED]] {
// CIR-NEXT: cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
@@ -407,7 +407,7 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
// OGCG-LABEL: @atomic_cmpxchg_n
__atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
// CIR-NEXT: %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
// CIR-NEXT: cir.if %[[FAILED]] {
// CIR-NEXT: cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
@@ -438,7 +438,7 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
// OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
__atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
// CIR-NEXT: %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
// CIR-NEXT: cir.if %[[FAILED]] {
// CIR-NEXT: cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir
index c58cf472bb5f0..5d186f3a49cb6 100644
--- a/clang/test/CIR/IR/atomic.cir
+++ b/clang/test/CIR/IR/atomic.cir
@@ -22,13 +22,13 @@ cir.func @atomic_xchg(%ptr: !cir.ptr<!s32i>, %val: !s32i) {
cir.func @atomic_cmpxchg(%ptr: !cir.ptr<!s32i>, %expected: !s32i, %desired: !s32i) {
// CHECK-LABEL: @atomic_cmpxchg
- %0, %1 = cir.atomic.cmpxchg success(relaxed) failure(relaxed) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- // CHECK: cir.atomic.cmpxchg success(relaxed) failure(relaxed) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- %2, %3 = cir.atomic.cmpxchg weak success(relaxed) failure(relaxed) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- // CHECK: cir.atomic.cmpxchg weak success(relaxed) failure(relaxed) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- %4, %5 = cir.atomic.cmpxchg success(seq_cst) failure(acquire) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- // CHECK: cir.atomic.cmpxchg success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- %6, %7 = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
- // CHECK: cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ %0, %1 = cir.atomic.cmpxchg success(relaxed) failure(relaxed) syncscope(system) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CHECK: cir.atomic.cmpxchg success(relaxed) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !...
[truncated]
``````````
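
As the codegen hunk above shows, the `weak` argument is only consumed when it constant-folds (`EvaluateAsBooleanCondition`); a non-constant flag still falls into the existing NYI path. A hypothetical sketch of the two shapes, assuming the same builtin and macros as the test:

```c
// Constant weak flag: handled, lowers to cir.atomic.cmpxchg (weak when 1).
void cas_strong(int *p, int *e, int d) {
  __scoped_atomic_compare_exchange_n(p, e, d, /*weak=*/0, __ATOMIC_SEQ_CST,
                                     __ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
}

// Non-constant weak flag: still reported via errorNYI by CIR codegen.
void cas_dynamic(int *p, int *e, int d, int weak) {
  __scoped_atomic_compare_exchange_n(p, e, d, weak, __ATOMIC_SEQ_CST,
                                     __ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
}
```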
https://github.com/llvm/llvm-project/pull/180412