[clang] [CIR] Implement compare exchange with dynamic failure ordering (PR #183110)
Erich Keane via cfe-commits
cfe-commits at lists.llvm.org
Tue Feb 24 09:27:57 PST 2026
https://github.com/erichkeane created https://github.com/llvm/llvm-project/pull/183110
In #156253, we implemented the rest of this feature, with compile-time constant failure ordering. This patch follows the incubator's direction (with a little cleanup based on other cleanup that we do) to replace this situation with a 'switch'.
>From 58b53128efe78cd9beba82fdb5cdbc247700e8bb Mon Sep 17 00:00:00 2001
From: erichkeane <ekeane at nvidia.com>
Date: Tue, 24 Feb 2026 07:53:39 -0800
Subject: [PATCH] [CIR] Implement compare exchange with dynamic failure
ordering
In #156253, we implemented the rest of this feature, with compile-time
constant failure ordering. This patch follows the incubator's direction
(with a little cleanup based on other cleanup that we do) to replace
this situation with a 'switch'.
---
clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 50 ++++-
clang/test/CIR/CodeGen/atomic.c | 264 ++++++++++++++++++++++++-
2 files changed, 307 insertions(+), 7 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index a9893f83be222..c2d1a031a7e53 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -394,9 +394,53 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
return;
}
- assert(!cir::MissingFeatures::atomicExpr());
- cgf.cgm.errorNYI(e->getSourceRange(),
- "emitAtomicCmpXchgFailureSet: non-constant failure order");
+ // The failure memory order is not a compile time constant. The CIR atomic ops
+ // require a constant value, so that memory order is known at compile time. In
+ // this case, we can switch based on the memory order and call each variant
+ // individually.
+ mlir::Value failureOrderVal = cgf.emitScalarExpr(failureOrderExpr);
+ mlir::Location atomicLoc = cgf.getLoc(e->getSourceRange());
+ cir::SwitchOp::create(
+ cgf.getBuilder(), atomicLoc, failureOrderVal,
+ [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
+ mlir::Block *switchBlock = cgf.getBuilder().getBlock();
+
+ // case cir::MemOrder::Relaxed:
+ // // 31.7.2.18: "The failure argument shall not be
+ // memory_order_release
+ // // nor memory_order_acq_rel". Fallback to monotonic.
+ // case cir::MemOrder::Release:
+ // case cir::MemOrder::AcquireRelease:
+ // Note: Since there are 3 options, this makes sense to just emit as a
+ // 'default', which prevents user code from 'falling off' of this,
+ // which seems reasonable. Also, 'relaxed' being the default behavior
+ // is also probably the least harmful.
+ emitMemOrderDefaultCaseLabel(cgf.getBuilder(), atomicLoc);
+ emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
+ successOrder, cir::MemOrder::Relaxed, scope);
+ cgf.getBuilder().createBreak(atomicLoc);
+ cgf.getBuilder().setInsertionPointToEnd(switchBlock);
+
+ // case cir::MemOrder::Consume:
+ // case cir::MemOrder::Acquire:
+ emitMemOrderCaseLabel(cgf.getBuilder(), loc, failureOrderVal.getType(),
+ {cir::MemOrder::Consume, cir::MemOrder::Acquire});
+ emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
+ successOrder, cir::MemOrder::Acquire, scope);
+ cgf.getBuilder().createBreak(atomicLoc);
+ cgf.getBuilder().setInsertionPointToEnd(switchBlock);
+
+ // case cir::MemOrder::SequentiallyConsistent:
+ emitMemOrderCaseLabel(cgf.getBuilder(), loc, failureOrderVal.getType(),
+ {cir::MemOrder::SequentiallyConsistent});
+ emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
+ successOrder, cir::MemOrder::SequentiallyConsistent,
+ scope);
+ cgf.getBuilder().createBreak(atomicLoc);
+ cgf.getBuilder().setInsertionPointToEnd(switchBlock);
+
+ cgf.getBuilder().createYield(atomicLoc);
+ });
}
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 6f042b06e3b53..ce3f62eaa871c 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -257,10 +257,11 @@ void c11_store(_Atomic(int) *ptr, int x) {
// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
// OGCG: }
-void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int *expected, int desired) {
+void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int *expected, int desired, int failure) {
// CIR-LABEL: @c11_atomic_cmpxchg_strong
// LLVM-LABEL: @c11_atomic_cmpxchg_strong
// OGCG-LABEL: @c11_atomic_cmpxchg_strong
+ // CIR: %[[FAILURE:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["failure", init]
__c11_atomic_compare_exchange_strong(ptr, expected, desired,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
@@ -293,12 +294,56 @@ void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int *expected, int desired) {
// OGCG: [[LABEL_CONT]]:
// OGCG-NEXT: %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
// OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+ __c11_atomic_compare_exchange_strong(ptr, expected, desired,
+ __ATOMIC_SEQ_CST, failure);
+ // CIR: %[[FAIL_LOAD:.*]] = cir.load{{.*}}%[[FAILURE]]
+ // CIR: cir.switch(%[[FAIL_LOAD]] : !s32i) {
+ // CIR-NEXT: cir.case(default, []) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(seq_cst) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.yield
+ // CIR-NEXT: }
+
+ // LLVM: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // LLVM-NEXT: i32 1, label %[[ACQ:.*]]
+ // LLVM-NEXT: i32 2, label %[[ACQ]]
+ // LLVM-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // LLVM-NEXT: ]
+ // LLVM: [[DEF]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // LLVM: [[ACQ]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // LLVM: [[SEQ_CST]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+
+ // OGCG: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // OGCG-NEXT: i32 1, label %[[ACQ:.*]]
+ // OGCG-NEXT: i32 2, label %[[ACQ]]
+ // OGCG-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // OGCG-NEXT: ]
+ // OGCG: [[DEF]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // OGCG: [[ACQ]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // OGCG: [[SEQ_CST]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
}
-void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int *expected, int desired) {
+void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int *expected, int desired, int failure) {
// CIR-LABEL: @c11_atomic_cmpxchg_weak
// LLVM-LABEL: @c11_atomic_cmpxchg_weak
// OGCG-LABEL: @c11_atomic_cmpxchg_weak
+ // CIR: %[[FAILURE:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["failure", init]
__c11_atomic_compare_exchange_weak(ptr, expected, desired,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
@@ -331,12 +376,56 @@ void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int *expected, int desired) {
// OGCG: [[LABEL_CONT]]:
// OGCG-NEXT: %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
// OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+ __c11_atomic_compare_exchange_weak(ptr, expected, desired,
+ __ATOMIC_SEQ_CST, failure);
+ // CIR: %[[FAIL_LOAD:.*]] = cir.load{{.*}}%[[FAILURE]]
+ // CIR: cir.switch(%[[FAIL_LOAD]] : !s32i) {
+ // CIR-NEXT: cir.case(default, []) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(seq_cst) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.yield
+ // CIR-NEXT: }
+
+ // LLVM: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // LLVM-NEXT: i32 1, label %[[ACQ:.*]]
+ // LLVM-NEXT: i32 2, label %[[ACQ]]
+ // LLVM-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // LLVM-NEXT: ]
+ // LLVM: [[DEF]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // LLVM: [[ACQ]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // LLVM: [[SEQ_CST]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+
+ // OGCG: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // OGCG-NEXT: i32 1, label %[[ACQ:.*]]
+ // OGCG-NEXT: i32 2, label %[[ACQ]]
+ // OGCG-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // OGCG-NEXT: ]
+ // OGCG: [[DEF]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // OGCG: [[ACQ]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // OGCG: [[SEQ_CST]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
}
-void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
+void atomic_cmpxchg(int *ptr, int *expected, int *desired, int failure) {
// CIR-LABEL: @atomic_cmpxchg
// LLVM-LABEL: @atomic_cmpxchg
// OGCG-LABEL: @atomic_cmpxchg
+ // CIR: %[[FAILURE:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["failure", init]
__atomic_compare_exchange(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
// CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
@@ -399,12 +488,96 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
// OGCG: [[LABEL_CONT]]:
// OGCG-NEXT: %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
// OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+ __atomic_compare_exchange(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, failure);
+ // CIR: %[[FAIL_LOAD:.*]] = cir.load{{.*}}%[[FAILURE]]
+ // CIR: cir.switch(%[[FAIL_LOAD]] : !s32i) {
+ // CIR-NEXT: cir.case(default, []) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(seq_cst) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.yield
+ // CIR-NEXT: }
+
+ // LLVM: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // LLVM-NEXT: i32 1, label %[[ACQ:.*]]
+ // LLVM-NEXT: i32 2, label %[[ACQ]]
+ // LLVM-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // LLVM-NEXT: ]
+ // LLVM: [[DEF]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // LLVM: [[ACQ]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // LLVM: [[SEQ_CST]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+
+ // OGCG: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // OGCG-NEXT: i32 1, label %[[ACQ:.*]]
+ // OGCG-NEXT: i32 2, label %[[ACQ]]
+ // OGCG-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // OGCG-NEXT: ]
+ // OGCG: [[DEF]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // OGCG: [[ACQ]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // OGCG: [[SEQ_CST]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+ __atomic_compare_exchange(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, failure);
+ // CIR: %[[FAIL_LOAD:.*]] = cir.load{{.*}}%[[FAILURE]]
+ // CIR: cir.switch(%[[FAIL_LOAD]] : !s32i) {
+ // CIR-NEXT: cir.case(default, []) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(seq_cst) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.yield
+ // CIR-NEXT: }
+
+ // LLVM: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // LLVM-NEXT: i32 1, label %[[ACQ:.*]]
+ // LLVM-NEXT: i32 2, label %[[ACQ]]
+ // LLVM-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // LLVM-NEXT: ]
+ // LLVM: [[DEF]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // LLVM: [[ACQ]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // LLVM: [[SEQ_CST]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+
+ // OGCG: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // OGCG-NEXT: i32 1, label %[[ACQ:.*]]
+ // OGCG-NEXT: i32 2, label %[[ACQ]]
+ // OGCG-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // OGCG-NEXT: ]
+ // OGCG: [[DEF]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // OGCG: [[ACQ]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // OGCG: [[SEQ_CST]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
}
-void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
+void atomic_cmpxchg_n(int *ptr, int *expected, int desired, int failure) {
// CIR-LABEL: @atomic_cmpxchg_n
// LLVM-LABEL: @atomic_cmpxchg_n
// OGCG-LABEL: @atomic_cmpxchg_n
+ // CIR: %[[FAILURE:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["failure", init]
__atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
// CIR: %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
@@ -467,6 +640,89 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
// OGCG: [[LABEL_CONT]]:
// OGCG-NEXT: %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
// OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+ __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, failure);
+ // CIR: %[[FAIL_LOAD:.*]] = cir.load{{.*}}%[[FAILURE]]
+ // CIR: cir.switch(%[[FAIL_LOAD]] : !s32i) {
+ // CIR-NEXT: cir.case(default, []) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg success(seq_cst) failure(seq_cst) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.yield
+ // CIR-NEXT: }
+
+ // LLVM: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // LLVM-NEXT: i32 1, label %[[ACQ:.*]]
+ // LLVM-NEXT: i32 2, label %[[ACQ]]
+ // LLVM-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // LLVM-NEXT: ]
+ // LLVM: [[DEF]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // LLVM: [[ACQ]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // LLVM: [[SEQ_CST]]:
+ // LLVM: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+
+ // OGCG: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // OGCG-NEXT: i32 1, label %[[ACQ:.*]]
+ // OGCG-NEXT: i32 2, label %[[ACQ]]
+ // OGCG-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // OGCG-NEXT: ]
+ // OGCG: [[DEF]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // OGCG: [[ACQ]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // OGCG: [[SEQ_CST]]:
+ // OGCG: cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+ __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, failure);
+ // CIR: %[[FAIL_LOAD:.*]] = cir.load{{.*}}%[[FAILURE]]
+ // CIR: cir.switch(%[[FAIL_LOAD]] : !s32i) {
+ // CIR-NEXT: cir.case(default, []) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(relaxed) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
+ // CIR: cir.atomic.cmpxchg weak success(seq_cst) failure(seq_cst) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+ // CIR: cir.break
+ // CIR-NEXT: }
+ // CIR-NEXT: cir.yield
+ // CIR-NEXT: }
+
+ // LLVM: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // LLVM-NEXT: i32 1, label %[[ACQ:.*]]
+ // LLVM-NEXT: i32 2, label %[[ACQ]]
+ // LLVM-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // LLVM-NEXT: ]
+ // LLVM: [[DEF]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // LLVM: [[ACQ]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // LLVM: [[SEQ_CST]]:
+ // LLVM: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
+
+ // OGCG: switch i32 %{{.*}}, label %[[DEF:.*]] [
+ // OGCG-NEXT: i32 1, label %[[ACQ:.*]]
+ // OGCG-NEXT: i32 2, label %[[ACQ]]
+ // OGCG-NEXT: i32 5, label %[[SEQ_CST:.*]]
+ // OGCG-NEXT: ]
+ // OGCG: [[DEF]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst monotonic
+ // OGCG: [[ACQ]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire
+ // OGCG: [[SEQ_CST]]:
+ // OGCG: cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst seq_cst
}
void c11_atomic_exchange(_Atomic(int) *ptr, int value) {
More information about the cfe-commits
mailing list