[libcxx-commits] [libcxx] b2574c9 - [clang] [libc++] fix _Atomic c11 compare exchange does not update expected results (#78707)

via libcxx-commits libcxx-commits at lists.llvm.org
Sun Oct 19 11:25:04 PDT 2025


Author: Hui
Date: 2025-10-19T19:25:00+01:00
New Revision: b2574c9dad487be92605316efd929738a5f6d851

URL: https://github.com/llvm/llvm-project/commit/b2574c9dad487be92605316efd929738a5f6d851
DIFF: https://github.com/llvm/llvm-project/commit/b2574c9dad487be92605316efd929738a5f6d851.diff

LOG: [clang] [libc++] fix _Atomic c11 compare exchange does not update expected results (#78707)

fixes #30023

The issue is that, for the compare exchange builtins, if the type's size
is not a power of 2, codegen creates a temporary whose size is rounded up
to a power of 2 and then emits the compare exchange operation on it. The
result of that operation has two components: 1. a boolean indicating
whether the exchange happened, and 2. the old value. On failure we are
supposed to write the old value into the user's "expected" object.
However, when the type's size is not a power of 2, what we actually wrote
to was the temporary that had been created.

The fix is to pass the "expected" address all the way down so the old
value is written to the correct address.
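
For illustration, a minimal reproducer of the user-visible bug (a sketch
mirroring the added libc++ test; the struct name and values are
illustrative, not part of the commit):

    #include <atomic>
    #include <cassert>
    #include <cstring>

    // A 3-byte type: its size is not a power of 2, so codegen pads the
    // atomic operation out to a 4-byte cmpxchg.
    struct S3 { char data[3]; };

    int main() {
      std::atomic<S3> a(S3{{0, 0, 0}});
      S3 expected{{1, 2, 3}}; // deliberately different from a's value
      // The exchange fails, so "expected" must receive a's current value.
      bool ok = a.compare_exchange_strong(expected, S3{{4, 5, 6}});
      assert(!ok);
      // Before this fix, the old value was written to the padded temporary
      // instead, leaving "expected" unchanged and this assertion failing.
      assert(std::memcmp(expected.data, "\0\0\0", 3) == 0);
      return 0;
    }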

Added: 
    clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
    libcxx/test/std/atomics/atomics.types.generic/cas_non_power_of_2.pass.cpp

Modified: 
    clang/lib/CodeGen/CGAtomic.cpp
    clang/test/CodeGen/c11atomics-ios.c

Removed: 
    


################################################################################
diff  --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index d95dab3a19fea..5bf1816fd96a3 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
 }
 
 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
-                              Address Dest, Address Ptr,
-                              Address Val1, Address Val2,
-                              uint64_t Size,
-                              llvm::AtomicOrdering SuccessOrder,
+                              Address Dest, Address Ptr, Address Val1,
+                              Address Val2, Address ExpectedResult,
+                              uint64_t Size, llvm::AtomicOrdering SuccessOrder,
                               llvm::AtomicOrdering FailureOrder,
                               llvm::SyncScope::ID Scope) {
   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
 
   CGF.Builder.SetInsertPoint(StoreExpectedBB);
   // Update the memory at Expected with Old's value.
-  auto *I = CGF.Builder.CreateStore(Old, Val1);
-  CGF.addInstToCurrentSourceAtom(I, Old);
+  llvm::Type *ExpectedType = ExpectedResult.getElementType();
+  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+  uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+  if (ExpectedSizeInBytes == Size) {
+    // Sizes match: store directly
+    auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+    CGF.addInstToCurrentSourceAtom(I, Old);
+  } else {
+    // store only the first ExpectedSizeInBytes bytes of Old
+    llvm::Type *OldType = Old->getType();
+
+    // Allocate temporary storage for Old value
+    Address OldTmp =
+        CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+    // Store Old into this temporary
+    auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+    CGF.addInstToCurrentSourceAtom(I, Old);
+
+    // Perform memcpy for first ExpectedSizeInBytes bytes
+    CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+                             /*isVolatile=*/false);
+  }
 
   // Finally, branch to the exit point.
   CGF.Builder.CreateBr(ContinueBB);
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
 /// Given an ordering required on success, emit all possible cmpxchg
 /// instructions to cope with the provided (but possibly only dynamically known)
 /// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
-                                        bool IsWeak, Address Dest, Address Ptr,
-                                        Address Val1, Address Val2,
-                                        llvm::Value *FailureOrderVal,
-                                        uint64_t Size,
-                                        llvm::AtomicOrdering SuccessOrder,
-                                        llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+    CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+    Address Val1, Address Val2, Address ExpectedResult,
+    llvm::Value *FailureOrderVal, uint64_t Size,
+    llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
   llvm::AtomicOrdering FailureOrder;
   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
     auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
     // success argument". This condition has been lifted and the only
     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
     // language version checks.
-    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                      FailureOrder, Scope);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+                      Size, SuccessOrder, FailureOrder, Scope);
     return;
   }
 
@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
 
   // Emit all the different atomics
   CGF.Builder.SetInsertPoint(MonotonicBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
-                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(AcquireBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                    llvm::AtomicOrdering::Acquire, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(SeqCstBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+                    Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(ContBB);
@@ -538,8 +558,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
 
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
-                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
-                         uint64_t Size, llvm::AtomicOrdering Order,
+                         Address ExpectedResult, llvm::Value *IsWeak,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         llvm::AtomicOrdering Order,
                          llvm::SyncScope::ID Scope) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
   bool PostOpMinMax = false;
@@ -554,13 +575,15 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                FailureOrder, Size, Order, Scope);
+                                ExpectedResult, FailureOrder, Size, Order,
+                                Scope);
     return;
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
-                                FailureOrder, Size, Order, Scope);
+                                ExpectedResult, FailureOrder, Size, Order,
+                                Scope);
     return;
   case AtomicExpr::AO__atomic_compare_exchange:
   case AtomicExpr::AO__atomic_compare_exchange_n:
@@ -568,7 +591,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
-                                  Val1, Val2, FailureOrder, Size, Order, Scope);
+                                  Val1, Val2, ExpectedResult, FailureOrder,
+                                  Size, Order, Scope);
     } else {
       // Create all the relevant BB's
       llvm::BasicBlock *StrongBB =
@@ -582,12 +606,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
 
       CGF.Builder.SetInsertPoint(StrongBB);
       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                  FailureOrder, Size, Order, Scope);
+                                  ExpectedResult, FailureOrder, Size, Order,
+                                  Scope);
       CGF.Builder.CreateBr(ContBB);
 
       CGF.Builder.SetInsertPoint(WeakBB);
       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
-                                  FailureOrder, Size, Order, Scope);
+                                  ExpectedResult, FailureOrder, Size, Order,
+                                  Scope);
       CGF.Builder.CreateBr(ContBB);
 
       CGF.Builder.SetInsertPoint(ContBB);
@@ -797,9 +823,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
 
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
-                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
-                         uint64_t Size, llvm::AtomicOrdering Order,
-                         llvm::Value *Scope) {
+                         Address OriginalVal1, llvm::Value *IsWeak,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         llvm::AtomicOrdering Order, llvm::Value *Scope) {
   auto ScopeModel = Expr->getScopeModel();
 
   // LLVM atomic instructions always have sync scope. If clang atomic
@@ -816,8 +842,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                                                    Order, CGF.getLLVMContext());
     else
       SS = llvm::SyncScope::System;
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order, SS);
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order, SS);
     return;
   }
 
@@ -826,8 +852,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
         Order, CGF.CGM.getLLVMContext());
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order, SCID);
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order, SCID);
     return;
   }
 
@@ -852,12 +878,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
       SI->addCase(Builder.getInt32(S), B);
 
     Builder.SetInsertPoint(B);
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order,
-                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
-                                                         ScopeModel->map(S),
-                                                         Order,
-                                                         CGF.getLLVMContext()));
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order,
+                 CGF.getTargetHooks().getLLVMSyncScopeID(
+                     CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+                     CGF.getLLVMContext()));
     Builder.CreateBr(ContBB);
   }
 
@@ -1058,6 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
   AtomicInfo Atomics(*this, AtomicVal);
 
+  Address OriginalVal1 = Val1;
   if (ShouldCastToIntPtrTy) {
     Ptr = Atomics.castToAtomicIntPointer(Ptr);
     if (Val1.isValid())
@@ -1301,30 +1327,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     if (llvm::isValidAtomicOrderingCABI(ord))
       switch ((llvm::AtomicOrderingCABI)ord) {
       case llvm::AtomicOrderingCABI::relaxed:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Monotonic, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
         break;
       case llvm::AtomicOrderingCABI::consume:
       case llvm::AtomicOrderingCABI::acquire:
         if (IsStore)
           break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Acquire, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
         break;
       case llvm::AtomicOrderingCABI::release:
         if (IsLoad)
           break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Release, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
         break;
       case llvm::AtomicOrderingCABI::acq_rel:
         if (IsLoad || IsStore)
           break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::AcquireRelease, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+                     Scope);
         break;
       case llvm::AtomicOrderingCABI::seq_cst:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size,
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
         break;
       }
@@ -1360,13 +1388,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
 
   // Emit all the different atomics
   Builder.SetInsertPoint(MonotonicBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-               llvm::AtomicOrdering::Monotonic, Scope);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+               Size, llvm::AtomicOrdering::Monotonic, Scope);
   Builder.CreateBr(ContBB);
   if (!IsStore) {
     Builder.SetInsertPoint(AcquireBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::Acquire, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                 AcquireBB);
@@ -1375,23 +1403,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   }
   if (!IsLoad) {
     Builder.SetInsertPoint(ReleaseBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::Release, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                 ReleaseBB);
   }
   if (!IsLoad && !IsStore) {
     Builder.SetInsertPoint(AcqRelBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::AcquireRelease, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                 AcqRelBB);
   }
   Builder.SetInsertPoint(SeqCstBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+               Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
   Builder.CreateBr(ContBB);
   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
               SeqCstBB);
@@ -1417,6 +1445,11 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
   if (SourceSizeInBits != AtomicSizeInBits) {
     Address Tmp = CreateTempAlloca();
+    CGF.Builder.CreateMemSet(
+        Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+        CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+        Tmp.getAlignment().getAsAlign());
+
     CGF.Builder.CreateMemCpy(Tmp, Addr,
                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
     Addr = Tmp;
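
(Aside, not part of the commit: conceptually, the new failure path above
spills the full-width old value to a temporary and copies back only the
bytes of the user's type, since a direct full-width store would clobber
the bytes following the "expected" object. A C++ sketch of the emitted
logic for a 6-byte payload padded to an 8-byte cmpxchg; the names are
illustrative:)

    #include <cstdint>
    #include <cstring>

    // "old" is the full 8-byte value returned by cmpxchg; "expected"
    // points at the caller's 6-byte object. Storing all 8 bytes would
    // overwrite the two bytes after it, so spill to a temporary and
    // memcpy only ExpectedSizeInBytes (here, 6) bytes.
    void store_expected(void* expected, std::uint64_t old) {
      std::uint64_t old_tmp = old;        // the "old.tmp" alloca in the IR
      std::memcpy(expected, &old_tmp, 6); // copy only the meaningful bytes
    }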

diff  --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c
index 8310270ed8547..34843ef55ecb7 100644
--- a/clang/test/CodeGen/c11atomics-ios.c
+++ b/clang/test/CodeGen/c11atomics-ios.c
@@ -235,6 +235,7 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
   // CHECK:   [[ATOMIC_DESIRED:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
   // CHECK:   [[ATOMIC_NEW:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
   // CHECK:   [[RES_ADDR:%.*]] = alloca i8, align 1
+  // CHECK:   [[OLD_TMP:%.*]] = alloca i64, align 8
   // CHECK:   store ptr %addr, ptr [[ADDR_ARG]], align 4
   // CHECK:   store ptr %desired, ptr [[DESIRED_ARG]], align 4
   // CHECK:   store ptr %new, ptr [[NEW_ARG]], align 4
@@ -251,7 +252,8 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
   // CHECK:   [[RES_BOOL:%.*]] = extractvalue { i64, i1 } [[RES]], 1
   // CHECK:   br i1 [[RES_BOOL]], label {{%.*}}, label {{%.*}}
 
-  // CHECK:   store i64 [[RES_VAL64]], ptr [[ATOMIC_DESIRED]], align 8
+  // CHECK:   store i64 [[RES_VAL64]], ptr [[OLD_TMP]], align 8
+  // CHECK:   call void @llvm.memcpy.p0.p0.i64(ptr align 2 [[DESIRED_ARG:%.*]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
   // CHECK:   br label {{%.*}}
 
   // CHECK:   [[RES_BOOL8:%.*]] = zext i1 [[RES_BOOL]] to i8

diff  --git a/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp b/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
new file mode 100644
index 0000000000000..4f1fe9852ce12
--- /dev/null
+++ b/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
@@ -0,0 +1,130 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -std=c++20 -triple=x86_64-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+
+template <unsigned Size>
+struct S {
+    char data[Size];
+};
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj3EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i24 [[DESIRED_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[DESIRED:%.*]] = alloca [[STRUCT_S:%.*]], align 1
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S]], align 1
+// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca { [[STRUCT_S]], [1 x i8] }, align 4
+// CHECK-NEXT:    [[ATOMIC_TEMP1:%.*]] = alloca { [[STRUCT_S]], [1 x i8] }, align 4
+// CHECK-NEXT:    [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT:    [[OLD_TMP:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT:    store i24 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT:    store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 3, i1 false)
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP]], i8 0, i64 4, i1 false)
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ATOMIC_TEMP]], ptr align 1 [[TMP1]], i64 3, i1 false)
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 4, i1 false)
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], ptr align 1 [[DOTATOMICTMP]], i64 3, i1 false)
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP1]], align 4
+// CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP2]], i32 [[TMP3]] monotonic monotonic, align 4
+// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+// CHECK-NEXT:    br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK:       [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT:    store i32 [[TMP5]], ptr [[OLD_TMP]], align 4
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP1]], ptr align 4 [[OLD_TMP]], i64 3, i1 false)
+// CHECK-NEXT:    br label %[[CMPXCHG_CONTINUE]]
+// CHECK:       [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT:    store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT:    ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<3>)* a, S<3>* expected, S<3> desired) {
+    return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
+
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj4EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i32 [[DESIRED_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[DESIRED:%.*]] = alloca [[STRUCT_S_0:%.*]], align 1
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S_0]], align 1
+// CHECK-NEXT:    [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S_0]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT:    store i32 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT:    store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 4, i1 false)
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 1
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 1
+// CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP2]], i32 [[TMP3]] monotonic monotonic, align 4
+// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+// CHECK-NEXT:    br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK:       [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT:    store i32 [[TMP5]], ptr [[TMP1]], align 1
+// CHECK-NEXT:    br label %[[CMPXCHG_CONTINUE]]
+// CHECK:       [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT:    store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT:    ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<4>)* a, S<4>* expected, S<4> desired) {
+    return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj6EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i48 [[DESIRED_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[DESIRED:%.*]] = alloca [[STRUCT_S_1:%.*]], align 1
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S_1]], align 1
+// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca { [[STRUCT_S_1]], [2 x i8] }, align 8
+// CHECK-NEXT:    [[ATOMIC_TEMP1:%.*]] = alloca { [[STRUCT_S_1]], [2 x i8] }, align 8
+// CHECK-NEXT:    [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT:    [[OLD_TMP:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S_1]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT:    store i48 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT:    store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 6, i1 false)
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[ATOMIC_TEMP]], i8 0, i64 8, i1 false)
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ATOMIC_TEMP]], ptr align 1 [[TMP1]], i64 6, i1 false)
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[ATOMIC_TEMP1]], i8 0, i64 8, i1 false)
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ATOMIC_TEMP1]], ptr align 1 [[DOTATOMICTMP]], i64 6, i1 false)
+// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[ATOMIC_TEMP]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[ATOMIC_TEMP1]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i64 [[TMP2]], i64 [[TMP3]] monotonic monotonic, align 8
+// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { i64, i1 } [[TMP4]], 0
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+// CHECK-NEXT:    br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK:       [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT:    store i64 [[TMP5]], ptr [[OLD_TMP]], align 8
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP1]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
+// CHECK-NEXT:    br label %[[CMPXCHG_CONTINUE]]
+// CHECK:       [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT:    store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT:    ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<6>)* a, S<6>* expected, S<6> desired) {
+    return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}

diff  --git a/libcxx/test/std/atomics/atomics.types.generic/cas_non_power_of_2.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/cas_non_power_of_2.pass.cpp
new file mode 100644
index 0000000000000..13bd761ae9808
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/cas_non_power_of_2.pass.cpp
@@ -0,0 +1,76 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// https://github.com/llvm/llvm-project/issues/30023
+// compare exchange does not work with types of which the size is not a power of 2
+
+// XFAIL: clang-19, clang-20, clang-21, apple-clang-15, apple-clang-16, apple-clang-17
+// UNSUPPORTED: c++03
+
+// TODO: remove the UNSUPPORTED clang-22 once libc++ CI's clang is updated to include
+// the fix https://github.com/llvm/llvm-project/pull/78707
+// UNSUPPORTED: clang-22
+
+#include <atomic>
+#include <cstring>
+#include <cassert>
+
+template <int Size>
+struct S {
+  char data[Size];
+
+  explicit S(char v = 0) noexcept { memset(&data[0], v, sizeof(data)); }
+
+  // only used in the test to check the results. Not used in atomic operations.
+  friend bool operator==(const S& lhs, const S& rhs) noexcept {
+    return memcmp(&lhs.data[0], &rhs.data[0], sizeof(lhs.data)) == 0;
+  }
+  friend bool operator!=(const S& lhs, const S& rhs) noexcept { return !(lhs == rhs); }
+};
+
+template <int Size>
+struct Expected {
+  Expected(S<Size> ss) : s(ss) {}
+
+  S<Size> s;
+  bool b = true; // used to validate that s's operation won't overwrite the memory next to it
+};
+
+template <int Size>
+void test() {
+  using T = S<Size>;
+  std::atomic<T> a(T(0));
+  Expected<Size> expected{T(17)};
+
+  assert(a.load() != expected.s);
+  assert(expected.b);
+
+  auto r1 = a.compare_exchange_strong(expected.s, T(18), std::memory_order_relaxed);
+
+  assert(!r1);
+  assert(expected.s == T(0)); // expected.s is modified by compare_exchange_strong
+  assert(expected.b);
+  assert(a.load() == T(0));
+
+  auto r2 = a.compare_exchange_strong(expected.s, T(18), std::memory_order_relaxed);
+  assert(r2);
+  assert(a.load() == T(18));
+  assert(expected.s == T(0));
+  assert(expected.b);
+}
+
+int main(int, char**) {
+  test<1>();
+  test<2>();
+  test<3>();
+  test<4>();
+  test<5>();
+  test<6>();
+
+  return 0;
+}

More information about the libcxx-commits mailing list