[libcxx-commits] [clang] [libcxx] [clang] [libc++] P3309 constexpr atomic and atomic ref [WIP] (PR #98738)
Hana Dusíková via libcxx-commits
libcxx-commits at lists.llvm.org
Sat Jul 13 11:04:26 PDT 2024
https://github.com/hanickadot updated https://github.com/llvm/llvm-project/pull/98738
>From d917c1c121db441ccb39b86729dba2201f0cf2f4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Hana=20Dusi=CC=81kova=CC=81?= <hanicka at hanicka.net>
Date: Sat, 13 Jul 2024 16:39:21 +0200
Subject: [PATCH 1/2] snapshot
---
clang/include/clang/Basic/Builtins.td | 84 ++--
clang/lib/AST/ExprConstant.cpp | 449 +++++++++++++++++++++-
libcxx/include/__atomic/atomic.h | 95 ++---
libcxx/include/__atomic/atomic_base.h | 76 ++--
libcxx/include/__atomic/atomic_flag.h | 60 +--
libcxx/include/__atomic/atomic_ref.h | 314 ++++++++++-----
libcxx/include/__atomic/cxx_atomic_impl.h | 77 ++--
libcxx/include/__config | 6 +
8 files changed, 882 insertions(+), 279 deletions(-)
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index f5b15cf90d1f8..0716cf02f5110 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1682,97 +1682,97 @@ def SyncSwapN : Builtin, SyncBuiltinsTemplate {
// C11 _Atomic operations for <stdatomic.h>.
def C11AtomicInit : AtomicBuiltin {
let Spellings = ["__c11_atomic_init"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicLoad : AtomicBuiltin {
let Spellings = ["__c11_atomic_load"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicStore : AtomicBuiltin {
let Spellings = ["__c11_atomic_store"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicExchange : AtomicBuiltin {
let Spellings = ["__c11_atomic_exchange"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicCompareExchangeStrong : AtomicBuiltin {
let Spellings = ["__c11_atomic_compare_exchange_strong"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicCompareExchangeWeak : AtomicBuiltin {
let Spellings = ["__c11_atomic_compare_exchange_weak"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchAdd : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_add"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchSub : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_sub"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchAnd : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_and"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchOr : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_or"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchXor : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_xor"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchNand : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_nand"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchMax : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_max"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicFetchMin : AtomicBuiltin {
let Spellings = ["__c11_atomic_fetch_min"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def C11AtomicThreadFence : Builtin {
let Spellings = ["__c11_atomic_thread_fence"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, Constexpr];
let Prototype = "void(int)";
}
def C11AtomicSignalFence : Builtin {
let Spellings = ["__c11_atomic_signal_fence"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, Constexpr];
let Prototype = "void(int)";
}
@@ -1785,157 +1785,157 @@ def C11AtomicIsLockFree : Builtin {
// GNU atomic builtins.
def AtomicLoad : AtomicBuiltin {
let Spellings = ["__atomic_load"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicLoadN : AtomicBuiltin {
let Spellings = ["__atomic_load_n"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicStore : AtomicBuiltin {
let Spellings = ["__atomic_store"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicStoreN : AtomicBuiltin {
let Spellings = ["__atomic_store_n"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicExchange : AtomicBuiltin {
let Spellings = ["__atomic_exchange"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicExchangeN : AtomicBuiltin {
let Spellings = ["__atomic_exchange_n"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicCompareExchange : AtomicBuiltin {
let Spellings = ["__atomic_compare_exchange"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicCompareExchangeN : AtomicBuiltin {
let Spellings = ["__atomic_compare_exchange_n"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicFetchAdd : AtomicBuiltin {
let Spellings = ["__atomic_fetch_add"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicFetchSub : AtomicBuiltin {
let Spellings = ["__atomic_fetch_sub"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicFetchAnd : AtomicBuiltin {
let Spellings = ["__atomic_fetch_and"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicFetchOr : AtomicBuiltin {
let Spellings = ["__atomic_fetch_or"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicFetchXor : AtomicBuiltin {
let Spellings = ["__atomic_fetch_xor"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicFetchNand : AtomicBuiltin {
let Spellings = ["__atomic_fetch_nand"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicAddFetch : AtomicBuiltin {
let Spellings = ["__atomic_add_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicSubFetch : AtomicBuiltin {
let Spellings = ["__atomic_sub_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicAndFetch : AtomicBuiltin {
let Spellings = ["__atomic_and_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicOrFetch : AtomicBuiltin {
let Spellings = ["__atomic_or_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicXorFetch : AtomicBuiltin {
let Spellings = ["__atomic_xor_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicMaxFetch : AtomicBuiltin {
let Spellings = ["__atomic_max_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicMinFetch : AtomicBuiltin {
let Spellings = ["__atomic_min_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicNandFetch : AtomicBuiltin {
let Spellings = ["__atomic_nand_fetch"];
- let Attributes = [CustomTypeChecking];
+ let Attributes = [CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def AtomicTestAndSet : Builtin {
let Spellings = ["__atomic_test_and_set"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, Constexpr];
let Prototype = "bool(void volatile*, int)";
}
def AtomicClear : Builtin {
let Spellings = ["__atomic_clear"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, Constexpr];
let Prototype = "void(void volatile*, int)";
}
def AtomicThreadFence : Builtin {
let Spellings = ["__atomic_thread_fence"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, Constexpr];
let Prototype = "void(int)";
}
def AtomicSignalFence : Builtin {
let Spellings = ["__atomic_signal_fence"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, Constexpr];
let Prototype = "void(int)";
}
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 0aeac9d03eed3..32f04ccaa1205 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -1900,6 +1900,17 @@ static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
// Misc utilities
//===----------------------------------------------------------------------===//
+static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
+ const LValue &LV);
+
+enum class SizeOfType {
+ SizeOf,
+ DataSizeOf,
+};
+
+static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type,
+ CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf);
+
/// Negate an APSInt in place, converting it to a signed form if necessary, and
/// preserving its value (by extending by up to one bit as needed).
static void negateAsSigned(APSInt &Int) {
@@ -3222,14 +3233,9 @@ static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
return true;
}
-enum class SizeOfType {
- SizeOf,
- DataSizeOf,
-};
-
/// Get the size of the given type in char units.
static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type,
- CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf) {
+ CharUnits &Size, SizeOfType SOT) {
// sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
// extension.
if (Type->isVoidType() || Type->isFunctionType()) {
@@ -7884,6 +7890,426 @@ class ExprEvaluatorBase
return StmtVisitorTy::Visit(Source);
}
+ static bool EvaluateOrder(const Expr *E, EvalInfo &Info) {
+ // we ignore order
+ [[maybe_unused]] APSInt Order;
+ if (!EvaluateInteger(E, Order, Info)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ static bool ReadAtomicPtr(const AtomicExpr *E, APValue &Result,
+ EvalInfo &Info) {
+ LValue AtomicLV;
+ if (!EvaluatePointer(E->getPtr(), AtomicLV, Info)) {
+ return false;
+ }
+
+ if (!handleLValueToRValueConversion(Info, E->getPtr(), E->getType(),
+ AtomicLV, Result)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ static bool LoadAtomicValue(const AtomicExpr *E, APValue &Result,
+ EvalInfo &Info) {
+ if (!ReadAtomicPtr(E, Result, Info)) {
+ return false;
+ }
+
+ // we ignore order
+ if (!EvaluateOrder(E->getOrder(), Info)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ static bool FetchAtomicOp(const AtomicExpr *E, APValue &Result,
+ EvalInfo &Info, bool StoreToResultAfter) {
+ LValue AtomicLV;
+ QualType AtomicTy =
+ E->getPtr()->getType()->getPointeeType().getAtomicUnqualifiedType();
+ if (!EvaluatePointer(E->getPtr(), AtomicLV, Info)) {
+ return false;
+ }
+
+ APValue AtomicVal;
+ if (!handleLValueToRValueConversion(Info, E->getPtr(), E->getType(),
+ AtomicLV, AtomicVal)) {
+ return false;
+ }
+
+ if (!StoreToResultAfter) {
+ Result = AtomicVal;
+ }
+
+ const auto ResultType = E->getType();
+
+ APValue ArgumentVal;
+ if (!Evaluate(ArgumentVal, Info, E->getVal1())) {
+ return false;
+ }
+
+ APValue Replacement;
+ if (ResultType->isIntegralOrEnumerationType()) {
+ const APSInt AtomicInt = AtomicVal.getInt();
+ const APSInt ArgumentInt = ArgumentVal.getInt();
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_add_fetch:
+ Replacement = APValue(AtomicInt + ArgumentInt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ Replacement = APValue(AtomicInt - ArgumentInt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_and_fetch:
+ Replacement = APValue(AtomicInt & ArgumentInt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_or_fetch:
+ Replacement = APValue(AtomicInt | ArgumentInt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ Replacement = APValue(AtomicInt ^ ArgumentInt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Replacement = APValue(~(AtomicInt & ArgumentInt));
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_max_fetch:
+ Replacement =
+ APValue((AtomicInt > ArgumentInt) ? AtomicInt : ArgumentInt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_min_fetch:
+ Replacement =
+ APValue((AtomicInt < ArgumentInt) ? AtomicInt : ArgumentInt);
+ break;
+ default:
+ return false;
+ }
+ } else if (ResultType->isRealFloatingType()) {
+ const llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
+ APFloat AtomicFlt = AtomicVal.getFloat();
+ const APFloat ArgumentFlt = ArgumentVal.getFloat();
+ APFloat::opStatus St;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_fetch_add: // GCC atomics don't support
+ // floats
+ St = AtomicFlt.add(ArgumentFlt, RM);
+ Replacement = APValue(AtomicFlt);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ St = AtomicFlt.subtract(ArgumentFlt, RM);
+ Replacement = APValue(AtomicFlt);
+ break;
+ default:
+ return false;
+ }
+
+ if (!checkFloatingPointResult(Info, E, St)) {
+ return false;
+ }
+ } else if (ResultType->isPointerType()) {
+ LValue AtomicPtr;
+ AtomicPtr.setFrom(Info.Ctx, AtomicVal);
+
+ APSInt ArgumentInt = ArgumentVal.getInt();
+
+ CharUnits SizeOfPointee;
+ if (!HandleSizeof(Info, E->getExprLoc(), AtomicTy->getPointeeType(),
+ SizeOfPointee))
+ return false;
+
+ // GCC's atomic_fetch add/sub computes the new pointer in bytes, not in
+ // units of sizeof(T)
+ switch (E->getOp()) {
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_sub_fetch: {
+ const auto sizeOfOneItem =
+ APSInt(APInt(ArgumentInt.getBitWidth(), SizeOfPointee.getQuantity(),
+ false),
+ false);
+ if ((ArgumentInt % sizeOfOneItem) != 0) {
+ // incrementing a pointer by a size which is not divisible by the pointee
+ // size is UB and therefore disallowed
+ return false;
+ }
+ ArgumentInt /= sizeOfOneItem;
+ } break;
+ default:
+ break;
+ }
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_add_fetch:
+ AtomicPtr.adjustOffsetAndIndex(Info, E, ArgumentInt, SizeOfPointee);
+ break;
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ ArgumentInt.negate();
+ AtomicPtr.adjustOffsetAndIndex(Info, E, ArgumentInt, SizeOfPointee);
+ break;
+ default:
+ return false;
+ }
+
+ AtomicPtr.moveInto(Replacement);
+ } else {
+ // not a float, integer, or pointer type
+ return false;
+ }
+
+ if (StoreToResultAfter) {
+ Result = Replacement;
+ }
+
+ if (!handleAssignment(Info, E, AtomicLV, AtomicTy, Replacement)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ static bool StoreAtomicValue(const AtomicExpr *E, EvalInfo &Info) {
+ LValue LV;
+ if (!EvaluatePointer(E->getPtr(), LV, Info)) {
+ return false;
+ }
+
+ APValue NewVal;
+ if (!Evaluate(NewVal, Info, E->getVal1())) {
+ return false;
+ }
+
+ if (!handleAssignment(Info, E, LV, E->getVal1()->getType(), NewVal)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ static bool CompareExchangeAtomicValue(const AtomicExpr *E, APValue &Result,
+ EvalInfo &Info) {
+ // dereference _Atomic * (atomic value)
+ LValue AtomicLV;
+ QualType AtomicTy =
+ E->getPtr()->getType()->getPointeeType().getAtomicUnqualifiedType();
+ if (!EvaluatePointer(E->getPtr(), AtomicLV, Info)) {
+ return false;
+ }
+
+ // dereference T * (expected value)
+ LValue ExpectedLV;
+ QualType ExpectedTy = E->getVal1()->getType()->getPointeeType();
+ if (!EvaluatePointer(E->getVal1(), ExpectedLV, Info)) {
+ return false;
+ }
+
+ // get values for atomic and expected
+ APValue AtomicVal;
+ APValue ExpectedVal;
+
+ // convert pointer to value
+ if (!handleLValueToRValueConversion(Info, E->getPtr(), AtomicTy, AtomicLV,
+ AtomicVal)) {
+ return false;
+ }
+
+ if (!handleLValueToRValueConversion(Info, E->getVal1(), ExpectedTy,
+ ExpectedLV, ExpectedVal)) {
+ return false;
+ }
+
+ bool DoExchange = false;
+
+ // compare atomic<int> and friends
+ if (AtomicTy->isIntegralOrEnumerationType() &&
+ ExpectedTy->isIntegralOrEnumerationType()) {
+ const APSInt AtomicInt = AtomicVal.getInt();
+ const APSInt ExpectedInt = ExpectedVal.getInt();
+ if (AtomicInt == ExpectedInt) {
+ DoExchange = true;
+ }
+ } else if (AtomicTy->isRealFloatingType() &&
+ ExpectedTy->isRealFloatingType()) {
+ const APFloat AtomicFlt = AtomicVal.getFloat();
+ const APFloat ExpectedFlt = ExpectedVal.getFloat();
+ if (AtomicFlt == ExpectedFlt) {
+ DoExchange = true;
+ }
+ } else if (AtomicTy->isPointerType() && ExpectedTy->isPointerType()) {
+ // get LValue of objects pointed to
+ LValue LHS;
+ LHS.setFrom(Info.Ctx, AtomicVal);
+
+ LValue RHS;
+ RHS.setFrom(Info.Ctx, ExpectedVal);
+
+ if (HasSameBase(LHS, RHS)) {
+ const CharUnits &LHSOffset = LHS.getLValueOffset();
+ const CharUnits &RHSOffset = RHS.getLValueOffset();
+
+ const unsigned PtrSize = Info.Ctx.getTypeSize(AtomicTy);
+ assert(PtrSize <= 64 && "Pointer width is larger than expected");
+ const uint64_t Mask = ~0ULL >> (64 - PtrSize);
+
+ const uint64_t CompareLHS = LHSOffset.getQuantity() & Mask;
+ const uint64_t CompareRHS = RHSOffset.getQuantity() & Mask;
+
+ if (CompareLHS == CompareRHS) {
+ DoExchange = true;
+ }
+ } else {
+
+ // comparing pointers to distinct literals is implementation-defined,
+ // so it is not allowed during constant evaluation
+ if ((IsLiteralLValue(LHS) || IsLiteralLValue(RHS)) && LHS.Base &&
+ RHS.Base) {
+ return false;
+ }
+
+ if (IsWeakLValue(LHS) || IsWeakLValue(RHS)) {
+ return false;
+ }
+
+ if ((!LHS.Base && !LHS.Offset.isZero()) ||
+ (!RHS.Base && !RHS.Offset.isZero())) {
+ return false;
+ }
+
+ if (LHS.Base && LHS.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, RHS)) {
+ return false;
+ }
+
+ if (RHS.Base && RHS.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, LHS)) {
+ return false;
+ }
+
+ if ((RHS.Base && isZeroSized(RHS)) || (LHS.Base && isZeroSized(LHS))) {
+ return false;
+ }
+
+ // after all it's a different object
+ DoExchange = false;
+ }
+
+ } else {
+ return false;
+ }
+
+ if (DoExchange) {
+ // if the values are the same, do the exchange with the replacement value,
+ // but first we must evaluate the replacement value
+ APValue Replacement;
+ if (!Evaluate(Replacement, Info, E->getVal2())) {
+ return false;
+ }
+
+ // and assign it to atomic
+ if (!handleAssignment(Info, E, AtomicLV, AtomicTy, Replacement)) {
+ return false;
+ }
+ }
+
+ // store the previous atomic value into the expected pointer
+ if (!handleAssignment(Info, E, ExpectedLV, ExpectedTy, AtomicVal)) {
+ return false;
+ }
+
+ // and return a boolean indicating whether the exchange was performed
+ Result = APValue(Info.Ctx.MakeIntValue(DoExchange, E->getType()));
+ return true;
+ }
+
+ bool VisitAtomicExpr(const AtomicExpr *E) {
+ APValue LocalResult;
+ switch (E->getOp()) {
+ default:
+ return Error(E);
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ if (!LoadAtomicValue(E, LocalResult, Info)) {
+ return Error(E);
+ }
+ return DerivedSuccess(LocalResult, E);
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ if (!CompareExchangeAtomicValue(E, LocalResult, Info)) {
+ return Error(E);
+ }
+ return DerivedSuccess(LocalResult, E);
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ if (!LoadAtomicValue(E, LocalResult, Info)) {
+ return Error(E);
+ }
+ if (!StoreAtomicValue(E, Info)) {
+ return Error(E);
+ }
+ return DerivedSuccess(LocalResult, E);
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ if (!FetchAtomicOp(E, LocalResult, Info, false)) {
+ return Error(E);
+ }
+ return DerivedSuccess(LocalResult, E);
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ if (!FetchAtomicOp(E, LocalResult, Info, true)) {
+ return Error(E);
+ }
+ return DerivedSuccess(LocalResult, E);
+ }
+ }
+
bool VisitPseudoObjectExpr(const PseudoObjectExpr *E) {
for (const Expr *SemE : E->semantics()) {
if (auto *OVE = dyn_cast<OpaqueValueExpr>(SemE)) {
@@ -15595,6 +16021,17 @@ class VoidExprEvaluator
}
}
+ bool VisitAtomicExpr(const AtomicExpr *E) {
+ switch (E->getOp()) {
+ default:
+ return Error(E);
+ case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ return StoreAtomicValue(E, Info);
+ }
+ }
+
bool VisitCallExpr(const CallExpr *E) {
if (!IsConstantEvaluatedBuiltinCall(E))
return ExprEvaluatorBaseTy::VisitCallExpr(E);
diff --git a/libcxx/include/__atomic/atomic.h b/libcxx/include/__atomic/atomic.h
index bd3f659c22df0..4a72f61d019fc 100644
--- a/libcxx/include/__atomic/atomic.h
+++ b/libcxx/include/__atomic/atomic.h
@@ -67,7 +67,7 @@ struct atomic<_Tp*> : public __atomic_base<_Tp*> {
using value_type = _Tp*;
using difference_type = ptrdiff_t;
- _LIBCPP_HIDE_FROM_ABI atomic() _NOEXCEPT = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 atomic() _NOEXCEPT = default;
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR atomic(_Tp* __d) _NOEXCEPT : __base(__d) {}
@@ -75,7 +75,7 @@ struct atomic<_Tp*> : public __atomic_base<_Tp*> {
__base::store(__d);
return __d;
}
- _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __d) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator=(_Tp* __d) _NOEXCEPT {
__base::store(__d);
return __d;
}
@@ -86,7 +86,8 @@ struct atomic<_Tp*> : public __atomic_base<_Tp*> {
return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp*
+ fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
// __atomic_fetch_add accepts function pointers, guard against them.
static_assert(!is_function<__remove_pointer_t<_Tp> >::value, "Pointer to function isn't allowed");
return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
@@ -98,24 +99,25 @@ struct atomic<_Tp*> : public __atomic_base<_Tp*> {
return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp*
+ fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
// __atomic_fetch_add accepts function pointers, guard against them.
static_assert(!is_function<__remove_pointer_t<_Tp> >::value, "Pointer to function isn't allowed");
return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
}
_LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) volatile _NOEXCEPT { return fetch_add(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) _NOEXCEPT { return fetch_add(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator++(int) _NOEXCEPT { return fetch_add(1); }
_LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) volatile _NOEXCEPT { return fetch_sub(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) _NOEXCEPT { return fetch_sub(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator--(int) _NOEXCEPT { return fetch_sub(1); }
_LIBCPP_HIDE_FROM_ABI _Tp* operator++() volatile _NOEXCEPT { return fetch_add(1) + 1; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator++() _NOEXCEPT { return fetch_add(1) + 1; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator++() _NOEXCEPT { return fetch_add(1) + 1; }
_LIBCPP_HIDE_FROM_ABI _Tp* operator--() volatile _NOEXCEPT { return fetch_sub(1) - 1; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator--() _NOEXCEPT { return fetch_sub(1) - 1; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator--() _NOEXCEPT { return fetch_sub(1) - 1; }
_LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __op) volatile _NOEXCEPT { return fetch_add(__op) + __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __op) _NOEXCEPT { return fetch_add(__op) + __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator+=(ptrdiff_t __op) _NOEXCEPT { return fetch_add(__op) + __op; }
_LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __op) volatile _NOEXCEPT { return fetch_sub(__op) - __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator-=(ptrdiff_t __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
atomic& operator=(const atomic&) = delete;
atomic& operator=(const atomic&) volatile = delete;
@@ -151,7 +153,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
}
template <class _This, class _Operation, class _BuiltinOp>
- _LIBCPP_HIDE_FROM_ABI static _Tp
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 static _Tp
__rmw_op(_This&& __self, _Tp __operand, memory_order __m, _Operation __operation, _BuiltinOp __builtin_op) {
if constexpr (__has_rmw_builtin()) {
return __builtin_op(std::addressof(std::forward<_This>(__self).__a_), __operand, __m);
@@ -174,7 +176,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
}
template <class _This>
- _LIBCPP_HIDE_FROM_ABI static _Tp __fetch_add(_This&& __self, _Tp __operand, memory_order __m) {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 static _Tp __fetch_add(_This&& __self, _Tp __operand, memory_order __m) {
auto __builtin_op = [](auto __a, auto __builtin_operand, auto __order) {
return std::__cxx_atomic_fetch_add(__a, __builtin_operand, __order);
};
@@ -182,7 +184,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
}
template <class _This>
- _LIBCPP_HIDE_FROM_ABI static _Tp __fetch_sub(_This&& __self, _Tp __operand, memory_order __m) {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 static _Tp __fetch_sub(_This&& __self, _Tp __operand, memory_order __m) {
auto __builtin_op = [](auto __a, auto __builtin_operand, auto __order) {
return std::__cxx_atomic_fetch_sub(__a, __builtin_operand, __order);
};
@@ -207,7 +209,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
__base::store(__d);
return __d;
}
- _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __d) noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator=(_Tp __d) noexcept {
__base::store(__d);
return __d;
}
@@ -218,7 +220,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
return __fetch_add(*this, __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept {
return __fetch_add(*this, __op, __m);
}
@@ -228,7 +230,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
return __fetch_sub(*this, __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept {
return __fetch_sub(*this, __op, __m);
}
@@ -238,7 +240,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
return fetch_add(__op) + __op;
}
- _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) noexcept { return fetch_add(__op) + __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator+=(_Tp __op) noexcept { return fetch_add(__op) + __op; }
_LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile noexcept
requires __base::is_always_lock_free
@@ -246,7 +248,7 @@ struct atomic<_Tp> : __atomic_base<_Tp> {
return fetch_sub(__op) - __op;
}
- _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) noexcept { return fetch_sub(__op) - __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator-=(_Tp __op) noexcept { return fetch_sub(__op) - __op; }
};
#endif // _LIBCPP_STD_VER >= 20
@@ -272,7 +274,7 @@ atomic_init(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NO
}
template <class _Tp>
-_LIBCPP_DEPRECATED_IN_CXX20 _LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_DEPRECATED_IN_CXX20 _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI void
atomic_init(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
std::__cxx_atomic_init(std::addressof(__o->__a_), __d);
}
@@ -285,7 +287,8 @@ _LIBCPP_HIDE_FROM_ABI void atomic_store(volatile atomic<_Tp>* __o, typename atom
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void atomic_store(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
+atomic_store(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
__o->store(__d);
}
@@ -299,7 +302,7 @@ atomic_store_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_typ
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_store_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d, memory_order __m) _NOEXCEPT
_LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
__o->store(__d, __m);
@@ -313,7 +316,7 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_load(const volatile atomic<_Tp>* __o) _NOEXCEPT
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_load(const atomic<_Tp>* __o) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp atomic_load(const atomic<_Tp>* __o) _NOEXCEPT {
return __o->load();
}
@@ -326,7 +329,7 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_load_explicit(const volatile atomic<_Tp>* __o,
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_load_explicit(const atomic<_Tp>* __o, memory_order __m) _NOEXCEPT
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp atomic_load_explicit(const atomic<_Tp>* __o, memory_order __m) _NOEXCEPT
_LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
return __o->load(__m);
}
@@ -339,7 +342,8 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_exchange(volatile atomic<_Tp>* __o, typename at
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_exchange(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+atomic_exchange(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
return __o->exchange(__d);
}
@@ -352,7 +356,7 @@ atomic_exchange_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
atomic_exchange_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d, memory_order __m) _NOEXCEPT {
return __o->exchange(__d, __m);
}
@@ -366,7 +370,7 @@ _LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_weak(
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_weak(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool atomic_compare_exchange_weak(
atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
return __o->compare_exchange_weak(*__e, __d);
}
@@ -380,7 +384,7 @@ _LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_strong(
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_strong(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool atomic_compare_exchange_strong(
atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d) _NOEXCEPT {
return __o->compare_exchange_strong(*__e, __d);
}
@@ -398,7 +402,7 @@ _LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_weak_explicit(
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_weak_explicit(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool atomic_compare_exchange_weak_explicit(
atomic<_Tp>* __o,
typename atomic<_Tp>::value_type* __e,
typename atomic<_Tp>::value_type __d,
@@ -420,7 +424,7 @@ _LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_strong_explicit(
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool atomic_compare_exchange_strong_explicit(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool atomic_compare_exchange_strong_explicit(
atomic<_Tp>* __o,
typename atomic<_Tp>::value_type* __e,
typename atomic<_Tp>::value_type __d,
@@ -438,7 +442,7 @@ atomic_wait(const volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __
}
template <class _Tp>
-_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_wait(const atomic<_Tp>* __o, typename atomic<_Tp>::value_type __v) _NOEXCEPT {
return __o->wait(__v);
}
@@ -453,7 +457,7 @@ atomic_wait_explicit(const volatile atomic<_Tp>* __o, typename atomic<_Tp>::valu
}
template <class _Tp>
-_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_wait_explicit(const atomic<_Tp>* __o, typename atomic<_Tp>::value_type __v, memory_order __m) _NOEXCEPT
_LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
return __o->wait(__v, __m);
@@ -467,7 +471,7 @@ atomic_notify_one(volatile atomic<_Tp>* __o) _NOEXCEPT {
__o->notify_one();
}
template <class _Tp>
-_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_notify_one(atomic<_Tp>* __o) _NOEXCEPT {
__o->notify_one();
}
@@ -480,7 +484,7 @@ atomic_notify_all(volatile atomic<_Tp>* __o) _NOEXCEPT {
__o->notify_all();
}
template <class _Tp>
-_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_notify_all(atomic<_Tp>* __o) _NOEXCEPT {
__o->notify_all();
}
@@ -494,7 +498,8 @@ atomic_fetch_add(volatile atomic<_Tp>* __o, typename atomic<_Tp>::difference_typ
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_add(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+atomic_fetch_add(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT {
return __o->fetch_add(__op);
}
@@ -507,7 +512,7 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_add_explicit(
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
atomic_fetch_add_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op, memory_order __m) _NOEXCEPT {
return __o->fetch_add(__op, __m);
}
@@ -521,7 +526,8 @@ atomic_fetch_sub(volatile atomic<_Tp>* __o, typename atomic<_Tp>::difference_typ
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_sub(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+atomic_fetch_sub(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT {
return __o->fetch_sub(__op);
}
@@ -534,7 +540,7 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_sub_explicit(
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
atomic_fetch_sub_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op, memory_order __m) _NOEXCEPT {
return __o->fetch_sub(__op, __m);
}
@@ -547,7 +553,8 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_and(volatile atomic<_Tp>* __o, typename a
}
template <class _Tp, __enable_if_t<is_integral<_Tp>::value && !is_same<_Tp, bool>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_and(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+atomic_fetch_and(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT {
return __o->fetch_and(__op);
}
@@ -560,7 +567,7 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_and_explicit(
}
template <class _Tp, __enable_if_t<is_integral<_Tp>::value && !is_same<_Tp, bool>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
atomic_fetch_and_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT {
return __o->fetch_and(__op, __m);
}
@@ -573,7 +580,8 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_or(volatile atomic<_Tp>* __o, typename at
}
template <class _Tp, __enable_if_t<is_integral<_Tp>::value && !is_same<_Tp, bool>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_or(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+atomic_fetch_or(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT {
return __o->fetch_or(__op);
}
@@ -586,7 +594,7 @@ atomic_fetch_or_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_
}
template <class _Tp, __enable_if_t<is_integral<_Tp>::value && !is_same<_Tp, bool>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
atomic_fetch_or_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT {
return __o->fetch_or(__op, __m);
}
@@ -599,7 +607,8 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_xor(volatile atomic<_Tp>* __o, typename a
}
template <class _Tp, __enable_if_t<is_integral<_Tp>::value && !is_same<_Tp, bool>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_xor(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+atomic_fetch_xor(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT {
return __o->fetch_xor(__op);
}
@@ -612,7 +621,7 @@ _LIBCPP_HIDE_FROM_ABI _Tp atomic_fetch_xor_explicit(
}
template <class _Tp, __enable_if_t<is_integral<_Tp>::value && !is_same<_Tp, bool>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
atomic_fetch_xor_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT {
return __o->fetch_xor(__op, __m);
}
diff --git a/libcxx/include/__atomic/atomic_base.h b/libcxx/include/__atomic/atomic_base.h
index 7e26434c9c3a0..22e898f736674 100644
--- a/libcxx/include/__atomic/atomic_base.h
+++ b/libcxx/include/__atomic/atomic_base.h
@@ -40,13 +40,13 @@ struct __atomic_base // false
return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));
}
_LIBCPP_HIDE_FROM_ABI bool is_lock_free() const _NOEXCEPT {
- return static_cast<__atomic_base const volatile*>(this)->is_lock_free();
+ return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));
}
_LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
_LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
}
- _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
_LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
}
@@ -54,16 +54,16 @@ struct __atomic_base // false
_LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
return std::__cxx_atomic_load(std::addressof(__a_), __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
_LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
return std::__cxx_atomic_load(std::addressof(__a_), __m);
}
_LIBCPP_HIDE_FROM_ABI operator _Tp() const volatile _NOEXCEPT { return load(); }
- _LIBCPP_HIDE_FROM_ABI operator _Tp() const _NOEXCEPT { return load(); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 operator _Tp() const _NOEXCEPT { return load(); }
_LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
}
_LIBCPP_HIDE_FROM_ABI bool
@@ -71,7 +71,8 @@ struct __atomic_base // false
_LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
}
- _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
+ compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
_LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
}
@@ -80,7 +81,8 @@ struct __atomic_base // false
_LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
}
- _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
+ compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
_LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
}
@@ -88,7 +90,7 @@ struct __atomic_base // false
compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
}
- _LIBCPP_HIDE_FROM_ABI bool
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
}
@@ -96,7 +98,7 @@ struct __atomic_base // false
compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
}
- _LIBCPP_HIDE_FROM_ABI bool
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
}
@@ -105,23 +107,37 @@ struct __atomic_base // false
volatile _NOEXCEPT {
std::__atomic_wait(*this, __v, __m);
}
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
- std::__atomic_wait(*this, __v, __m);
+ if (__libcpp_is_constant_evaluated()) {
+ if (this->load(__m) != __v) {
+ __builtin_trap();
+ }
+ } else {
+ std::__atomic_wait(*this, __v, __m);
+ }
}
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
std::__atomic_notify_one(*this);
}
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void notify_one() _NOEXCEPT {
+    if (!__libcpp_is_constant_evaluated()) {
+      std::__atomic_notify_one(*this);
+    }
+  }
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
std::__atomic_notify_all(*this);
}
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void notify_all() _NOEXCEPT {
+    if (!__libcpp_is_constant_evaluated()) {
+      std::__atomic_notify_all(*this);
+    }
+  }
#if _LIBCPP_STD_VER >= 20
_LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
#else
- _LIBCPP_HIDE_FROM_ABI __atomic_base() _NOEXCEPT = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base() _NOEXCEPT = default;
#endif
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}
@@ -142,52 +158,52 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
_LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
}
_LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
}
_LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
}
_LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
}
_LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
}
_LIBCPP_HIDE_FROM_ABI _Tp operator++(int) volatile _NOEXCEPT { return fetch_add(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) _NOEXCEPT { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator++(int) _NOEXCEPT { return fetch_add(_Tp(1)); }
_LIBCPP_HIDE_FROM_ABI _Tp operator--(int) volatile _NOEXCEPT { return fetch_sub(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) _NOEXCEPT { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator--(int) _NOEXCEPT { return fetch_sub(_Tp(1)); }
_LIBCPP_HIDE_FROM_ABI _Tp operator++() volatile _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator++() _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator++() _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
_LIBCPP_HIDE_FROM_ABI _Tp operator--() volatile _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--() _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator--() _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
_LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) volatile _NOEXCEPT { return fetch_add(__op) + __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) _NOEXCEPT { return fetch_add(__op) + __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator+=(_Tp __op) _NOEXCEPT { return fetch_add(__op) + __op; }
_LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile _NOEXCEPT { return fetch_sub(__op) - __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator-=(_Tp __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
_LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) volatile _NOEXCEPT { return fetch_and(__op) & __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) _NOEXCEPT { return fetch_and(__op) & __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator&=(_Tp __op) _NOEXCEPT { return fetch_and(__op) & __op; }
_LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) volatile _NOEXCEPT { return fetch_or(__op) | __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) _NOEXCEPT { return fetch_or(__op) | __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator|=(_Tp __op) _NOEXCEPT { return fetch_or(__op) | __op; }
_LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) volatile _NOEXCEPT { return fetch_xor(__op) ^ __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
};
// Here we need _IsIntegral because the default template argument is not enough
@@ -196,7 +212,7 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
template <class _Tp, bool _IsIntegral>
struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
- static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
return __a.load(__order);
}
@@ -205,7 +221,7 @@ struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
return __this.load(__order);
}
- static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>*
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const __cxx_atomic_impl<_Tp>*
__atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) {
return std::addressof(__a.__a_);
}
diff --git a/libcxx/include/__atomic/atomic_flag.h b/libcxx/include/__atomic/atomic_flag.h
index 00b157cdff78b..d8598e11c48af 100644
--- a/libcxx/include/__atomic/atomic_flag.h
+++ b/libcxx/include/__atomic/atomic_flag.h
@@ -31,20 +31,21 @@ struct atomic_flag {
_LIBCPP_HIDE_FROM_ABI bool test(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT {
return _LIBCPP_ATOMIC_FLAG_TYPE(true) == __cxx_atomic_load(&__a_, __m);
}
- _LIBCPP_HIDE_FROM_ABI bool test(memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool test(memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
return _LIBCPP_ATOMIC_FLAG_TYPE(true) == __cxx_atomic_load(&__a_, __m);
}
- _LIBCPP_HIDE_FROM_ABI bool test_and_set(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI bool
+ test_and_set(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);
}
- _LIBCPP_HIDE_FROM_ABI bool test_and_set(memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool test_and_set(memory_order __m = memory_order_seq_cst) _NOEXCEPT {
return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);
}
_LIBCPP_HIDE_FROM_ABI void clear(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);
}
- _LIBCPP_HIDE_FROM_ABI void clear(memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void clear(memory_order __m = memory_order_seq_cst) _NOEXCEPT {
__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);
}
@@ -52,20 +53,20 @@ struct atomic_flag {
wait(bool __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT {
std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);
}
- _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+ _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
wait(bool __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);
}
_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
std::__atomic_notify_one(*this);
}
- _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT {
+ _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void notify_one() _NOEXCEPT {
std::__atomic_notify_one(*this);
}
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
std::__atomic_notify_all(*this);
}
- _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT {
+ _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void notify_all() _NOEXCEPT {
std::__atomic_notify_all(*this);
}
@@ -75,7 +76,7 @@ struct atomic_flag {
atomic_flag() _NOEXCEPT = default;
#endif
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR atomic_flag(bool __b) _NOEXCEPT : __a_(__b) {} // EXTENSION
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR atomic_flag(bool __b) _NOEXCEPT : __a_(__b) {} // EXTENSION
atomic_flag(const atomic_flag&) = delete;
atomic_flag& operator=(const atomic_flag&) = delete;
@@ -84,7 +85,8 @@ struct atomic_flag {
template <>
struct __atomic_waitable_traits<atomic_flag> {
- static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE __atomic_load(const atomic_flag& __a, memory_order __order) {
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_ATOMIC_FLAG_TYPE
+ __atomic_load(const atomic_flag& __a, memory_order __order) {
return std::__cxx_atomic_load(&__a.__a_, __order);
}
@@ -93,7 +95,7 @@ struct __atomic_waitable_traits<atomic_flag> {
return std::__cxx_atomic_load(&__a.__a_, __order);
}
- static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>*
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>*
__atomic_contention_address(const atomic_flag& __a) {
return std::addressof(__a.__a_);
}
@@ -104,16 +106,21 @@ struct __atomic_waitable_traits<atomic_flag> {
}
};
-inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const volatile atomic_flag* __o) _NOEXCEPT { return __o->test(); }
+inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const volatile atomic_flag* __o) _NOEXCEPT {
+ return __o->test();
+}
-inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const atomic_flag* __o) _NOEXCEPT { return __o->test(); }
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool atomic_flag_test(const atomic_flag* __o) _NOEXCEPT {
+ return __o->test();
+}
inline _LIBCPP_HIDE_FROM_ABI bool
atomic_flag_test_explicit(const volatile atomic_flag* __o, memory_order __m) _NOEXCEPT {
return __o->test(__m);
}
-inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test_explicit(const atomic_flag* __o, memory_order __m) _NOEXCEPT {
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
+atomic_flag_test_explicit(const atomic_flag* __o, memory_order __m) _NOEXCEPT {
return __o->test(__m);
}
@@ -121,26 +128,33 @@ inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test_and_set(volatile atomic_flag*
return __o->test_and_set();
}
-inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test_and_set(atomic_flag* __o) _NOEXCEPT { return __o->test_and_set(); }
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool atomic_flag_test_and_set(atomic_flag* __o) _NOEXCEPT {
+ return __o->test_and_set();
+}
inline _LIBCPP_HIDE_FROM_ABI bool
atomic_flag_test_and_set_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT {
return __o->test_and_set(__m);
}
-inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test_and_set_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT {
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
+atomic_flag_test_and_set_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT {
return __o->test_and_set(__m);
}
-inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear(volatile atomic_flag* __o) _NOEXCEPT { __o->clear(); }
+inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear(volatile atomic_flag* __o) _NOEXCEPT {
+ __o->clear();
+}
-inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear(atomic_flag* __o) _NOEXCEPT { __o->clear(); }
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void atomic_flag_clear(atomic_flag* __o) _NOEXCEPT { __o->clear(); }
-inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT {
+inline _LIBCPP_HIDE_FROM_ABI void
+atomic_flag_clear_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT {
__o->clear(__m);
}
-inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT {
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
+atomic_flag_clear_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT {
__o->clear(__m);
}
@@ -149,7 +163,7 @@ atomic_flag_wait(const volatile atomic_flag* __o, bool __v) _NOEXCEPT {
__o->wait(__v);
}
-inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void
+inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_flag_wait(const atomic_flag* __o, bool __v) _NOEXCEPT {
__o->wait(__v);
}
@@ -159,7 +173,7 @@ atomic_flag_wait_explicit(const volatile atomic_flag* __o, bool __v, memory_orde
__o->wait(__v, __m);
}
-inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void
+inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_flag_wait_explicit(const atomic_flag* __o, bool __v, memory_order __m) _NOEXCEPT {
__o->wait(__v, __m);
}
@@ -169,7 +183,7 @@ atomic_flag_notify_one(volatile atomic_flag* __o) _NOEXCEPT {
__o->notify_one();
}
-inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void
+inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_flag_notify_one(atomic_flag* __o) _NOEXCEPT {
__o->notify_one();
}
@@ -179,7 +193,7 @@ atomic_flag_notify_all(volatile atomic_flag* __o) _NOEXCEPT {
__o->notify_all();
}
-inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void
+inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC _LIBCPP_CONSTEXPR_SINCE_CXX26 void
atomic_flag_notify_all(atomic_flag* __o) _NOEXCEPT {
__o->notify_all();
}
diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
index 156f1961151c1..7b8741dd3f225 100644
--- a/libcxx/include/__atomic/atomic_ref.h
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -47,10 +47,10 @@ struct __atomic_ref_base {
protected:
_Tp* __ptr_;
- _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
private:
- _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 static _Tp* __clear_padding(_Tp& __val) noexcept {
_Tp* __ptr = std::addressof(__val);
# if __has_builtin(__builtin_clear_padding)
__builtin_clear_padding(__ptr);
@@ -58,7 +58,7 @@ struct __atomic_ref_base {
return __ptr;
}
- _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange(
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 static bool __compare_exchange(
_Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept {
if constexpr (
# if __has_builtin(__builtin_clear_padding)
@@ -107,112 +107,193 @@ struct __atomic_ref_base {
static constexpr bool is_always_lock_free =
__atomic_always_lock_free(sizeof(_Tp), reinterpret_cast<void*>(-required_alignment));
- _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); }
+ _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept {
+ return __atomic_is_lock_free(sizeof(_Tp), __ptr_);
+ }
- _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
+ store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
_LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
__order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
"atomic_ref: memory order argument to atomic store operation is invalid");
- __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
+ if (__libcpp_is_constant_evaluated()) {
+ *__ptr_ = __desired;
+ } else {
+ __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
+ }
}
- _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator=(_Tp __desired) const noexcept {
store(__desired);
return __desired;
}
- _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
_LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
__order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
__order == memory_order::seq_cst,
"atomic_ref: memory order argument to atomic load operation is invalid");
- alignas(_Tp) byte __mem[sizeof(_Tp)];
- auto* __ret = reinterpret_cast<_Tp*>(__mem);
- __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
- return *__ret;
+ if (__libcpp_is_constant_evaluated()) {
+ return *__ptr_;
+ } else {
+ alignas(_Tp) byte __mem[sizeof(_Tp)];
+ auto* __ret = reinterpret_cast<_Tp*>(__mem);
+ __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
+ return *__ret;
+ }
}
- _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }
-
- _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
- alignas(_Tp) byte __mem[sizeof(_Tp)];
- auto* __ret = reinterpret_cast<_Tp*>(__mem);
- __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
- return *__ret;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 operator _Tp() const noexcept { return load(); }
+
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ if (__libcpp_is_constant_evaluated()) {
+      _Tp __tmp = *__ptr_;
+      *__ptr_ = __desired;
+      return __tmp;
+ } else {
+ alignas(_Tp) byte __mem[sizeof(_Tp)];
+ auto* __ret = reinterpret_cast<_Tp*>(__mem);
+ __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
+ return *__ret;
+ }
}
- _LIBCPP_HIDE_FROM_ABI bool
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
_LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
__failure == memory_order::relaxed || __failure == memory_order::consume ||
__failure == memory_order::acquire || __failure == memory_order::seq_cst,
"atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
- return __compare_exchange(
- __ptr_,
- std::addressof(__expected),
- std::addressof(__desired),
- true,
- std::__to_gcc_order(__success),
- std::__to_gcc_order(__failure));
- }
- _LIBCPP_HIDE_FROM_ABI bool
+ if (__libcpp_is_constant_evaluated()) {
+ const _Tp __original = *__ptr_;
+ if (__original == __expected) {
+ *__ptr_ = __desired;
+ __expected = __original;
+ return true;
+ } else {
+ __expected = __original;
+ return false;
+ }
+ } else {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ true,
+ std::__to_gcc_order(__success),
+ std::__to_gcc_order(__failure));
+ }
+ }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
_LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
__failure == memory_order::relaxed || __failure == memory_order::consume ||
__failure == memory_order::acquire || __failure == memory_order::seq_cst,
"atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
- return __compare_exchange(
- __ptr_,
- std::addressof(__expected),
- std::addressof(__desired),
- false,
- std::__to_gcc_order(__success),
- std::__to_gcc_order(__failure));
+ if (__libcpp_is_constant_evaluated()) {
+ const _Tp __original = *__ptr_;
+ if (__original == __expected) {
+ *__ptr_ = __desired;
+ __expected = __original;
+ return true;
+ } else {
+ __expected = __original;
+ return false;
+ }
+ } else {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ false,
+ std::__to_gcc_order(__success),
+ std::__to_gcc_order(__failure));
+ }
}
- _LIBCPP_HIDE_FROM_ABI bool
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
- return __compare_exchange(
- __ptr_,
- std::addressof(__expected),
- std::addressof(__desired),
- true,
- std::__to_gcc_order(__order),
- std::__to_gcc_failure_order(__order));
- }
- _LIBCPP_HIDE_FROM_ABI bool
+    if (__libcpp_is_constant_evaluated()) {
+      const _Tp __original = *__ptr_;
+      if (__original == __expected) {
+        *__ptr_ = __desired;
+        __expected = __original;
+        return true;
+      } else {
+        __expected = __original;
+        return false;
+      }
+ } else {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ true,
+ std::__to_gcc_order(__order),
+ std::__to_gcc_failure_order(__order));
+ }
+ }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool
compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
- return __compare_exchange(
- __ptr_,
- std::addressof(__expected),
- std::addressof(__desired),
- false,
- std::__to_gcc_order(__order),
- std::__to_gcc_failure_order(__order));
+    if (__libcpp_is_constant_evaluated()) {
+      const _Tp __original = *__ptr_;
+      if (__original == __expected) {
+        *__ptr_ = __desired;
+        __expected = __original;
+        return true;
+      } else {
+        __expected = __original;
+        return false;
+      }
+ } else {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ false,
+ std::__to_gcc_order(__order),
+ std::__to_gcc_failure_order(__order));
+ }
}
- _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
+ wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
_LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
- _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
- __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
- __order == memory_order::seq_cst,
- "atomic_ref: memory order argument to atomic wait operation is invalid");
- std::__atomic_wait(*this, __old, __order);
+    if (__libcpp_is_constant_evaluated()) {
+      // wait() blocks while the stored value compares equal to __old and returns
+      // once it differs. Blocking forever is not a constant expression, so fail
+      // the evaluation in exactly that case (the original condition was inverted).
+      if (*__ptr_ == __old) {
+        __builtin_trap();
+      }
+ } else {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+ std::__atomic_wait(*this, __old, __order);
+ }
+ }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void notify_one() const noexcept {
+    if (!__libcpp_is_constant_evaluated()) {
+ std::__atomic_notify_one(*this);
+ }
+ }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void notify_all() const noexcept {
+    if (!__libcpp_is_constant_evaluated()) {
+ std::__atomic_notify_all(*this);
+ }
}
- _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); }
- _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); }
};
template <class _Tp>
struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
- static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
return __a.load(__order);
}
- static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const _Tp*
+ __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
return __a.__ptr_;
}
};
@@ -223,15 +304,21 @@ struct atomic_ref : public __atomic_ref_base<_Tp> {
using __base = __atomic_ref_base<_Tp>;
- _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 explicit atomic_ref(_Tp& __obj) : __base(__obj) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+# if __has_builtin(__builtin_is_aligned)
+ __builtin_is_aligned(std::addressof(__obj), __base::required_alignment),
+# else
reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+# endif
"atomic_ref ctor: referenced object must be aligned to required_alignment");
}
- _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 atomic_ref(const atomic_ref&) noexcept = default;
- _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator=(_Tp __desired) const noexcept {
+ return __base::operator=(__desired);
+ }
atomic_ref& operator=(const atomic_ref&) = delete;
};
@@ -243,43 +330,54 @@ struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
using difference_type = __base::value_type;
- _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 explicit atomic_ref(_Tp& __obj) : __base(__obj) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+# if __has_builtin(__builtin_is_aligned)
+ __builtin_is_aligned(std::addressof(__obj), __base::required_alignment),
+# else
reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+# endif
"atomic_ref ctor: referenced object must be aligned to required_alignment");
}
- _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 atomic_ref(const atomic_ref&) noexcept = default;
- _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator=(_Tp __desired) const noexcept {
+ return __base::operator=(__desired);
+ }
atomic_ref& operator=(const atomic_ref&) = delete;
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
};
template <class _Tp>
@@ -289,19 +387,26 @@ struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
using difference_type = __base::value_type;
- _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 explicit atomic_ref(_Tp& __obj) : __base(__obj) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+# if __has_builtin(__builtin_is_aligned)
+ __builtin_is_aligned(std::addressof(__obj), __base::required_alignment),
+# else
reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+# endif
"atomic_ref ctor: referenced object must be aligned to required_alignment");
}
- _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 atomic_ref(const atomic_ref&) noexcept = default;
- _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator=(_Tp __desired) const noexcept {
+ return __base::operator=(__desired);
+ }
atomic_ref& operator=(const atomic_ref&) = delete;
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
_Tp __old = this->load(memory_order_relaxed);
_Tp __new = __old + __arg;
while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
@@ -309,7 +414,8 @@ struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
}
return __old;
}
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp
+ fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
_Tp __old = this->load(memory_order_relaxed);
_Tp __new = __old - __arg;
while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
@@ -318,8 +424,8 @@ struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
return __old;
}
- _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};
template <class _Tp>
@@ -328,25 +434,33 @@ struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
using difference_type = ptrdiff_t;
- _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}
- _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator=(_Tp* __desired) const noexcept {
+ return __base::operator=(__desired);
+ }
atomic_ref& operator=(const atomic_ref&) = delete;
- _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp*
+ fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp*
+ fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
}
- _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
- _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator++(int) const noexcept { return fetch_add(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator--(int) const noexcept { return fetch_sub(1); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator+=(ptrdiff_t __arg) const noexcept {
+ return fetch_add(__arg) + __arg;
+ }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Tp* operator-=(ptrdiff_t __arg) const noexcept {
+ return fetch_sub(__arg) - __arg;
+ }
};
_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref);
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 18e88aa97bec7..71087081c2237 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -30,7 +30,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
// the default operator= in an object is not volatile, a byte-by-byte copy
// is required.
template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
__a_value = __val;
}
template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
@@ -44,7 +44,7 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value,
template <typename _Tp>
struct __cxx_atomic_base_impl {
- _LIBCPP_HIDE_FROM_ABI
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR
# ifndef _LIBCPP_CXX03_LANG
__cxx_atomic_base_impl() _NOEXCEPT = default;
# else
@@ -61,15 +61,15 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
__a->__a_value = __val;
}
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR inline void __cxx_atomic_thread_fence(memory_order __order) {
__atomic_thread_fence(__to_gcc_order(__order));
}
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR inline void __cxx_atomic_signal_fence(memory_order __order) {
__atomic_signal_fence(__to_gcc_order(__order));
}
@@ -80,7 +80,8 @@ __cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void
+__cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
__atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
}
@@ -98,13 +99,14 @@ __cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp*
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void
__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
__atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
+__cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
_Tp __ret;
__atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
return __ret;
@@ -120,7 +122,8 @@ __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, me
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
+__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
_Tp __ret;
__atomic_exchange(
std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
@@ -144,7 +147,7 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool __cxx_atomic_compare_exchange_strong(
__cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
return __atomic_compare_exchange(
std::addressof(__a->__a_value),
@@ -172,7 +175,7 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool __cxx_atomic_compare_exchange_weak(
__cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
return __atomic_compare_exchange(
std::addressof(__a->__a_value),
@@ -207,7 +210,8 @@ __cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, m
}
template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
+__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}
@@ -218,7 +222,8 @@ __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, m
}
template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
+__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}
@@ -229,7 +234,7 @@ __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}
@@ -241,7 +246,8 @@ __cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
+__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}
@@ -252,7 +258,7 @@ __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}
@@ -263,7 +269,7 @@ __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_o
template <typename _Tp>
struct __cxx_atomic_base_impl {
- _LIBCPP_HIDE_FROM_ABI
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR
# ifndef _LIBCPP_CXX03_LANG
__cxx_atomic_base_impl() _NOEXCEPT = default;
# else
@@ -276,11 +282,11 @@ struct __cxx_atomic_base_impl {
# define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR inline void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
__c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
}
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR inline void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
__c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
}
@@ -289,7 +295,7 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatil
__c11_atomic_init(std::addressof(__a->__a_value), __val);
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) _NOEXCEPT {
__c11_atomic_init(std::addressof(__a->__a_value), __val);
}
@@ -299,7 +305,7 @@ __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_
__c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void
__cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) _NOEXCEPT {
__c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
}
@@ -312,7 +318,8 @@ __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order
const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
+__cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
return __c11_atomic_load(
const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
@@ -326,7 +333,7 @@ __cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const volatile* __a, _Tp*
const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR void
__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
*__dst = __c11_atomic_load(
@@ -340,13 +347,13 @@ __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, me
std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) _NOEXCEPT {
return __c11_atomic_exchange(
std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
}
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR memory_order __to_failure_order(memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR inline memory_order __to_failure_order(memory_order __order) {
// Avoid switch statement to make this a constexpr.
return __order == memory_order_release
? memory_order_relaxed
@@ -368,7 +375,7 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool __cxx_atomic_compare_exchange_strong(
__cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure)
_NOEXCEPT {
return __c11_atomic_compare_exchange_strong(
@@ -394,7 +401,7 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool __cxx_atomic_compare_exchange_weak(
__cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure)
_NOEXCEPT {
return __c11_atomic_compare_exchange_weak(
@@ -412,7 +419,7 @@ __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, m
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_add(
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
@@ -425,7 +432,7 @@ __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __d
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp*
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp*
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_add(
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
@@ -438,7 +445,7 @@ __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, m
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_sub(
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
@@ -450,7 +457,7 @@ __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __d
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp*
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp*
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_sub(
std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
@@ -463,7 +470,7 @@ __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern,
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_and(
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
@@ -476,7 +483,7 @@ __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern,
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_or(
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
@@ -489,7 +496,7 @@ __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern,
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _Tp
__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
return __c11_atomic_fetch_xor(
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
@@ -501,7 +508,7 @@ template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
struct __cxx_atomic_impl : public _Base {
static_assert(is_trivially_copyable<_Tp>::value, "std::atomic<T> requires that 'T' be a trivially copyable type");
- _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __cxx_atomic_impl() _NOEXCEPT = default;
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}
};
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 108f700823cbf..4d5c588b15c00 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -796,6 +796,12 @@ typedef __char32_t char32_t;
# define _LIBCPP_CONSTEXPR_SINCE_CXX23
# endif
+# if _LIBCPP_STD_VER >= 26
+# define _LIBCPP_CONSTEXPR_SINCE_CXX26 constexpr
+# else
+# define _LIBCPP_CONSTEXPR_SINCE_CXX26
+# endif
+
# ifndef _LIBCPP_WEAK
# define _LIBCPP_WEAK __attribute__((__weak__))
# endif
>From 86ec6733d54cb33e65da19c5ed4485e4ec46f7cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Hana=20Dusi=CC=81kova=CC=81?= <hanicka at hanicka.net>
Date: Sat, 13 Jul 2024 19:58:21 +0200
Subject: [PATCH 2/2] [clang] __c11_atomic_OP and __atomic_OP constexpr
support + tests
---
clang/lib/AST/ExprConstant.cpp | 260 ++++++---
.../SemaCXX/atomic-constexpr-c11-builtins.cpp | 288 ++++++++++
.../SemaCXX/atomic-constexpr-gcc-builtins.cpp | 494 ++++++++++++++++++
3 files changed, 958 insertions(+), 84 deletions(-)
create mode 100644 clang/test/SemaCXX/atomic-constexpr-c11-builtins.cpp
create mode 100644 clang/test/SemaCXX/atomic-constexpr-gcc-builtins.cpp
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 32f04ccaa1205..b39bf82aca6a0 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -7890,26 +7890,31 @@ class ExprEvaluatorBase
return StmtVisitorTy::Visit(Source);
}
- static bool EvaluateOrder(const Expr *E, EvalInfo &Info) {
- // we ignore order
- [[maybe_unused]] APSInt Order;
- if (!EvaluateInteger(E, Order, Info)) {
- return false;
+ static bool EvaluateAtomicOrderToIgnore(const AtomicExpr *E, EvalInfo &Info) {
+ // we ignore results, but we need to evaluate them
+ [[maybe_unused]] APSInt OrderIgnoredResult;
+
+ const Expr * OrderSuccess = E->getOrder();
+ if (!EvaluateInteger(OrderSuccess, OrderIgnoredResult, Info))
+ return false;
+
+ if (E->isCmpXChg()) {
+ const Expr * OrderFail = E->getOrderFail();
+ if (!EvaluateInteger(OrderFail, OrderIgnoredResult, Info))
+ return false;
}
return true;
}
-
- static bool ReadAtomicPtr(const AtomicExpr *E, APValue &Result,
- EvalInfo &Info) {
- LValue AtomicLV;
- if (!EvaluatePointer(E->getPtr(), AtomicLV, Info)) {
- return false;
- }
-
- if (!handleLValueToRValueConversion(Info, E->getPtr(), E->getType(),
- AtomicLV, Result)) {
- return false;
+
+ static bool EvaluateAtomicWeakToIgnore(const AtomicExpr *E, EvalInfo &Info) {
+ // we ignore results, but we need to evaluate them
+ [[maybe_unused]] APSInt WeakIgnoredResult;
+
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n || E->getOp() == AtomicExpr::AO__atomic_compare_exchange) {
+ const Expr * Weak = E->getWeak();
+ if (!EvaluateInteger(Weak, WeakIgnoredResult, Info))
+ return false;
}
return true;
@@ -7917,47 +7922,124 @@ class ExprEvaluatorBase
static bool LoadAtomicValue(const AtomicExpr *E, APValue &Result,
EvalInfo &Info) {
- if (!ReadAtomicPtr(E, Result, Info)) {
+ LValue AtomicStorageLV;
+
+ if (!EvaluatePointer(E->getPtr(), AtomicStorageLV, Info))
+ return false;
+
+ return handleLValueToRValueConversion(Info, E->getPtr(), E->getValueType(), AtomicStorageLV, Result);
+ }
+
+ static bool StoreValueIntoResultPointer(Expr * ResultPtr, APValue & ValueToStore, EvalInfo &Info) {
+    // TODO: the assert below is debug-only; diagnose a non-pointer ResultPtr properly
+ assert(ResultPtr->getType()->isPointerType());
+ QualType PointeeTy = ResultPtr->getType()->getPointeeType();
+ LValue PointeeLV;
+
+ if (!EvaluatePointer(ResultPtr, PointeeLV, Info))
+ return false;
+
+ return handleAssignment(Info, ResultPtr, PointeeLV, PointeeTy, ValueToStore);
+ }
+
+ static bool LoadAtomicValueInto(const AtomicExpr *E, EvalInfo &Info) {
+ APValue LocalResult;
+
+ if (!LoadAtomicValue(E, LocalResult, Info))
+ return false;
+
+ if (!StoreValueIntoResultPointer(E->getVal1(), LocalResult, Info))
return false;
- }
- // we ignore order
- if (!EvaluateOrder(E->getOrder(), Info)) {
+ return true;
+ }
+
+ static bool StoreAtomicValue(const AtomicExpr *E, EvalInfo &Info) {
+ LValue AtomicStorageLV;
+
+ if (!EvaluatePointer(E->getPtr(), AtomicStorageLV, Info))
return false;
+
+ APValue ProvidedValue;
+
+ // GCC's atomic_store takes pointer to value, not value itself
+ if (E->getOp() == AtomicExpr::AO__atomic_store) {
+ LValue ProvidedLV;
+ if (!EvaluatePointer(E->getVal1(), ProvidedLV, Info))
+ return false;
+
+ if (!handleLValueToRValueConversion(Info, E->getVal1(), E->getVal1()->getType(), ProvidedLV, ProvidedValue))
+ return false;
+
+ } else {
+ if (!Evaluate(ProvidedValue, Info, E->getVal1()))
+ return false;
}
+ if (!handleAssignment(Info, E, AtomicStorageLV, E->getValueType(), ProvidedValue))
+ return false;
return true;
}
+
+ static bool ExchangeAtomicValueInto(const AtomicExpr *E, EvalInfo &Info) {
+ assert(E->getOp() == AtomicExpr::AO__atomic_exchange);
+ // implementation of GCC's exchange (non _n version)
+ LValue AtomicStorageLV;
+ if (!EvaluatePointer(E->getPtr(), AtomicStorageLV, Info))
+ return false;
+
+ // read previous value
+ APValue PreviousValue;
+ if (!handleLValueToRValueConversion(Info, E->getPtr(), E->getValueType(), AtomicStorageLV, PreviousValue))
+ return false;
+
+ // get provided value from argument (pointer)
+ LValue ProvidedLV;
+ if (!EvaluatePointer(E->getVal1(), ProvidedLV, Info))
+ return false;
+
+ APValue ProvidedValue;
+ if (!handleLValueToRValueConversion(Info, E->getVal1(), E->getVal1()->getType(), ProvidedLV, ProvidedValue))
+ return false;
+
+ // store provided value to atomic value
+ if (!handleAssignment(Info, E, AtomicStorageLV, E->getValueType(), ProvidedValue))
+ return false;
+
+ // store previous value in output pointer
+ if (!StoreValueIntoResultPointer(E->getVal2(), PreviousValue, Info))
+ return false;
+
+ return true;
+ }
static bool FetchAtomicOp(const AtomicExpr *E, APValue &Result,
EvalInfo &Info, bool StoreToResultAfter) {
- LValue AtomicLV;
- QualType AtomicTy =
- E->getPtr()->getType()->getPointeeType().getAtomicUnqualifiedType();
- if (!EvaluatePointer(E->getPtr(), AtomicLV, Info)) {
+ // read atomic
+ LValue AtomicStorageLV;
+ QualType AtomicValueTy = E->getValueType();
+ if (!EvaluatePointer(E->getPtr(), AtomicStorageLV, Info))
return false;
- }
- APValue AtomicVal;
+ APValue CurrentValue;
if (!handleLValueToRValueConversion(Info, E->getPtr(), E->getType(),
- AtomicLV, AtomicVal)) {
+ AtomicStorageLV, CurrentValue))
return false;
- }
-
- if (!StoreToResultAfter) {
- Result = AtomicVal;
- }
- const auto ResultType = E->getType();
+ // store current value for fetch-OP operations
+ if (!StoreToResultAfter)
+ Result = CurrentValue;
+ // read argument for fetch OP
APValue ArgumentVal;
- if (!Evaluate(ArgumentVal, Info, E->getVal1())) {
+ if (!Evaluate(ArgumentVal, Info, E->getVal1()))
return false;
- }
+ // calculate new value
APValue Replacement;
- if (ResultType->isIntegralOrEnumerationType()) {
- const APSInt AtomicInt = AtomicVal.getInt();
+ if (AtomicValueTy->isIntegralOrEnumerationType()) {
+ // both arguments are integers
+ const APSInt AtomicInt = CurrentValue.getInt();
const APSInt ArgumentInt = ArgumentVal.getInt();
switch (E->getOp()) {
@@ -8006,9 +8088,10 @@ class ExprEvaluatorBase
default:
return false;
}
- } else if (ResultType->isRealFloatingType()) {
+ } else if (AtomicValueTy->isRealFloatingType()) {
+ // both arguments are float operations
const llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
- APFloat AtomicFlt = AtomicVal.getFloat();
+ APFloat AtomicFlt = CurrentValue.getFloat();
const APFloat ArgumentFlt = ArgumentVal.getFloat();
APFloat::opStatus St;
@@ -8026,17 +8109,19 @@ class ExprEvaluatorBase
return false;
}
- if (!checkFloatingPointResult(Info, E, St)) {
+ if (!checkFloatingPointResult(Info, E, St))
return false;
- }
- } else if (ResultType->isPointerType()) {
+
+ } else if (AtomicValueTy->isPointerType()) {
+ // pointer + int arguments
LValue AtomicPtr;
- AtomicPtr.setFrom(Info.Ctx, AtomicVal);
+ AtomicPtr.setFrom(Info.Ctx, CurrentValue);
APSInt ArgumentInt = ArgumentVal.getInt();
+ // calculate size of pointee object
CharUnits SizeOfPointee;
- if (!HandleSizeof(Info, E->getExprLoc(), AtomicTy->getPointeeType(),
+ if (!HandleSizeof(Info, E->getExprLoc(), AtomicValueTy->getPointeeType(),
SizeOfPointee))
return false;
@@ -8046,18 +8131,20 @@ class ExprEvaluatorBase
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_fetch_sub:
- case AtomicExpr::AO__atomic_sub_fetch: {
- const auto sizeOfOneItem =
- APSInt(APInt(ArgumentInt.getBitWidth(), SizeOfPointee.getQuantity(),
- false),
- false);
- if ((ArgumentInt % sizeOfOneItem) != 0) {
+ case AtomicExpr::AO__atomic_sub_fetch:
+ {
+ const auto sizeOfOneItem =
+ APSInt(APInt(ArgumentInt.getBitWidth(), SizeOfPointee.getQuantity(),
+ false),
+ false);
// incrementing pointer by size which is not dividable by pointee size
// is UB and therefore disallowed
- return false;
+ if ((ArgumentInt % sizeOfOneItem) != 0)
+ return false;
+
+ ArgumentInt /= sizeOfOneItem;
}
- ArgumentInt /= sizeOfOneItem;
- } break;
+ break;
default:
break;
}
@@ -8084,41 +8171,21 @@ class ExprEvaluatorBase
return false;
}
- if (StoreToResultAfter) {
+    // OP-fetch operations store the new value in Result, not the previous value
+ if (StoreToResultAfter)
Result = Replacement;
- }
- if (!handleAssignment(Info, E, AtomicLV, AtomicTy, Replacement)) {
- return false;
- }
-
- return true;
- }
-
- static bool StoreAtomicValue(const AtomicExpr *E, EvalInfo &Info) {
- LValue LV;
- if (!EvaluatePointer(E->getPtr(), LV, Info)) {
- return false;
- }
-
- APValue NewVal;
- if (!Evaluate(NewVal, Info, E->getVal1())) {
- return false;
- }
-
- if (!handleAssignment(Info, E, LV, E->getVal1()->getType(), NewVal)) {
- return false;
- }
-
- return true;
+ // store result to atomic's storage
+ return handleAssignment(Info, E, AtomicStorageLV, AtomicValueTy, Replacement);
}
static bool CompareExchangeAtomicValue(const AtomicExpr *E, APValue &Result,
EvalInfo &Info) {
+ EvaluateAtomicWeakToIgnore(E, Info);
+
// dereference _Atomic * (atomic value)
LValue AtomicLV;
- QualType AtomicTy =
- E->getPtr()->getType()->getPointeeType().getAtomicUnqualifiedType();
+ QualType AtomicTy = E->getValueType();
if (!EvaluatePointer(E->getPtr(), AtomicLV, Info)) {
return false;
}
@@ -8223,14 +8290,27 @@ class ExprEvaluatorBase
} else {
return false;
}
+
+ APValue Replacement;
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange) {
+ // GCC atomic_compare_exchange provides pointer and not value
+ LValue ReplacementLV;
+ if (!EvaluatePointer(E->getVal2(), ReplacementLV, Info))
+ return false;
+
+ if (!handleLValueToRValueConversion(Info, E->getVal2(), E->getVal2()->getType(), ReplacementLV, Replacement))
+ return false;
+
+ } else {
+ if (!Evaluate(Replacement, Info, E->getVal2())) {
+ return false;
+ }
+ }
+
if (DoExchange) {
// if values are same do the exchange with replacement value
// but first I must evaluate the replacement value
- APValue Replacement;
- if (!Evaluate(Replacement, Info, E->getVal2())) {
- return false;
- }
// and assign it to atomic
if (!handleAssignment(Info, E, AtomicLV, AtomicTy, Replacement)) {
@@ -8249,6 +8329,9 @@ class ExprEvaluatorBase
}
bool VisitAtomicExpr(const AtomicExpr *E) {
+ if (!EvaluateAtomicOrderToIgnore(E, Info))
+ return false;
+
APValue LocalResult;
switch (E->getOp()) {
default:
@@ -8261,6 +8344,7 @@ class ExprEvaluatorBase
return DerivedSuccess(LocalResult, E);
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
if (!CompareExchangeAtomicValue(E, LocalResult, Info)) {
return Error(E);
@@ -16022,11 +16106,19 @@ class VoidExprEvaluator
}
bool VisitAtomicExpr(const AtomicExpr *E) {
+ if (!EvaluateAtomicOrderToIgnore(E, Info))
+ return false;
+
switch (E->getOp()) {
default:
return Error(E);
+ case AtomicExpr::AO__atomic_load:
+ return LoadAtomicValueInto(E, Info);
+ case AtomicExpr::AO__atomic_exchange:
+ return ExchangeAtomicValueInto(E, Info);
case AtomicExpr::AO__c11_atomic_init:
case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
return StoreAtomicValue(E, Info);
}
diff --git a/clang/test/SemaCXX/atomic-constexpr-c11-builtins.cpp b/clang/test/SemaCXX/atomic-constexpr-c11-builtins.cpp
new file mode 100644
index 0000000000000..3dc18b3d7c730
--- /dev/null
+++ b/clang/test/SemaCXX/atomic-constexpr-c11-builtins.cpp
@@ -0,0 +1,288 @@
+// RUN: %clang_cc1 -std=c++20 %s
+
+// expected-no-diagnostics
+
+constexpr int int_min = -2147483648;
+constexpr int int_max = 2147483647;
+
+const int array[2] = {1,2};
+const char small_array[2] = {1,2};
+
+template <typename T> struct identity {
+ using type = T;
+};
+
+template <typename T> using do_not_deduce = typename identity<T>::type;
+
+// -- LOAD --
+
+template <typename T> consteval T load(T value) {
+ _Atomic(T) av = value;
+ return __c11_atomic_load(&av, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(load(true) == true);
+static_assert(load(false) == false);
+
+static_assert(load(42) == 42);
+static_assert(load(-128) == -128);
+
+static_assert(load(42u) == 42u);
+static_assert(load(0xFFFFFFFFu) == 0xFFFFFFFFu);
+
+// floats
+static_assert(load(42.3) == 42.3);
+static_assert(load(42.3f) == 42.3f);
+
+// pointers
+static_assert(load(&array[0]) == &array[0]);
+static_assert(load(&small_array[1]) == &small_array[1]);
+
+// -- STORE --
+
+template <typename T> consteval T store(T value) {
+ _Atomic(T) av = T{};
+ __c11_atomic_store(&av, value, __ATOMIC_RELAXED);
+ return __c11_atomic_load(&av, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(store(true) == true);
+static_assert(store(false) == false);
+
+static_assert(store(42) == 42);
+static_assert(store(-128) == -128);
+
+static_assert(store(42u) == 42u);
+static_assert(store(0xFFFFFFFFu) == 0xFFFFFFFFu);
+
+// floats
+static_assert(store(42.3) == 42.3);
+static_assert(store(42.3f) == 42.3f);
+
+// pointers
+static_assert(store(&array[0]) == &array[0]);
+static_assert(store(&small_array[1]) == &small_array[1]);
+
+// -- EXCHANGE --
+template <typename T> struct two_values {
+ T before;
+ T after;
+ constexpr friend bool operator==(two_values, two_values) = default;
+};
+
+template <typename T> consteval auto exchange(T value, do_not_deduce<T> replacement) -> two_values<T> {
+ _Atomic(T) av = T{value};
+ T previous = __c11_atomic_exchange(&av, replacement, __ATOMIC_RELAXED);
+ return two_values<T>{previous, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(exchange(true,false) == two_values{true, false});
+static_assert(exchange(false,true) == two_values{false, true});
+
+static_assert(exchange(10,42) == two_values{10,42});
+static_assert(exchange(14,-128) == two_values{14,-128});
+
+
+static_assert(exchange(56u,42u) == two_values{56u,42u});
+static_assert(exchange(0xFFu, 0xFFFFFFFFu) == two_values{0xFFu,0xFFFFFFFFu});
+
+// floats
+static_assert(exchange(42.3,1.2) == two_values{42.3,1.2});
+static_assert(exchange(42.3f,-16.7f) == two_values{42.3f, -16.7f});
+
+// pointers
+static_assert(exchange(&array[0], &array[1]) == two_values{&array[0],&array[1]});
+static_assert(exchange(&small_array[1], &small_array[0]) == two_values{&small_array[1], &small_array[0]});
+
+// -- COMPARE-EXCHANGE --
+template <typename T> struct comp_result {
+ bool success;
+ T output;
+ T after;
+
+ constexpr comp_result(bool success_, T output_, do_not_deduce<T> after_): success{success_}, output{output_}, after{after_} { }
+
+ constexpr friend bool operator==(comp_result, comp_result) = default;
+};
+
+template <typename T> constexpr auto comp_success(T output, do_not_deduce<T> after) {
+ return comp_result<T>{true, output, after};
+}
+
+template <typename T> constexpr auto comp_failure(T after) {
+ return comp_result<T>{false, after, after};
+}
+
+template <typename T> consteval auto compare_exchange_weak(T original, do_not_deduce<T> expected, do_not_deduce<T> replacement) -> comp_result<T> {
+ _Atomic(T) av = T{original};
+ const bool success = __c11_atomic_compare_exchange_weak(&av, &expected, replacement, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return comp_result<T>{success, expected, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(compare_exchange_weak(true, true, false) == comp_success(true, false));
+static_assert(compare_exchange_weak(false, false, true) == comp_success(false, true));
+static_assert(compare_exchange_weak(false, true, false) == comp_failure(false));
+static_assert(compare_exchange_weak(true, false, true) == comp_failure(true));
+
+static_assert(compare_exchange_weak(10,10,42) == comp_success(10,42));
+static_assert(compare_exchange_weak(14,14,-128) == comp_success(14,-128));
+static_assert(compare_exchange_weak(-10,10,42) == comp_failure(-10));
+static_assert(compare_exchange_weak(-14,14,-128) == comp_failure(-14));
+
+static_assert(compare_exchange_weak(56u, 56u,42u) == comp_success(56u,42u));
+static_assert(compare_exchange_weak(0xFFu, 0xFFu, 0xFFFFFFFFu) == comp_success(0xFFu,0xFFFFFFFFu));
+static_assert(compare_exchange_weak(3u, 56u,42u) == comp_failure(3u));
+static_assert(compare_exchange_weak(0xFu, 0xFFu, 0xFFFFFFFFu) == comp_failure(0xFu));
+
+// floats
+static_assert(compare_exchange_weak(42.3, 42.3,1.2) == comp_success(42.3,1.2));
+static_assert(compare_exchange_weak(42.3f, 42.3f,-16.7f) == comp_success(42.3f, -16.7f));
+static_assert(compare_exchange_weak(0.0, 42.3,1.2) == comp_failure(0.0));
+static_assert(compare_exchange_weak(0.0f, 42.3f,-16.7f) == comp_failure(0.0f));
+
+// pointers
+static_assert(compare_exchange_weak(&array[0], &array[0], &array[1]) == comp_success(&array[0],&array[1]));
+static_assert(compare_exchange_weak(&small_array[1], &small_array[1], &small_array[0]) == comp_success(&small_array[1], &small_array[0]));
+static_assert(compare_exchange_weak(&array[1], &array[0], &array[1]) == comp_failure(&array[1]));
+static_assert(compare_exchange_weak(&small_array[0], &small_array[1], &small_array[0]) == comp_failure(&small_array[0]));
+
+
+template <typename T> consteval auto compare_exchange_strong(T original, do_not_deduce<T> expected, do_not_deduce<T> replacement) -> comp_result<T> {
+ _Atomic(T) av = T{original};
+ const bool success = __c11_atomic_compare_exchange_strong(&av, &expected, replacement, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return comp_result<T>{success, expected, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(compare_exchange_strong(true, true, false) == comp_success(true, false));
+static_assert(compare_exchange_strong(false, false, true) == comp_success(false, true));
+static_assert(compare_exchange_strong(false, true, false) == comp_failure(false));
+static_assert(compare_exchange_strong(true, false, true) == comp_failure(true));
+
+static_assert(compare_exchange_strong(10,10,42) == comp_success(10,42));
+static_assert(compare_exchange_strong(14,14,-128) == comp_success(14,-128));
+static_assert(compare_exchange_strong(-10,10,42) == comp_failure(-10));
+static_assert(compare_exchange_strong(-14,14,-128) == comp_failure(-14));
+
+static_assert(compare_exchange_strong(56u, 56u,42u) == comp_success(56u,42u));
+static_assert(compare_exchange_strong(0xFFu, 0xFFu, 0xFFFFFFFFu) == comp_success(0xFFu,0xFFFFFFFFu));
+static_assert(compare_exchange_strong(3u, 56u,42u) == comp_failure(3u));
+static_assert(compare_exchange_strong(0xFu, 0xFFu, 0xFFFFFFFFu) == comp_failure(0xFu));
+
+// floats
+static_assert(compare_exchange_strong(42.3, 42.3,1.2) == comp_success(42.3,1.2));
+static_assert(compare_exchange_strong(42.3f, 42.3f,-16.7f) == comp_success(42.3f, -16.7f));
+static_assert(compare_exchange_strong(0.0, 42.3,1.2) == comp_failure(0.0));
+static_assert(compare_exchange_strong(0.0f, 42.3f,-16.7f) == comp_failure(0.0f));
+
+// pointers
+static_assert(compare_exchange_strong(&array[0], &array[0], &array[1]) == comp_success(&array[0],&array[1]));
+static_assert(compare_exchange_strong(&small_array[1], &small_array[1], &small_array[0]) == comp_success(&small_array[1], &small_array[0]));
+static_assert(compare_exchange_strong(&array[1], &array[0], &array[1]) == comp_failure(&array[1]));
+static_assert(compare_exchange_strong(&small_array[0], &small_array[1], &small_array[0]) == comp_failure(&small_array[0]));
+
+
+// --FETCH-OP--
+template <typename T, typename Y> consteval auto fetch_add(T original, Y arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_add(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(fetch_add(false, 1) == two_values{false, true});
+static_assert(fetch_add(0, 100) == two_values{0, 100});
+static_assert(fetch_add(100, -50) == two_values{100, 50});
+
+static_assert(fetch_add(int_max, 1) == two_values{int_max, int_min}); // overflow is defined for atomic
+static_assert(fetch_add(int_min, -1) == two_values{int_min, int_max});
+
+// floats
+static_assert(fetch_add(10.3, 2.1) == two_values{10.3, 12.4});
+static_assert(fetch_add(10.3f, 2.1f) == two_values{10.3f, 12.4f});
+
+// pointers
+static_assert(fetch_add(&array[0], 1) == two_values{&array[0], &array[1]});
+static_assert(fetch_add(&small_array[0], 1) == two_values{&small_array[0], &small_array[1]});
+static_assert(fetch_add(&array[1], 0) == two_values{&array[1], &array[1]});
+static_assert(fetch_add(&small_array[1], 0) == two_values{&small_array[1], &small_array[1]});
+static_assert(fetch_add(&array[1], -1) == two_values{&array[1], &array[0]});
+static_assert(fetch_add(&small_array[1], -1) == two_values{&small_array[1], &small_array[0]});
+
+template <typename T, typename Y> consteval auto fetch_sub(T original, Y arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_sub(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(fetch_sub(true, 1) == two_values{true, false});
+static_assert(fetch_sub(0, 100) == two_values{0, -100});
+static_assert(fetch_sub(100, -50) == two_values{100, 150});
+
+static_assert(fetch_sub(int_min, 1) == two_values{int_min, int_max}); // overflow is defined for atomic
+static_assert(fetch_sub(int_max, -1) == two_values{int_max, int_min});
+
+// floats
+static_assert(fetch_sub(10.3, 2.3) == two_values{10.3, 8.0});
+static_assert(fetch_sub(10.3f, 2.3f) == two_values{10.3f, 8.0f});
+
+// pointers
+static_assert(fetch_sub(&array[1], 1) == two_values{&array[1], &array[0]});
+static_assert(fetch_sub(&small_array[1], 1) == two_values{&small_array[1], &small_array[0]});
+static_assert(fetch_sub(&array[1], 0) == two_values{&array[1], &array[1]});
+static_assert(fetch_sub(&small_array[1], 0) == two_values{&small_array[1], &small_array[1]});
+static_assert(fetch_sub(&array[0], -1) == two_values{&array[0], &array[1]});
+static_assert(fetch_sub(&small_array[0], -1) == two_values{&small_array[0], &small_array[1]});
+
+template <typename T, typename Y> consteval auto fetch_and(T original, Y arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_and(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T, typename Y> consteval auto fetch_or(T original, Y arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_or(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T, typename Y> consteval auto fetch_xor(T original, Y arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_xor(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T, typename Y> consteval auto fetch_nand(T original, Y arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_nand(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+static_assert(fetch_and(0b1101u, 0b1011u) == two_values{0b1101u, 0b1001u});
+static_assert(fetch_or(0b1101u, 0b1011u) == two_values{0b1101u, 0b1111u});
+static_assert(fetch_xor(0b1101u, 0b1011u) == two_values{0b1101u, 0b0110u});
+static_assert(fetch_nand(0b1001u, 0b1011u) == two_values{0b1001u, 0xFFFFFFF6u});
+
+template <typename T> consteval auto fetch_min(T original, T arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_min(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T> consteval auto fetch_max(T original, T arg) -> two_values<T> {
+ _Atomic(T) av = T{original};
+ const T result = __c11_atomic_fetch_max(&av, arg, __ATOMIC_RELAXED);
+ return two_values<T>{result, __c11_atomic_load(&av, __ATOMIC_RELAXED)};
+}
+
+static_assert(fetch_max(10, 16) == two_values{10, 16});
+static_assert(fetch_max(16, 10) == two_values{16, 16});
+
+static_assert(fetch_min(10, 16) == two_values{10, 10});
+static_assert(fetch_min(16, 10) == two_values{16, 10});
+
diff --git a/clang/test/SemaCXX/atomic-constexpr-gcc-builtins.cpp b/clang/test/SemaCXX/atomic-constexpr-gcc-builtins.cpp
new file mode 100644
index 0000000000000..1e9ff93e313d6
--- /dev/null
+++ b/clang/test/SemaCXX/atomic-constexpr-gcc-builtins.cpp
@@ -0,0 +1,494 @@
+// RUN: %clang_cc1 -std=c++20 -fsyntax-only -verify %s
+
+// expected-no-diagnostics
+
+// 32-bit int bounds; the overflow tests below rely on wrap-around at these.
+constexpr int int_min = -2147483648;
+constexpr int int_max = 2147483647;
+
+// Objects with static storage so the pointer tests have constexpr-usable
+// addresses; 'small_array' exercises a pointee size other than sizeof(int).
+const int array[2] = {1,2};
+const char small_array[2] = {1,2};
+
+template <typename T> struct identity {
+  using type = T;
+};
+
+// Non-deduced context: prevents T from being deduced from this parameter,
+// so mixed-type call sites convert to the first argument's type.
+template <typename T> using do_not_deduce = typename identity<T>::type;
+
+// -- LOAD --
+
+// Round-trips a value through __atomic_load (pointer-output form) and
+// returns the copy that was read back.
+template <typename T> consteval T load(T value) {
+  T av = value;
+  T out{};
+  __atomic_load(&av, &out, __ATOMIC_RELAXED);
+  return out;
+}
+
+// integers
+static_assert(load(true) == true);
+static_assert(load(false) == false);
+
+static_assert(load(42) == 42);
+static_assert(load(-128) == -128);
+
+static_assert(load(42u) == 42u);
+static_assert(load(0xFFFFFFFFu) == 0xFFFFFFFFu);
+
+// pointers
+static_assert(load(&array[0]) == &array[0]);
+static_assert(load(&small_array[1]) == &small_array[1]);
+
+// -- LOAD-N --
+
+// Same round-trip as load(), but via the value-returning __atomic_load_n form.
+template <typename T> consteval T load_n(T value) {
+  T av = value;
+  return __atomic_load_n(&av, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(load_n(true) == true);
+static_assert(load_n(false) == false);
+
+static_assert(load_n(42) == 42);
+static_assert(load_n(-128) == -128);
+
+static_assert(load_n(42u) == 42u);
+static_assert(load_n(0xFFFFFFFFu) == 0xFFFFFFFFu);
+
+// pointers
+static_assert(load_n(&array[0]) == &array[0]);
+static_assert(load_n(&small_array[1]) == &small_array[1]);
+
+
+// -- STORE --
+
+// Stores into a zero-initialized object via __atomic_store (pointer-input
+// form), then reads the object back to observe the stored value.
+template <typename T> consteval T store(T value) {
+  T av = T{};
+  __atomic_store(&av, &value, __ATOMIC_RELAXED);
+  return __atomic_load_n(&av, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(store(true) == true);
+static_assert(store(false) == false);
+
+static_assert(store(42) == 42);
+static_assert(store(-128) == -128);
+
+static_assert(store(42u) == 42u);
+static_assert(store(0xFFFFFFFFu) == 0xFFFFFFFFu);
+
+// pointers
+static_assert(store(&array[0]) == &array[0]);
+static_assert(store(&small_array[1]) == &small_array[1]);
+
+// -- STORE-N --
+
+// Same as store(), but via the by-value __atomic_store_n form.
+template <typename T> consteval T store_n(T value) {
+  T av = T{};
+  __atomic_store_n(&av, value, __ATOMIC_RELAXED);
+  return __atomic_load_n(&av, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(store_n(true) == true);
+static_assert(store_n(false) == false);
+
+static_assert(store_n(42) == 42);
+static_assert(store_n(-128) == -128);
+
+static_assert(store_n(42u) == 42u);
+static_assert(store_n(0xFFFFFFFFu) == 0xFFFFFFFFu);
+
+// pointers
+static_assert(store_n(&array[0]) == &array[0]);
+static_assert(store_n(&small_array[1]) == &small_array[1]);
+
+// -- EXCHANGE --
+// Pair of observations: the value the operation returned and the value left
+// in the atomic object afterwards.
+template <typename T> struct two_values {
+  T before;
+  T after;
+  constexpr friend bool operator==(two_values, two_values) = default;
+};
+
+// exchange: old value is delivered through 'out'; 'replacement' is stored.
+template <typename T> consteval auto exchange(T value, do_not_deduce<T> replacement) -> two_values<T> {
+  T av = T{value};
+  T out{};
+  __atomic_exchange(&av, &replacement, &out, __ATOMIC_RELAXED);
+  return two_values<T>{out, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(exchange(true,false) == two_values{true, false});
+static_assert(exchange(false,true) == two_values{false, true});
+
+static_assert(exchange(10,42) == two_values{10,42});
+static_assert(exchange(14,-128) == two_values{14,-128});
+
+
+static_assert(exchange(56u,42u) == two_values{56u,42u});
+static_assert(exchange(0xFFu, 0xFFFFFFFFu) == two_values{0xFFu,0xFFFFFFFFu});
+
+// -- EXCHANGE-N --
+// Same as exchange(), but via the value-returning __atomic_exchange_n form.
+template <typename T> consteval auto exchange_n(T value, do_not_deduce<T> replacement) -> two_values<T> {
+  T av = T{value};
+  T previous = __atomic_exchange_n(&av, replacement, __ATOMIC_RELAXED);
+  return two_values<T>{previous, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(exchange_n(true,false) == two_values{true, false});
+static_assert(exchange_n(false,true) == two_values{false, true});
+
+static_assert(exchange_n(10,42) == two_values{10,42});
+static_assert(exchange_n(14,-128) == two_values{14,-128});
+
+
+static_assert(exchange_n(56u,42u) == two_values{56u,42u});
+static_assert(exchange_n(0xFFu, 0xFFFFFFFFu) == two_values{0xFFu,0xFFFFFFFFu});
+
+// pointers
+static_assert(exchange_n(&array[0], &array[1]) == two_values{&array[0],&array[1]});
+static_assert(exchange_n(&small_array[1], &small_array[0]) == two_values{&small_array[1], &small_array[0]});
+
+// -- COMPARE-EXCHANGE --
+// Observation triple for compare-exchange: whether it succeeded, the value
+// the builtin wrote back into 'expected', and the value left in the atomic.
+template <typename T> struct comp_result {
+  bool success;
+  T output;
+  T after;
+
+  constexpr comp_result(bool success_, T output_, do_not_deduce<T> after_): success{success_}, output{output_}, after{after_} { }
+
+  constexpr friend bool operator==(comp_result, comp_result) = default;
+};
+
+// Expected observation for a successful exchange: 'expected' is untouched
+// and the atomic now holds 'after'.
+template <typename T> constexpr auto comp_success(T output, do_not_deduce<T> after) {
+  return comp_result<T>{true, output, after};
+}
+
+// Expected observation for a failed exchange: 'expected' is overwritten with
+// the observed value and the atomic is unchanged.
+template <typename T> constexpr auto comp_failure(T after) {
+  return comp_result<T>{false, after, after};
+}
+
+// The builtin's 4th argument is 'weak'; pass true so this helper exercises
+// the weak form its name advertises. (Weak and strong cannot differ during
+// constant evaluation — there are no spurious failures — so the asserts
+// below hold either way.)
+template <typename T> consteval auto compare_exchange_weak(T original, do_not_deduce<T> expected, do_not_deduce<T> replacement) -> comp_result<T> {
+  T av = T{original};
+  const bool success = __atomic_compare_exchange(&av, &expected, &replacement, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return comp_result<T>{success, expected, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(compare_exchange_weak(true, true, false) == comp_success(true, false));
+static_assert(compare_exchange_weak(false, false, true) == comp_success(false, true));
+static_assert(compare_exchange_weak(false, true, false) == comp_failure(false));
+static_assert(compare_exchange_weak(true, false, true) == comp_failure(true));
+
+static_assert(compare_exchange_weak(10,10,42) == comp_success(10,42));
+static_assert(compare_exchange_weak(14,14,-128) == comp_success(14,-128));
+static_assert(compare_exchange_weak(-10,10,42) == comp_failure(-10));
+static_assert(compare_exchange_weak(-14,14,-128) == comp_failure(-14));
+
+static_assert(compare_exchange_weak(56u, 56u,42u) == comp_success(56u,42u));
+static_assert(compare_exchange_weak(0xFFu, 0xFFu, 0xFFFFFFFFu) == comp_success(0xFFu,0xFFFFFFFFu));
+static_assert(compare_exchange_weak(3u, 56u,42u) == comp_failure(3u));
+static_assert(compare_exchange_weak(0xFu, 0xFFu, 0xFFFFFFFFu) == comp_failure(0xFu));
+
+// pointers
+static_assert(compare_exchange_weak(&array[0], &array[0], &array[1]) == comp_success(&array[0],&array[1]));
+static_assert(compare_exchange_weak(&small_array[1], &small_array[1], &small_array[0]) == comp_success(&small_array[1], &small_array[0]));
+static_assert(compare_exchange_weak(&array[1], &array[0], &array[1]) == comp_failure(&array[1]));
+static_assert(compare_exchange_weak(&small_array[0], &small_array[1], &small_array[0]) == comp_failure(&small_array[0]));
+
+
+// Strong compare-exchange: the builtin's 4th argument ('weak') must be
+// false here — the original passed true, i.e. the weak form, contradicting
+// this helper's name. Behavior in constant evaluation is identical (no
+// spurious failures), so the asserts are unchanged.
+template <typename T> consteval auto compare_exchange_strong(T original, do_not_deduce<T> expected, do_not_deduce<T> replacement) -> comp_result<T> {
+  T av = T{original};
+  const bool success = __atomic_compare_exchange(&av, &expected, &replacement, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return comp_result<T>{success, expected, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(compare_exchange_strong(true, true, false) == comp_success(true, false));
+static_assert(compare_exchange_strong(false, false, true) == comp_success(false, true));
+static_assert(compare_exchange_strong(false, true, false) == comp_failure(false));
+static_assert(compare_exchange_strong(true, false, true) == comp_failure(true));
+
+static_assert(compare_exchange_strong(10,10,42) == comp_success(10,42));
+static_assert(compare_exchange_strong(14,14,-128) == comp_success(14,-128));
+static_assert(compare_exchange_strong(-10,10,42) == comp_failure(-10));
+static_assert(compare_exchange_strong(-14,14,-128) == comp_failure(-14));
+
+static_assert(compare_exchange_strong(56u, 56u,42u) == comp_success(56u,42u));
+static_assert(compare_exchange_strong(0xFFu, 0xFFu, 0xFFFFFFFFu) == comp_success(0xFFu,0xFFFFFFFFu));
+static_assert(compare_exchange_strong(3u, 56u,42u) == comp_failure(3u));
+static_assert(compare_exchange_strong(0xFu, 0xFFu, 0xFFFFFFFFu) == comp_failure(0xFu));
+
+// pointers
+static_assert(compare_exchange_strong(&array[0], &array[0], &array[1]) == comp_success(&array[0],&array[1]));
+static_assert(compare_exchange_strong(&small_array[1], &small_array[1], &small_array[0]) == comp_success(&small_array[1], &small_array[0]));
+static_assert(compare_exchange_strong(&array[1], &array[0], &array[1]) == comp_failure(&array[1]));
+static_assert(compare_exchange_strong(&small_array[0], &small_array[1], &small_array[0]) == comp_failure(&small_array[0]));
+
+// --COMPARE-EXCHANGE-N--
+
+// Weak compare-exchange via the by-value _n form. The builtin's 4th argument
+// is 'weak'; pass true so this matches the helper's name (the original
+// passed false, i.e. the strong form). Results in constant evaluation are
+// identical — no spurious failures — so the asserts are unchanged.
+template <typename T> consteval auto compare_exchange_weak_n(T original, do_not_deduce<T> expected, do_not_deduce<T> replacement) -> comp_result<T> {
+  T av = T{original};
+  const bool success = __atomic_compare_exchange_n(&av, &expected, replacement, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return comp_result<T>{success, expected, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(compare_exchange_weak_n(true, true, false) == comp_success(true, false));
+static_assert(compare_exchange_weak_n(false, false, true) == comp_success(false, true));
+static_assert(compare_exchange_weak_n(false, true, false) == comp_failure(false));
+static_assert(compare_exchange_weak_n(true, false, true) == comp_failure(true));
+
+static_assert(compare_exchange_weak_n(10,10,42) == comp_success(10,42));
+static_assert(compare_exchange_weak_n(14,14,-128) == comp_success(14,-128));
+static_assert(compare_exchange_weak_n(-10,10,42) == comp_failure(-10));
+static_assert(compare_exchange_weak_n(-14,14,-128) == comp_failure(-14));
+
+static_assert(compare_exchange_weak_n(56u, 56u,42u) == comp_success(56u,42u));
+static_assert(compare_exchange_weak_n(0xFFu, 0xFFu, 0xFFFFFFFFu) == comp_success(0xFFu,0xFFFFFFFFu));
+static_assert(compare_exchange_weak_n(3u, 56u,42u) == comp_failure(3u));
+static_assert(compare_exchange_weak_n(0xFu, 0xFFu, 0xFFFFFFFFu) == comp_failure(0xFu));
+
+// pointers
+static_assert(compare_exchange_weak_n(&array[0], &array[0], &array[1]) == comp_success(&array[0],&array[1]));
+static_assert(compare_exchange_weak_n(&small_array[1], &small_array[1], &small_array[0]) == comp_success(&small_array[1], &small_array[0]));
+static_assert(compare_exchange_weak_n(&array[1], &array[0], &array[1]) == comp_failure(&array[1]));
+static_assert(compare_exchange_weak_n(&small_array[0], &small_array[1], &small_array[0]) == comp_failure(&small_array[0]));
+
+
+// Strong compare-exchange via the by-value _n form; pass weak=false to
+// match the helper's name (the original passed true, i.e. the weak form).
+// Constant evaluation never fails spuriously, so the asserts are unchanged.
+template <typename T> consteval auto compare_exchange_strong_n(T original, do_not_deduce<T> expected, do_not_deduce<T> replacement) -> comp_result<T> {
+  T av = T{original};
+  const bool success = __atomic_compare_exchange_n(&av, &expected, replacement, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return comp_result<T>{success, expected, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(compare_exchange_strong_n(true, true, false) == comp_success(true, false));
+static_assert(compare_exchange_strong_n(false, false, true) == comp_success(false, true));
+static_assert(compare_exchange_strong_n(false, true, false) == comp_failure(false));
+static_assert(compare_exchange_strong_n(true, false, true) == comp_failure(true));
+
+static_assert(compare_exchange_strong_n(10,10,42) == comp_success(10,42));
+static_assert(compare_exchange_strong_n(14,14,-128) == comp_success(14,-128));
+static_assert(compare_exchange_strong_n(-10,10,42) == comp_failure(-10));
+static_assert(compare_exchange_strong_n(-14,14,-128) == comp_failure(-14));
+
+static_assert(compare_exchange_strong_n(56u, 56u,42u) == comp_success(56u,42u));
+static_assert(compare_exchange_strong_n(0xFFu, 0xFFu, 0xFFFFFFFFu) == comp_success(0xFFu,0xFFFFFFFFu));
+static_assert(compare_exchange_strong_n(3u, 56u,42u) == comp_failure(3u));
+static_assert(compare_exchange_strong_n(0xFu, 0xFFu, 0xFFFFFFFFu) == comp_failure(0xFu));
+
+// pointers
+static_assert(compare_exchange_strong_n(&array[0], &array[0], &array[1]) == comp_success(&array[0],&array[1]));
+static_assert(compare_exchange_strong_n(&small_array[1], &small_array[1], &small_array[0]) == comp_success(&small_array[1], &small_array[0]));
+static_assert(compare_exchange_strong_n(&array[1], &array[0], &array[1]) == comp_failure(&array[1]));
+static_assert(compare_exchange_strong_n(&small_array[0], &small_array[1], &small_array[0]) == comp_failure(&small_array[0]));
+
+// --FETCH-OP--
+// fetch_add: GCC-style builtin; returns {value before add, value after add}.
+template <typename T, typename Y> consteval auto fetch_add(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_add(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// Pointer variant: scales the element count to a byte offset first, because
+// the test feeds the builtin a byte count rather than an element count.
+template <typename T, typename Y> consteval auto fetch_add_ptr(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  // sizeof's operand is unevaluated, so dereferencing the null T is fine.
+  constexpr auto pointee_size = sizeof(*static_cast<T>(nullptr));
+  arg *= pointee_size;
+  const T result = __atomic_fetch_add(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(fetch_add(false, 1) == two_values{false, true});
+static_assert(fetch_add(0, 100) == two_values{0, 100});
+static_assert(fetch_add(100, -50) == two_values{100, 50});
+
+static_assert(fetch_add(int_max, 1) == two_values{int_max, int_min}); // overflow is defined for atomic
+static_assert(fetch_add(int_min, -1) == two_values{int_min, int_max});
+
+// pointers
+static_assert(fetch_add_ptr(&array[0], 1) == two_values{&array[0], &array[1]});
+static_assert(fetch_add_ptr(&small_array[0], 1) == two_values{&small_array[0], &small_array[1]});
+static_assert(fetch_add_ptr(&array[1], 0) == two_values{&array[1], &array[1]});
+static_assert(fetch_add_ptr(&small_array[1], 0) == two_values{&small_array[1], &small_array[1]});
+static_assert(fetch_add_ptr(&array[1], -1) == two_values{&array[1], &array[0]});
+static_assert(fetch_add_ptr(&small_array[1], -1) == two_values{&small_array[1], &small_array[0]});
+
+// fetch_sub: GCC-style builtin; returns {before, after}.
+template <typename T, typename Y> consteval auto fetch_sub(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_sub(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// Pointer variant: scales the element count to bytes, mirroring fetch_add_ptr.
+template <typename T, typename Y> consteval auto fetch_sub_ptr(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  constexpr auto pointee_size = sizeof(*static_cast<T>(nullptr));
+  arg *= pointee_size;
+  const T result = __atomic_fetch_sub(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// integers
+static_assert(fetch_sub(true, 1) == two_values{true, false});
+static_assert(fetch_sub(0, 100) == two_values{0, -100});
+static_assert(fetch_sub(100, -50) == two_values{100, 150});
+
+static_assert(fetch_sub(int_min, 1) == two_values{int_min, int_max}); // overflow is defined for atomic
+static_assert(fetch_sub(int_max, -1) == two_values{int_max, int_min});
+
+// pointers
+static_assert(fetch_sub_ptr(&array[1], 1) == two_values{&array[1], &array[0]});
+static_assert(fetch_sub_ptr(&small_array[1], 1) == two_values{&small_array[1], &small_array[0]});
+static_assert(fetch_sub_ptr(&array[1], 0) == two_values{&array[1], &array[1]});
+static_assert(fetch_sub_ptr(&small_array[1], 0) == two_values{&small_array[1], &small_array[1]});
+static_assert(fetch_sub_ptr(&array[0], -1) == two_values{&array[0], &array[1]});
+static_assert(fetch_sub_ptr(&small_array[0], -1) == two_values{&small_array[0], &small_array[1]});
+
+// Bitwise fetch-ops via GCC-style builtins; each returns {before, after}.
+template <typename T, typename Y> consteval auto fetch_and(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_and(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T, typename Y> consteval auto fetch_or(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_or(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T, typename Y> consteval auto fetch_xor(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_xor(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+// NAND: av becomes ~(av & arg).
+template <typename T, typename Y> consteval auto fetch_nand(T original, Y arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_nand(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+static_assert(fetch_and(0b1101u, 0b1011u) == two_values{0b1101u, 0b1001u});
+static_assert(fetch_or(0b1101u, 0b1011u) == two_values{0b1101u, 0b1111u});
+static_assert(fetch_xor(0b1101u, 0b1011u) == two_values{0b1101u, 0b0110u});
+static_assert(fetch_nand(0b1001u, 0b1011u) == two_values{0b1001u, 0xFFFFFFF6u});
+
+// fetch_min/fetch_max via GCC-style builtins; each returns {before, after}.
+template <typename T> consteval auto fetch_min(T original, T arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_min(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+template <typename T> consteval auto fetch_max(T original, T arg) -> two_values<T> {
+  T av = T{original};
+  const T result = __atomic_fetch_max(&av, arg, __ATOMIC_RELAXED);
+  return two_values<T>{result, __atomic_load_n(&av, __ATOMIC_RELAXED)};
+}
+
+static_assert(fetch_max(10, 16) == two_values{10, 16});
+static_assert(fetch_max(16, 10) == two_values{16, 16});
+
+static_assert(fetch_min(10, 16) == two_values{10, 10});
+static_assert(fetch_min(16, 10) == two_values{16, 10});
+
+// --OP-FETCH--
+// add_fetch: returns the value *after* the addition (op-fetch form).
+template <typename T, typename Y> consteval auto add_fetch(T original, Y arg) -> T {
+  T av = T{original};
+  return __atomic_add_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+// Pointer variant: scales the element count to a byte offset first,
+// mirroring fetch_add_ptr above.
+template <typename T, typename Y> consteval auto add_fetch_ptr(T original, Y arg) -> T {
+  T av = T{original};
+  // sizeof's operand is unevaluated, so dereferencing the null T is fine.
+  constexpr auto pointee_size = sizeof(*static_cast<T>(nullptr));
+  arg *= pointee_size;
+  return __atomic_add_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(add_fetch(false, 1) == true);
+static_assert(add_fetch(0, 100) == 100);
+static_assert(add_fetch(100, -50) == 50);
+
+static_assert(add_fetch(int_max, 1) == int_min); // overflow is defined for atomic
+static_assert(add_fetch(int_min, -1) == int_max);
+
+// pointers
+static_assert(add_fetch_ptr(&array[0], 1) == &array[1]);
+static_assert(add_fetch_ptr(&small_array[0], 1) == &small_array[1]);
+static_assert(add_fetch_ptr(&array[1], 0) == &array[1]);
+static_assert(add_fetch_ptr(&small_array[1], 0) == &small_array[1]);
+static_assert(add_fetch_ptr(&array[1], -1) == &array[0]);
+static_assert(add_fetch_ptr(&small_array[1], -1) == &small_array[0]);
+
+// sub_fetch: returns the value *after* the subtraction (op-fetch form).
+template <typename T, typename Y> consteval auto sub_fetch(T original, Y arg) -> T {
+  T av = T{original};
+  return __atomic_sub_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+// Pointer variant: scales the element count to bytes, mirroring add_fetch_ptr.
+template <typename T, typename Y> consteval auto sub_fetch_ptr(T original, Y arg) -> T {
+  T av = T{original};
+  constexpr auto pointee_size = sizeof(*static_cast<T>(nullptr));
+  arg *= pointee_size;
+  return __atomic_sub_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+// integers
+static_assert(sub_fetch(true, 1) == false);
+static_assert(sub_fetch(0, 100) == -100);
+static_assert(sub_fetch(100, -50) == 150);
+
+static_assert(sub_fetch(int_min, 1) == int_max); // overflow is defined for atomic
+static_assert(sub_fetch(int_max, -1) == int_min);
+
+// pointers
+static_assert(sub_fetch_ptr(&array[1], 1) == &array[0]);
+static_assert(sub_fetch_ptr(&small_array[1], 1) == &small_array[0]);
+static_assert(sub_fetch_ptr(&array[1], 0) == &array[1]);
+static_assert(sub_fetch_ptr(&small_array[1], 0) == &small_array[1]);
+static_assert(sub_fetch_ptr(&array[0], -1) == &array[1]);
+static_assert(sub_fetch_ptr(&small_array[0], -1) == &small_array[1]);
+
+// Bitwise op-fetch forms: each returns the value *after* the operation.
+template <typename T, typename Y> consteval auto and_fetch(T original, Y arg) -> T {
+  T av = T{original};
+  return __atomic_and_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+template <typename T, typename Y> consteval auto or_fetch(T original, Y arg) -> T {
+  T av = T{original};
+  return __atomic_or_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+template <typename T, typename Y> consteval auto xor_fetch(T original, Y arg) -> T {
+  T av = T{original};
+  return __atomic_xor_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+// NAND: result is ~(av & arg).
+template <typename T, typename Y> consteval auto nand_fetch(T original, Y arg) -> T {
+  T av = T{original};
+  return __atomic_nand_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+static_assert(and_fetch(0b1101u, 0b1011u) == 0b1001u);
+static_assert(or_fetch(0b1101u, 0b1011u) == 0b1111u);
+static_assert(xor_fetch(0b1101u, 0b1011u) == 0b0110u);
+static_assert(nand_fetch(0b1001u, 0b1011u) == 0xFFFFFFF6u);
+
+// min_fetch/max_fetch: op-fetch forms; each returns the value *after* the op.
+template <typename T> consteval auto min_fetch(T original, T arg) -> T {
+  T av = T{original};
+  return __atomic_min_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+template <typename T> consteval auto max_fetch(T original, T arg) -> T {
+  T av = T{original};
+  return __atomic_max_fetch(&av, arg, __ATOMIC_RELAXED);
+}
+
+static_assert(max_fetch(10, 16) == 16);
+static_assert(max_fetch(16, 10) == 16);
+
+static_assert(min_fetch(10, 16) == 10);
+static_assert(min_fetch(16, 10) == 10);
+
+
More information about the libcxx-commits
mailing list