r203561 - Sema: produce error when invalid ordering is passed to atomic builtin
Tim Northover
tnorthover at apple.com
Tue Mar 11 03:49:15 PDT 2014
Author: tnorthover
Date: Tue Mar 11 05:49:14 2014
New Revision: 203561
URL: http://llvm.org/viewvc/llvm-project?rev=203561&view=rev
Log:
Sema: produce error when invalid ordering is passed to atomic builtin
This is a conservative check: the ordering expression is allowed to be
non-constant, and in that case we simply cannot tell whether its value is valid.
rdar://problem/16242991
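
As a quick illustration (not part of the commit; the function and variable
names below are invented for the example), this is the kind of code the new
Sema check diagnoses, matching the expectations added to
test/Sema/atomic-ops.c further down:

  void example(_Atomic(int) *p, int val) {
    /* Rejected: a store may not use consume, acquire or acq_rel ordering. */
    __c11_atomic_store(p, 1, memory_order_acquire);
    /*   error: memory order argument to atomic operation is invalid */

    /* Rejected: a load may not use release or acq_rel ordering. */
    (void)__c11_atomic_load(p, memory_order_release);
    /*   error: memory order argument to atomic operation is invalid */

    /* Accepted: the ordering is not an integer constant expression, so the
       conservative check stays quiet even though the runtime value might
       turn out to be invalid. */
    (void)__c11_atomic_load(p, val);
  }
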
Modified:
cfe/trunk/include/clang/AST/Expr.h
cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
cfe/trunk/lib/CodeGen/CGAtomic.cpp
cfe/trunk/lib/Sema/SemaChecking.cpp
cfe/trunk/test/CodeGen/atomic-ops.c
cfe/trunk/test/CodeGen/big-atomic-ops.c
cfe/trunk/test/Sema/atomic-ops.c
Modified: cfe/trunk/include/clang/AST/Expr.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/AST/Expr.h?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/include/clang/AST/Expr.h (original)
+++ cfe/trunk/include/clang/AST/Expr.h Tue Mar 11 05:49:14 2014
@@ -4728,6 +4728,16 @@ public:
BI_First = 0
};
+ // The ABI values for various atomic memory orderings.
+ enum AtomicOrderingKind {
+ AO_ABI_memory_order_relaxed = 0,
+ AO_ABI_memory_order_consume = 1,
+ AO_ABI_memory_order_acquire = 2,
+ AO_ABI_memory_order_release = 3,
+ AO_ABI_memory_order_acq_rel = 4,
+ AO_ABI_memory_order_seq_cst = 5
+ };
+
private:
enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, WEAK, END_EXPR };
Stmt* SubExprs[END_EXPR];
Modified: cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td (original)
+++ cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td Tue Mar 11 05:49:14 2014
@@ -5689,7 +5689,9 @@ def err_atomic_op_needs_atomic_int_or_pt
def err_atomic_op_bitwise_needs_atomic_int : Error<
"address argument to bitwise atomic operation must be a pointer to "
"%select{|atomic }0integer (%1 invalid)">;
-
+def err_atomic_op_has_invalid_memory_order : Error<
+ "memory order argument to atomic operation is invalid">;
+
def err_atomic_load_store_uses_lib : Error<
"atomic %select{load|store}0 requires runtime support that is not "
"available for this target">;
Modified: cfe/trunk/lib/CodeGen/CGAtomic.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGAtomic.cpp?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGAtomic.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGAtomic.cpp Tue Mar 11 05:49:14 2014
@@ -24,16 +24,6 @@
using namespace clang;
using namespace CodeGen;
-// The ABI values for various atomic memory orderings.
-enum AtomicOrderingKind {
- AO_ABI_memory_order_relaxed = 0,
- AO_ABI_memory_order_consume = 1,
- AO_ABI_memory_order_acquire = 2,
- AO_ABI_memory_order_release = 3,
- AO_ABI_memory_order_acq_rel = 4,
- AO_ABI_memory_order_seq_cst = 5
-};
-
namespace {
class AtomicInfo {
CodeGenFunction &CGF;
@@ -642,30 +632,30 @@ RValue CodeGenFunction::EmitAtomicExpr(A
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
- case AO_ABI_memory_order_relaxed:
+ case AtomicExpr::AO_ABI_memory_order_relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Monotonic);
break;
- case AO_ABI_memory_order_consume:
- case AO_ABI_memory_order_acquire:
+ case AtomicExpr::AO_ABI_memory_order_consume:
+ case AtomicExpr::AO_ABI_memory_order_acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
break;
- case AO_ABI_memory_order_release:
+ case AtomicExpr::AO_ABI_memory_order_release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
break;
- case AO_ABI_memory_order_acq_rel:
+ case AtomicExpr::AO_ABI_memory_order_acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
break;
- case AO_ABI_memory_order_seq_cst:
+ case AtomicExpr::AO_ABI_memory_order_seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::SequentiallyConsistent);
break;
@@ -788,8 +778,8 @@ RValue CodeGenFunction::EmitAtomicLoad(L
getContext().VoidPtrTy);
args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
getContext().VoidPtrTy);
- args.add(RValue::get(llvm::ConstantInt::get(IntTy,
- AO_ABI_memory_order_seq_cst)),
+ args.add(RValue::get(llvm::ConstantInt::get(
+ IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
getContext().IntTy);
emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
@@ -938,8 +928,8 @@ void CodeGenFunction::EmitAtomicStore(RV
getContext().VoidPtrTy);
args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
getContext().VoidPtrTy);
- args.add(RValue::get(llvm::ConstantInt::get(IntTy,
- AO_ABI_memory_order_seq_cst)),
+ args.add(RValue::get(llvm::ConstantInt::get(
+ IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
getContext().IntTy);
emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
return;
Modified: cfe/trunk/lib/Sema/SemaChecking.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Sema/SemaChecking.cpp?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/lib/Sema/SemaChecking.cpp (original)
+++ cfe/trunk/lib/Sema/SemaChecking.cpp Tue Mar 11 05:49:14 2014
@@ -911,6 +911,33 @@ bool Sema::CheckOtherCall(CallExpr *TheC
return false;
}
+static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
+ if (Ordering < AtomicExpr::AO_ABI_memory_order_relaxed ||
+ Ordering > AtomicExpr::AO_ABI_memory_order_seq_cst)
+ return false;
+
+ switch (Op) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("There is no ordering argument for an init");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load:
+ return Ordering != AtomicExpr::AO_ABI_memory_order_release &&
+ Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ return Ordering != AtomicExpr::AO_ABI_memory_order_consume &&
+ Ordering != AtomicExpr::AO_ABI_memory_order_acquire &&
+ Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;
+
+ default:
+ return true;
+ }
+}
+
ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op) {
CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
@@ -1199,7 +1226,16 @@ ExprResult Sema::SemaAtomicOpsOverloaded
SubExprs.push_back(TheCall->getArg(3)); // Weak
break;
}
-
+
+ if (SubExprs.size() >= 2 && Form != Init) {
+ llvm::APSInt Result(32);
+ if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
+ !isValidOrderingForOp(Result.getSExtValue(), Op))
+ return ExprError(Diag(SubExprs[1]->getLocStart(),
+ diag::err_atomic_op_has_invalid_memory_order)
+ << SubExprs[1]->getSourceRange());
+ }
+
AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
SubExprs, ResultType, Op,
TheCall->getRParenLoc());
Modified: cfe/trunk/test/CodeGen/atomic-ops.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/atomic-ops.c?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/atomic-ops.c (original)
+++ cfe/trunk/test/CodeGen/atomic-ops.c Tue Mar 11 05:49:14 2014
@@ -315,13 +315,4 @@ void atomic_init_foo()
// CHECK: }
}
-// CHECK: @invalid_atomic
-void invalid_atomic(_Atomic(int) *i) {
- __c11_atomic_store(i, 1, memory_order_consume);
- __c11_atomic_store(i, 1, memory_order_acquire);
- __c11_atomic_store(i, 1, memory_order_acq_rel);
- __c11_atomic_load(i, memory_order_release);
- __c11_atomic_load(i, memory_order_acq_rel);
-}
-
#endif
Modified: cfe/trunk/test/CodeGen/big-atomic-ops.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/big-atomic-ops.c?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/big-atomic-ops.c (original)
+++ cfe/trunk/test/CodeGen/big-atomic-ops.c Tue Mar 11 05:49:14 2014
@@ -311,13 +311,4 @@ void atomic_init_foo()
// CHECK: }
}
-// CHECK: @invalid_atomic
-void invalid_atomic(_Atomic(int) *i) {
- __c11_atomic_store(i, 1, memory_order_consume);
- __c11_atomic_store(i, 1, memory_order_acquire);
- __c11_atomic_store(i, 1, memory_order_acq_rel);
- __c11_atomic_load(i, memory_order_release);
- __c11_atomic_load(i, memory_order_acq_rel);
-}
-
#endif
Modified: cfe/trunk/test/Sema/atomic-ops.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Sema/atomic-ops.c?rev=203561&r1=203560&r2=203561&view=diff
==============================================================================
--- cfe/trunk/test/Sema/atomic-ops.c (original)
+++ cfe/trunk/test/Sema/atomic-ops.c Tue Mar 11 05:49:14 2014
@@ -182,3 +182,225 @@ void PR16931(int* x) { // expected-note
flag flagvar = { 0 };
PR16931(&flagvar); // expected-warning {{incompatible pointer types}}
}
+
+void memory_checks(_Atomic(int) *Ap, int *p, int val) {
+ (void)__c11_atomic_load(Ap, memory_order_relaxed);
+ (void)__c11_atomic_load(Ap, memory_order_acquire);
+ (void)__c11_atomic_load(Ap, memory_order_consume);
+ (void)__c11_atomic_load(Ap, memory_order_release); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__c11_atomic_load(Ap, memory_order_acq_rel); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__c11_atomic_load(Ap, memory_order_seq_cst);
+ (void)__c11_atomic_load(Ap, val);
+ (void)__c11_atomic_load(Ap, -1); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__c11_atomic_load(Ap, 42); // expected-error {{memory order argument to atomic operation is invalid}}
+
+ (void)__c11_atomic_store(Ap, val, memory_order_relaxed);
+ (void)__c11_atomic_store(Ap, val, memory_order_acquire); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__c11_atomic_store(Ap, val, memory_order_consume); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__c11_atomic_store(Ap, val, memory_order_release);
+ (void)__c11_atomic_store(Ap, val, memory_order_acq_rel); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__c11_atomic_store(Ap, val, memory_order_seq_cst);
+
+ (void)__c11_atomic_fetch_add(Ap, 1, memory_order_relaxed);
+ (void)__c11_atomic_fetch_add(Ap, 1, memory_order_acquire);
+ (void)__c11_atomic_fetch_add(Ap, 1, memory_order_consume);
+ (void)__c11_atomic_fetch_add(Ap, 1, memory_order_release);
+ (void)__c11_atomic_fetch_add(Ap, 1, memory_order_acq_rel);
+ (void)__c11_atomic_fetch_add(Ap, 1, memory_order_seq_cst);
+
+ (void)__c11_atomic_init(Ap, val);
+ (void)__c11_atomic_init(Ap, val);
+ (void)__c11_atomic_init(Ap, val);
+ (void)__c11_atomic_init(Ap, val);
+ (void)__c11_atomic_init(Ap, val);
+ (void)__c11_atomic_init(Ap, val);
+
+ (void)__c11_atomic_fetch_sub(Ap, val, memory_order_relaxed);
+ (void)__c11_atomic_fetch_sub(Ap, val, memory_order_acquire);
+ (void)__c11_atomic_fetch_sub(Ap, val, memory_order_consume);
+ (void)__c11_atomic_fetch_sub(Ap, val, memory_order_release);
+ (void)__c11_atomic_fetch_sub(Ap, val, memory_order_acq_rel);
+ (void)__c11_atomic_fetch_sub(Ap, val, memory_order_seq_cst);
+
+ (void)__c11_atomic_fetch_and(Ap, val, memory_order_relaxed);
+ (void)__c11_atomic_fetch_and(Ap, val, memory_order_acquire);
+ (void)__c11_atomic_fetch_and(Ap, val, memory_order_consume);
+ (void)__c11_atomic_fetch_and(Ap, val, memory_order_release);
+ (void)__c11_atomic_fetch_and(Ap, val, memory_order_acq_rel);
+ (void)__c11_atomic_fetch_and(Ap, val, memory_order_seq_cst);
+
+ (void)__c11_atomic_fetch_or(Ap, val, memory_order_relaxed);
+ (void)__c11_atomic_fetch_or(Ap, val, memory_order_acquire);
+ (void)__c11_atomic_fetch_or(Ap, val, memory_order_consume);
+ (void)__c11_atomic_fetch_or(Ap, val, memory_order_release);
+ (void)__c11_atomic_fetch_or(Ap, val, memory_order_acq_rel);
+ (void)__c11_atomic_fetch_or(Ap, val, memory_order_seq_cst);
+
+ (void)__c11_atomic_fetch_xor(Ap, val, memory_order_relaxed);
+ (void)__c11_atomic_fetch_xor(Ap, val, memory_order_acquire);
+ (void)__c11_atomic_fetch_xor(Ap, val, memory_order_consume);
+ (void)__c11_atomic_fetch_xor(Ap, val, memory_order_release);
+ (void)__c11_atomic_fetch_xor(Ap, val, memory_order_acq_rel);
+ (void)__c11_atomic_fetch_xor(Ap, val, memory_order_seq_cst);
+
+ (void)__c11_atomic_exchange(Ap, val, memory_order_relaxed);
+ (void)__c11_atomic_exchange(Ap, val, memory_order_acquire);
+ (void)__c11_atomic_exchange(Ap, val, memory_order_consume);
+ (void)__c11_atomic_exchange(Ap, val, memory_order_release);
+ (void)__c11_atomic_exchange(Ap, val, memory_order_acq_rel);
+ (void)__c11_atomic_exchange(Ap, val, memory_order_seq_cst);
+
+ (void)__c11_atomic_compare_exchange_strong(Ap, p, val, memory_order_relaxed, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_strong(Ap, p, val, memory_order_acquire, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_strong(Ap, p, val, memory_order_consume, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_strong(Ap, p, val, memory_order_release, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_strong(Ap, p, val, memory_order_acq_rel, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_strong(Ap, p, val, memory_order_seq_cst, memory_order_relaxed);
+
+ (void)__c11_atomic_compare_exchange_weak(Ap, p, val, memory_order_relaxed, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_weak(Ap, p, val, memory_order_acquire, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_weak(Ap, p, val, memory_order_consume, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_weak(Ap, p, val, memory_order_release, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_weak(Ap, p, val, memory_order_acq_rel, memory_order_relaxed);
+ (void)__c11_atomic_compare_exchange_weak(Ap, p, val, memory_order_seq_cst, memory_order_relaxed);
+
+ (void)__atomic_load_n(p, memory_order_relaxed);
+ (void)__atomic_load_n(p, memory_order_acquire);
+ (void)__atomic_load_n(p, memory_order_consume);
+ (void)__atomic_load_n(p, memory_order_release); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_load_n(p, memory_order_acq_rel); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_load_n(p, memory_order_seq_cst);
+
+ (void)__atomic_load(p, p, memory_order_relaxed);
+ (void)__atomic_load(p, p, memory_order_acquire);
+ (void)__atomic_load(p, p, memory_order_consume);
+ (void)__atomic_load(p, p, memory_order_release); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_load(p, p, memory_order_acq_rel); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_load(p, p, memory_order_seq_cst);
+
+ (void)__atomic_store(p, p, memory_order_relaxed);
+ (void)__atomic_store(p, p, memory_order_acquire); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_store(p, p, memory_order_consume); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_store(p, p, memory_order_release);
+ (void)__atomic_store(p, p, memory_order_acq_rel); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_store(p, p, memory_order_seq_cst);
+
+ (void)__atomic_store_n(p, val, memory_order_relaxed);
+ (void)__atomic_store_n(p, val, memory_order_acquire); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_store_n(p, val, memory_order_consume); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_store_n(p, val, memory_order_release);
+ (void)__atomic_store_n(p, val, memory_order_acq_rel); // expected-error {{memory order argument to atomic operation is invalid}}
+ (void)__atomic_store_n(p, val, memory_order_seq_cst);
+
+ (void)__atomic_fetch_add(p, val, memory_order_relaxed);
+ (void)__atomic_fetch_add(p, val, memory_order_acquire);
+ (void)__atomic_fetch_add(p, val, memory_order_consume);
+ (void)__atomic_fetch_add(p, val, memory_order_release);
+ (void)__atomic_fetch_add(p, val, memory_order_acq_rel);
+ (void)__atomic_fetch_add(p, val, memory_order_seq_cst);
+
+ (void)__atomic_fetch_sub(p, val, memory_order_relaxed);
+ (void)__atomic_fetch_sub(p, val, memory_order_acquire);
+ (void)__atomic_fetch_sub(p, val, memory_order_consume);
+ (void)__atomic_fetch_sub(p, val, memory_order_release);
+ (void)__atomic_fetch_sub(p, val, memory_order_acq_rel);
+ (void)__atomic_fetch_sub(p, val, memory_order_seq_cst);
+
+ (void)__atomic_add_fetch(p, val, memory_order_relaxed);
+ (void)__atomic_add_fetch(p, val, memory_order_acquire);
+ (void)__atomic_add_fetch(p, val, memory_order_consume);
+ (void)__atomic_add_fetch(p, val, memory_order_release);
+ (void)__atomic_add_fetch(p, val, memory_order_acq_rel);
+ (void)__atomic_add_fetch(p, val, memory_order_seq_cst);
+
+ (void)__atomic_sub_fetch(p, val, memory_order_relaxed);
+ (void)__atomic_sub_fetch(p, val, memory_order_acquire);
+ (void)__atomic_sub_fetch(p, val, memory_order_consume);
+ (void)__atomic_sub_fetch(p, val, memory_order_release);
+ (void)__atomic_sub_fetch(p, val, memory_order_acq_rel);
+ (void)__atomic_sub_fetch(p, val, memory_order_seq_cst);
+
+ (void)__atomic_fetch_and(p, val, memory_order_relaxed);
+ (void)__atomic_fetch_and(p, val, memory_order_acquire);
+ (void)__atomic_fetch_and(p, val, memory_order_consume);
+ (void)__atomic_fetch_and(p, val, memory_order_release);
+ (void)__atomic_fetch_and(p, val, memory_order_acq_rel);
+ (void)__atomic_fetch_and(p, val, memory_order_seq_cst);
+
+ (void)__atomic_fetch_or(p, val, memory_order_relaxed);
+ (void)__atomic_fetch_or(p, val, memory_order_acquire);
+ (void)__atomic_fetch_or(p, val, memory_order_consume);
+ (void)__atomic_fetch_or(p, val, memory_order_release);
+ (void)__atomic_fetch_or(p, val, memory_order_acq_rel);
+ (void)__atomic_fetch_or(p, val, memory_order_seq_cst);
+
+ (void)__atomic_fetch_xor(p, val, memory_order_relaxed);
+ (void)__atomic_fetch_xor(p, val, memory_order_acquire);
+ (void)__atomic_fetch_xor(p, val, memory_order_consume);
+ (void)__atomic_fetch_xor(p, val, memory_order_release);
+ (void)__atomic_fetch_xor(p, val, memory_order_acq_rel);
+ (void)__atomic_fetch_xor(p, val, memory_order_seq_cst);
+
+ (void)__atomic_fetch_nand(p, val, memory_order_relaxed);
+ (void)__atomic_fetch_nand(p, val, memory_order_acquire);
+ (void)__atomic_fetch_nand(p, val, memory_order_consume);
+ (void)__atomic_fetch_nand(p, val, memory_order_release);
+ (void)__atomic_fetch_nand(p, val, memory_order_acq_rel);
+ (void)__atomic_fetch_nand(p, val, memory_order_seq_cst);
+
+ (void)__atomic_and_fetch(p, val, memory_order_relaxed);
+ (void)__atomic_and_fetch(p, val, memory_order_acquire);
+ (void)__atomic_and_fetch(p, val, memory_order_consume);
+ (void)__atomic_and_fetch(p, val, memory_order_release);
+ (void)__atomic_and_fetch(p, val, memory_order_acq_rel);
+ (void)__atomic_and_fetch(p, val, memory_order_seq_cst);
+
+ (void)__atomic_or_fetch(p, val, memory_order_relaxed);
+ (void)__atomic_or_fetch(p, val, memory_order_acquire);
+ (void)__atomic_or_fetch(p, val, memory_order_consume);
+ (void)__atomic_or_fetch(p, val, memory_order_release);
+ (void)__atomic_or_fetch(p, val, memory_order_acq_rel);
+ (void)__atomic_or_fetch(p, val, memory_order_seq_cst);
+
+ (void)__atomic_xor_fetch(p, val, memory_order_relaxed);
+ (void)__atomic_xor_fetch(p, val, memory_order_acquire);
+ (void)__atomic_xor_fetch(p, val, memory_order_consume);
+ (void)__atomic_xor_fetch(p, val, memory_order_release);
+ (void)__atomic_xor_fetch(p, val, memory_order_acq_rel);
+ (void)__atomic_xor_fetch(p, val, memory_order_seq_cst);
+
+ (void)__atomic_nand_fetch(p, val, memory_order_relaxed);
+ (void)__atomic_nand_fetch(p, val, memory_order_acquire);
+ (void)__atomic_nand_fetch(p, val, memory_order_consume);
+ (void)__atomic_nand_fetch(p, val, memory_order_release);
+ (void)__atomic_nand_fetch(p, val, memory_order_acq_rel);
+ (void)__atomic_nand_fetch(p, val, memory_order_seq_cst);
+
+ (void)__atomic_exchange_n(p, val, memory_order_relaxed);
+ (void)__atomic_exchange_n(p, val, memory_order_acquire);
+ (void)__atomic_exchange_n(p, val, memory_order_consume);
+ (void)__atomic_exchange_n(p, val, memory_order_release);
+ (void)__atomic_exchange_n(p, val, memory_order_acq_rel);
+ (void)__atomic_exchange_n(p, val, memory_order_seq_cst);
+
+ (void)__atomic_exchange(p, p, p, memory_order_relaxed);
+ (void)__atomic_exchange(p, p, p, memory_order_acquire);
+ (void)__atomic_exchange(p, p, p, memory_order_consume);
+ (void)__atomic_exchange(p, p, p, memory_order_release);
+ (void)__atomic_exchange(p, p, p, memory_order_acq_rel);
+ (void)__atomic_exchange(p, p, p, memory_order_seq_cst);
+
+ (void)__atomic_compare_exchange(p, p, p, 0, memory_order_relaxed, memory_order_relaxed);
+ (void)__atomic_compare_exchange(p, p, p, 0, memory_order_acquire, memory_order_relaxed);
+ (void)__atomic_compare_exchange(p, p, p, 0, memory_order_consume, memory_order_relaxed);
+ (void)__atomic_compare_exchange(p, p, p, 0, memory_order_release, memory_order_relaxed);
+ (void)__atomic_compare_exchange(p, p, p, 0, memory_order_acq_rel, memory_order_relaxed);
+ (void)__atomic_compare_exchange(p, p, p, 0, memory_order_seq_cst, memory_order_relaxed);
+
+ (void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_relaxed, memory_order_relaxed);
+ (void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_acquire, memory_order_relaxed);
+ (void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_consume, memory_order_relaxed);
+ (void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_release, memory_order_relaxed);
+ (void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_acq_rel, memory_order_relaxed);
+ (void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_seq_cst, memory_order_relaxed);
+}