r233513 - [OPENMP] Codegen for 'atomic update' construct.
Alexey Bataev
a.bataev at hotmail.com
Sun Mar 29 22:20:59 PDT 2015
Author: abataev
Date: Mon Mar 30 00:20:59 2015
New Revision: 233513
URL: http://llvm.org/viewvc/llvm-project?rev=233513&view=rev
Log:
[OPENMP] Codegen for 'atomic update' construct.
Adds atomic update codegen for the following forms of expressions:
x binop= expr;
x++;
++x;
x--;
--x;
x = x binop expr;
x = expr binop x;
If 'x' and 'expr' are integers, 'binop' is associative (or 'x' is the LHS within the RHS of the assignment expression), and atomics are allowed for the type of 'x' on the target platform, an atomicrmw instruction is emitted.
Otherwise, a compare-and-swap sequence is emitted:
bb:
...
atomic load <x>
cont:
<expected> = phi [ <x>, label %bb ], [ <new_failed>, %cont ]
<desired> = <expected> binop <expr>
<res> = cmpxchg atomic &<x>, <expected>, <desired>
<new_failed> = <res>.field1
br <res>.field2, label %exit, label %cont
exit:
...
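
For illustration, a minimal C++ example of the two strategies (variable names are hypothetical; the lowering noted in the comments assumes integer operands and a target with native 32-bit atomics):

int x = 0;
int expr = 5;

void update() {
  // Compatible operation on integer operands: expected to lower to a
  // single 'atomicrmw add' instruction.
#pragma omp atomic update
  x += expr;

  // Non-commutative operation with 'x' on the right-hand side of the
  // update expression: expected to lower to the atomic load / cmpxchg
  // retry loop shown above.
#pragma omp atomic update
  x = expr - x;
}
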
Differential Revision: http://reviews.llvm.org/D8536
Added:
cfe/trunk/test/OpenMP/atomic_update_codegen.cpp (with props)
Modified:
cfe/trunk/include/clang/AST/StmtOpenMP.h
cfe/trunk/lib/AST/Stmt.cpp
cfe/trunk/lib/CodeGen/CGAtomic.cpp
cfe/trunk/lib/CodeGen/CGExprScalar.cpp
cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp
cfe/trunk/lib/CodeGen/CodeGenFunction.h
cfe/trunk/lib/Sema/SemaOpenMP.cpp
cfe/trunk/lib/Serialization/ASTReaderStmt.cpp
cfe/trunk/lib/Serialization/ASTWriterStmt.cpp
cfe/trunk/test/OpenMP/atomic_codegen.cpp
Modified: cfe/trunk/include/clang/AST/StmtOpenMP.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/AST/StmtOpenMP.h?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/include/clang/AST/StmtOpenMP.h (original)
+++ cfe/trunk/include/clang/AST/StmtOpenMP.h Mon Mar 30 00:20:59 2015
@@ -1582,8 +1582,17 @@ public:
///
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
- /// \brief Binary operator for update and capture constructs.
- BinaryOperatorKind OpKind;
+ /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
+ /// have atomic expressions of forms
+ /// \code
+ /// x = x binop expr;
+ /// x = expr binop x;
+ /// \endcode
+ /// This field is true for the first form of the expression and false for the
+ /// second. Required for correct codegen of non-associative operations (like
+ /// << or >>).
+ bool IsXLHSInRHSPart;
+
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -1593,7 +1602,8 @@ class OMPAtomicDirective : public OMPExe
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
- StartLoc, EndLoc, NumClauses, 5) {}
+ StartLoc, EndLoc, NumClauses, 5),
+ IsXLHSInRHSPart(false) {}
/// \brief Build an empty directive.
///
@@ -1602,15 +1612,15 @@ class OMPAtomicDirective : public OMPExe
explicit OMPAtomicDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
SourceLocation(), SourceLocation(), NumClauses,
- 5) {}
+ 5),
+ IsXLHSInRHSPart(false) {}
- /// \brief Set operator kind for update and capture atomic constructs.
- void setOpKind(const BinaryOperatorKind BOK) { OpKind = BOK; }
/// \brief Set 'x' part of the associated expression/statement.
void setX(Expr *X) { *std::next(child_begin()) = X; }
- /// \brief Set 'x' rvalue used in update and capture atomic constructs for
- /// proper update expression generation.
- void setXRVal(Expr *XRVal) { *std::next(child_begin(), 2) = XRVal; }
+ /// \brief Set helper expression of the form
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
/// \brief Set 'v' part of the associated expression/statement.
void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
/// \brief Set 'expr' part of the associated expression/statement.
@@ -1626,19 +1636,18 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
- /// \param OpKind Binary operator used for updating of 'x' part of the
- /// expression in update and capture atomic constructs.
/// \param X 'x' part of the associated expression/statement.
- /// \param XRVal 'x' rvalue expression used in update and capture constructs
- /// for proper update expression generation. Used to read original value of
- /// the 'x' part of the expression.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
- ///
+ /// \param UE Helper expression of the form
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
+ /// second.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- BinaryOperatorKind OpKind, Expr *X, Expr *XRVal, Expr *V, Expr *E);
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
+ Expr *E, Expr *UE, bool IsXLHSInRHSPart);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
@@ -1649,19 +1658,24 @@ public:
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
- /// \brief Get binary operation for update or capture atomic constructs.
- BinaryOperatorKind getOpKind() const { return OpKind; }
/// \brief Get 'x' part of the associated expression/statement.
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
const Expr *getX() const {
return cast_or_null<Expr>(*std::next(child_begin()));
}
- /// \brief Get 'x' rvalue used in update and capture atomic constructs for
- /// proper update expression generation.
- Expr *getXRVal() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); }
- const Expr *getXRVal() const {
+ /// \brief Get helper expression of the form
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ Expr *getUpdateExpr() {
+ return cast_or_null<Expr>(*std::next(child_begin(), 2));
+ }
+ const Expr *getUpdateExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
+ /// \brief Return true if helper update expression has form
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
/// \brief Get 'v' part of the associated expression/statement.
Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
const Expr *getV() const {
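
As a standalone illustration (plain C++, not Clang code) of why the new IsXLHSInRHSPart flag is needed for non-associative operations such as '<<':

#include <cassert>

int main() {
  int x = 2, expr = 3;

  int lhsForm = x << expr; // form 'x = x binop expr' -> 16
  int rhsForm = expr << x; // form 'x = expr binop x' -> 12

  assert(lhsForm != rhsForm);
  // isXLHSInRHSPart() records which form the helper update expression was
  // built from, so codegen can substitute the atomically loaded value of
  // 'x' into the correct operand slot.
  return 0;
}
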
Modified: cfe/trunk/lib/AST/Stmt.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/AST/Stmt.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/AST/Stmt.cpp (original)
+++ cfe/trunk/lib/AST/Stmt.cpp Mon Mar 30 00:20:59 2015
@@ -2010,21 +2010,21 @@ OMPOrderedDirective *OMPOrderedDirective
OMPAtomicDirective *
OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, BinaryOperatorKind OpKind,
- Expr *X, Expr *XRVal, Expr *V, Expr *E) {
+ Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E,
+ Expr *UE, bool IsXLHSInRHSPart) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
5 * sizeof(Stmt *));
OMPAtomicDirective *Dir =
new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setOpKind(OpKind);
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setX(X);
- Dir->setXRVal(X);
Dir->setV(V);
Dir->setExpr(E);
+ Dir->setUpdateExpr(UE);
+ Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
return Dir;
}
Modified: cfe/trunk/lib/CodeGen/CGAtomic.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGAtomic.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGAtomic.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGAtomic.cpp Mon Mar 30 00:20:59 2015
@@ -209,7 +209,7 @@ namespace {
/// \param IsWeak true if atomic operation is weak, false otherwise.
/// \returns Pair of values: previous value from storage (value type) and
/// boolean flag (i1 type) with true if success and false otherwise.
- std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchange(
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
RValue Expected, RValue Desired,
llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
@@ -235,13 +235,13 @@ namespace {
/// \brief Emits atomic load as LLVM instruction.
llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
/// \brief Emits atomic compare-and-exchange op as a libcall.
- std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeLibcall(
- llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchangeLibcall(
+ RValue Expected, RValue DesiredAddr,
llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
/// \brief Emits atomic compare-and-exchange op as LLVM instruction.
- std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
- llvm::Value *Expected, llvm::Value *Desired,
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchangeOp(
+ RValue Expected, RValue Desired,
llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
bool IsWeak = false);
@@ -1291,7 +1291,7 @@ llvm::Value *AtomicInfo::convertRValueTo
if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
llvm::Value *Value = RVal.getScalarVal();
if (isa<llvm::IntegerType>(Value->getType()))
- return Value;
+ return CGF.EmitToMemory(Value, ValueTy);
else {
llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
CGF.getLLVMContext(),
@@ -1312,13 +1312,15 @@ llvm::Value *AtomicInfo::convertRValueTo
getAtomicAlignment().getQuantity());
}
-std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
- llvm::Value *Expected, llvm::Value *Desired, llvm::AtomicOrdering Success,
+std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
+ RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
llvm::AtomicOrdering Failure, bool IsWeak) {
// Do the atomic store.
+ auto *ExpectedVal = convertRValueToInt(Expected);
+ auto *DesiredVal = convertRValueToInt(Desired);
auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
- auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, Expected, Desired, Success,
- Failure);
+ auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
+ Success, Failure);
// Other decoration.
Inst->setVolatile(LVal.isVolatileQualified());
Inst->setWeak(IsWeak);
@@ -1326,16 +1328,20 @@ std::pair<llvm::Value *, llvm::Value *>
// Okay, turn that back into the original value type.
auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
- return std::make_pair(PreviousVal, SuccessFailureVal);
+ return std::make_pair(
+ ConvertIntToValueOrAtomic(PreviousVal, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
+ SuccessFailureVal);
}
-std::pair<llvm::Value *, llvm::Value *>
-AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
- llvm::Value *DesiredAddr,
+std::pair<RValue, llvm::Value *>
+AtomicInfo::EmitAtomicCompareExchangeLibcall(RValue Expected, RValue Desired,
llvm::AtomicOrdering Success,
llvm::AtomicOrdering Failure) {
// bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
// void *desired, int success, int failure);
+ auto *ExpectedAddr = materializeRValue(Expected);
+ auto *DesiredAddr = materializeRValue(Desired);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
@@ -1352,12 +1358,14 @@ AtomicInfo::EmitAtomicCompareExchangeLib
CGF.getContext().IntTy);
auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
CGF.getContext().BoolTy, Args);
- auto *PreviousVal = CGF.Builder.CreateAlignedLoad(
- ExpectedAddr, getValueAlignment().getQuantity());
- return std::make_pair(PreviousVal, SuccessFailureRVal.getScalarVal());
+
+ return std::make_pair(
+ convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
+ SuccessFailureRVal.getScalarVal());
}
-std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
+std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
llvm::AtomicOrdering Failure, bool IsWeak) {
if (Failure >= Success)
@@ -1366,20 +1374,15 @@ std::pair<llvm::Value *, llvm::Value *>
// Check whether we should use a library call.
if (shouldUseLibcall()) {
- auto *ExpectedAddr = materializeRValue(Expected);
// Produce a source address.
- auto *DesiredAddr = materializeRValue(Desired);
- return EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, Success,
+ return EmitAtomicCompareExchangeLibcall(Expected, Desired, Success,
Failure);
}
// If we've got a scalar value of the right size, try to avoid going
// through memory.
- auto *ExpectedIntVal = convertRValueToInt(Expected);
- auto *DesiredIntVal = convertRValueToInt(Desired);
-
- return EmitAtomicCompareExchangeOp(ExpectedIntVal, DesiredIntVal, Success,
- Failure, IsWeak);
+ return EmitAtomicCompareExchangeOp(Expected, Desired, Success, Failure,
+ IsWeak);
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
@@ -1498,20 +1501,14 @@ void CodeGenFunction::EmitAtomicStore(RV
atomics.getAtomicType(), SourceLocation()));
// Try to write new value using cmpxchg operation
auto Pair = atomics.EmitAtomicCompareExchange(OriginalRValue, NewRValue, AO);
- llvm::Value *OldValue = Pair.first;
- if (!atomics.shouldUseLibcall())
- // Convert integer value to original atomic type
- OldValue = atomics.ConvertIntToValueOrAtomic(
- OldValue, AggValueSlot::ignored(), SourceLocation(),
- /*AsValue=*/false).getScalarVal();
- PHI->addIncoming(OldValue, ContBB);
+ PHI->addIncoming(Pair.first.getScalarVal(), ContBB);
Builder.CreateCondBr(Pair.second, ExitBB, ContBB);
EmitBlock(ExitBB, /*IsFinished=*/true);
}
/// Emit a compare-and-exchange op for atomic type.
///
-std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
+std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
AggValueSlot Slot) {
@@ -1525,13 +1522,78 @@ std::pair<RValue, RValue> CodeGenFunctio
Obj.getAddress()->getType()->getPointerElementType());
AtomicInfo Atomics(*this, Obj);
- auto Pair = Atomics.EmitAtomicCompareExchange(Expected, Desired, Success,
- Failure, IsWeak);
- return std::make_pair(Atomics.shouldUseLibcall()
- ? RValue::get(Pair.first)
- : Atomics.ConvertIntToValueOrAtomic(
- Pair.first, Slot, Loc, /*AsValue=*/true),
- RValue::get(Pair.second));
+ return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
+ IsWeak);
+}
+
+void CodeGenFunction::EmitAtomicUpdate(
+ LValue LVal, llvm::AtomicOrdering AO,
+ const std::function<RValue(RValue)> &UpdateOp, bool IsVolatile) {
+ AtomicInfo Atomics(*this, LVal);
+ LValue AtomicLVal = Atomics.getAtomicLValue();
+
+ // Atomic load of prev value.
+ RValue OldRVal =
+ Atomics.EmitAtomicLoad(AggValueSlot::ignored(), SourceLocation(),
+ /*AsValue=*/false, AO, IsVolatile);
+ bool IsScalar = OldRVal.isScalar();
+ auto *OldVal =
+ IsScalar ? OldRVal.getScalarVal() : Atomics.convertRValueToInt(OldRVal);
+ // For non-simple lvalues perform compare-and-swap procedure.
+ auto *ContBB = createBasicBlock("atomic_cont");
+ auto *ExitBB = createBasicBlock("atomic_exit");
+ auto *CurBB = Builder.GetInsertBlock();
+ EmitBlock(ContBB);
+ llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(),
+ /*NumReservedValues=*/2);
+ PHI->addIncoming(OldVal, CurBB);
+ RValue OriginalRValue =
+ IsScalar ? RValue::get(PHI) : Atomics.ConvertIntToValueOrAtomic(
+ PHI, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
+ // Build new lvalue for temp address
+ LValue UpdateLVal;
+ llvm::Value *Ptr = nullptr;
+ RValue UpRVal;
+ if (AtomicLVal.isSimple()) {
+ UpRVal = OriginalRValue;
+ } else {
+ // Build new lvalue for temp address
+ Ptr = Atomics.materializeRValue(OriginalRValue);
+ if (AtomicLVal.isBitField())
+ UpdateLVal =
+ LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(), AtomicLVal.getAlignment());
+ else if (AtomicLVal.isVectorElt())
+ UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ else {
+ assert(AtomicLVal.isExtVectorElt());
+ UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ }
+ UpdateLVal.setTBAAInfo(LVal.getTBAAInfo());
+ UpRVal = EmitLoadOfLValue(UpdateLVal, SourceLocation());
+ }
+ // Store new value in the corresponding memory area
+ RValue NewRVal = UpdateOp(UpRVal);
+ if (!AtomicLVal.isSimple()) {
+ EmitStoreThroughLValue(NewRVal, UpdateLVal);
+ // Load new value
+ NewRVal = RValue::get(
+ EmitLoadOfScalar(Ptr, AtomicLVal.isVolatile(),
+ Atomics.getAtomicAlignment().getQuantity(),
+ Atomics.getAtomicType(), SourceLocation()));
+ }
+ // Try to write new value using cmpxchg operation
+ auto Pair = Atomics.EmitAtomicCompareExchange(OriginalRValue, NewRVal, AO);
+ OldVal = IsScalar ? Pair.first.getScalarVal()
+ : Atomics.convertRValueToInt(Pair.first);
+ PHI->addIncoming(OldVal, ContBB);
+ Builder.CreateCondBr(Pair.second, ExitBB, ContBB);
+ EmitBlock(ExitBB, /*IsFinished=*/true);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
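
For readers unfamiliar with the loop shape that EmitAtomicUpdate produces, a user-level analogue in plain C++ (std::atomic instead of emitted IR; it mirrors only the control flow, not the RValue/LValue handling above):

#include <atomic>
#include <functional>

template <typename T>
void atomicUpdate(std::atomic<T> &X, const std::function<T(T)> &UpdateOp) {
  T Expected = X.load(std::memory_order_relaxed); // "atomic load <x>"
  T Desired;
  do {
    Desired = UpdateOp(Expected);                 // "<expected> binop <expr>"
    // On failure, compare_exchange_strong reloads Expected, playing the role
    // of the PHI node that feeds the retry block in the emitted IR.
  } while (!X.compare_exchange_strong(Expected, Desired,
                                      std::memory_order_seq_cst));
}

int main() {
  std::atomic<int> X{10};
  atomicUpdate<int>(X, [](int Old) { return Old << 1; });
  return X.load() == 20 ? 0 : 1;
}
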
Modified: cfe/trunk/lib/CodeGen/CGExprScalar.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExprScalar.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExprScalar.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExprScalar.cpp Mon Mar 30 00:20:59 2015
@@ -1845,10 +1845,9 @@ ScalarExprEmitter::EmitScalarPrePostIncD
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
auto Pair = CGF.EmitAtomicCompareExchange(
- LV, RValue::get(atomicPHI), RValue::get(CGF.EmitToMemory(value, type)),
- E->getExprLoc());
- llvm::Value *old = Pair.first.getScalarVal();
- llvm::Value *success = Pair.second.getScalarVal();
+ LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
+ llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
+ llvm::Value *success = Pair.second;
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
@@ -2189,10 +2188,9 @@ LValue ScalarExprEmitter::EmitCompoundAs
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
auto Pair = CGF.EmitAtomicCompareExchange(
- LHSLV, RValue::get(atomicPHI),
- RValue::get(CGF.EmitToMemory(Result, LHSTy)), E->getExprLoc());
- llvm::Value *old = Pair.first.getScalarVal();
- llvm::Value *success = Pair.second.getScalarVal();
+ LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
+ llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
+ llvm::Value *success = Pair.second;
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
Modified: cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp Mon Mar 30 00:20:59 2015
@@ -1119,9 +1119,136 @@ static void EmitOMPAtomicWriteExpr(CodeG
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
+static Optional<llvm::AtomicRMWInst::BinOp>
+getCompatibleAtomicRMWBinOp(ASTContext &Context, BinaryOperatorKind Op,
+ bool IsXLHSInRHSPart, LValue XLValue,
+ RValue ExprRValue) {
+ Optional<llvm::AtomicRMWInst::BinOp> RMWOp;
+ // Allow atomicrmw only if 'x' and 'expr' are integer values, lvalue for 'x'
+ // expression is simple and atomic is allowed for the given type for the
+ // target platform.
+ if (ExprRValue.isScalar() &&
+ ExprRValue.getScalarVal()->getType()->isIntegerTy() &&
+ XLValue.isSimple() &&
+ (isa<llvm::ConstantInt>(ExprRValue.getScalarVal()) ||
+ (ExprRValue.getScalarVal()->getType() ==
+ XLValue.getAddress()->getType()->getPointerElementType())) &&
+ Context.getTargetInfo().hasBuiltinAtomic(
+ Context.getTypeSize(XLValue.getType()),
+ Context.toBits(XLValue.getAlignment()))) {
+ switch (Op) {
+ case BO_Add:
+ RMWOp = llvm::AtomicRMWInst::Add;
+ break;
+ case BO_Sub:
+ if (IsXLHSInRHSPart) {
+ RMWOp = llvm::AtomicRMWInst::Sub;
+ }
+ break;
+ case BO_And:
+ RMWOp = llvm::AtomicRMWInst::And;
+ break;
+ case BO_Or:
+ RMWOp = llvm::AtomicRMWInst::Or;
+ break;
+ case BO_Xor:
+ RMWOp = llvm::AtomicRMWInst::Xor;
+ break;
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Shl:
+ case BO_Shr:
+ break;
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ case BO_Comma:
+ llvm_unreachable("Unexpected binary operation in 'atomic update'.");
+ }
+ }
+ return std::move(RMWOp);
+}
+
+static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
+ const Expr *X, const Expr *E,
+ const Expr *UE, bool IsXLHSInRHSPart,
+ SourceLocation Loc) {
+ assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
+ "Update expr in 'atomic update' must be a binary operator.");
+ auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+ // Update expressions are allowed to have the following forms:
+ // x binop= expr; -> xrval + expr;
+ // x++, ++x -> xrval + 1;
+ // x--, --x -> xrval - 1;
+ // x = x binop expr; -> xrval binop expr
+ // x = expr Op x; - > expr binop xrval;
+ assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
+ LValue XLValue = CGF.EmitLValue(X);
+ RValue ExprRValue = CGF.EmitAnyExpr(E);
+ const auto &Op =
+ getCompatibleAtomicRMWBinOp(CGF.CGM.getContext(), BOUE->getOpcode(),
+ IsXLHSInRHSPart, XLValue, ExprRValue);
+ auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+ if (Op) {
+ auto *ExprVal = ExprRValue.getScalarVal();
+ if (auto *IC = dyn_cast<llvm::ConstantInt>(ExprVal)) {
+ ExprVal = CGF.Builder.CreateIntCast(
+ IC, XLValue.getAddress()->getType()->getPointerElementType(),
+ XLValue.getType()->hasSignedIntegerRepresentation());
+ }
+ CGF.Builder.CreateAtomicRMW(*Op, XLValue.getAddress(), ExprVal, AO);
+ } else {
+ auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+ auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+ CodeGenFunction::OpaqueValueMapping MapExpr(
+ CGF, IsXLHSInRHSPart ? RHS : LHS, ExprRValue);
+ auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+ if (XLValue.isGlobalReg()) {
+ // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
+ // 'xrval'.
+ CodeGenFunction::OpaqueValueMapping MapX(
+ CGF, XRValExpr, CGF.EmitLoadOfLValue(XLValue, Loc));
+ CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(UE), XLValue);
+ } else {
+ // Perform compare-and-swap procedure.
+ CGF.EmitAtomicUpdate(
+ XLValue, AO, [&CGF, &UE, &XRValExpr](RValue XRVal) -> RValue {
+ CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRVal);
+ return CGF.EmitAnyExpr(UE);
+ }, /*IsVolatile=*/false);
+ }
+ }
+ // OpenMP, 2.12.6, atomic Construct
+ // Any atomic construct with a seq_cst clause forces the atomically
+ // performed operation to include an implicit flush operation without a
+ // list.
+ if (IsSeqCst)
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+}
+
static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
bool IsSeqCst, const Expr *X, const Expr *V,
- const Expr *E, SourceLocation Loc) {
+ const Expr *E, const Expr *UE,
+ bool IsXLHSInRHSPart, SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
@@ -1129,7 +1256,10 @@ static void EmitOMPAtomicExpr(CodeGenFun
case OMPC_write:
EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
break;
+ case OMPC_unknown:
case OMPC_update:
+ EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
+ break;
case OMPC_capture:
llvm_unreachable("CodeGen for 'omp atomic clause' is not supported yet.");
case OMPC_if:
@@ -1156,7 +1286,6 @@ static void EmitOMPAtomicExpr(CodeGenFun
case OMPC_untied:
case OMPC_threadprivate:
case OMPC_mergeable:
- case OMPC_unknown:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
@@ -1179,7 +1308,7 @@ void CodeGenFunction::EmitOMPAtomicDirec
InlinedOpenMPRegionScopeRAII Region(*this, S);
EmitOMPAtomicExpr(*this, Kind, IsSeqCst, S.getX(), S.getV(), S.getExpr(),
- S.getLocStart());
+ S.getUpdateExpr(), S.isXLHSInRHSPart(), S.getLocStart());
}
void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
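
A simplified sketch (plain C++, not the patch's code) of the decision getCompatibleAtomicRMWBinOp makes: only add, sub, and, or and xor on integer operands map to an atomicrmw instruction, sub only when 'x' is the LHS of the update because subtraction is not commutative, and everything else falls back to the cmpxchg loop:

#include <optional>
#include <string>

// 'Op' is a single-character stand-in for the binary operator ('<' means '<<').
std::optional<std::string> rmwOpFor(char Op, bool IsXLHSInRHSPart) {
  switch (Op) {
  case '+': return "atomicrmw add";
  case '-': return IsXLHSInRHSPart
                       ? std::optional<std::string>("atomicrmw sub")
                       : std::nullopt;
  case '&': return "atomicrmw and";
  case '|': return "atomicrmw or";
  case '^': return "atomicrmw xor";
  default:  return std::nullopt; // *, /, %, <<, >> use the cmpxchg loop.
  }
}

int main() {
  bool OK = rmwOpFor('+', false).has_value() &&  // x = expr + x -> atomicrmw
            !rmwOpFor('-', false).has_value() && // x = expr - x -> cmpxchg
            !rmwOpFor('<', true).has_value();    // x <<= expr   -> cmpxchg
  return OK ? 0 : 1;
}
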
Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Mon Mar 30 00:20:59 2015
@@ -2123,12 +2123,16 @@ public:
void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
bool IsVolatile, bool isInit);
- std::pair<RValue, RValue> EmitAtomicCompareExchange(
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
+ void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
+ const std::function<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+
/// EmitToMemory - Change a scalar value from its value
/// representation to its in-memory representation.
llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
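
A standalone analogue (plain C++ again, without Clang's RValue machinery) of the updated EmitAtomicCompareExchange return type, where the first element carries the previous value and the second the success flag:

#include <atomic>
#include <utility>

template <typename T>
std::pair<T, bool> compareExchange(std::atomic<T> &Obj, T Expected, T Desired) {
  bool Success = Obj.compare_exchange_strong(Expected, Desired);
  return {Expected, Success}; // 'Expected' now holds the previous value.
}

int main() {
  std::atomic<int> X{1};
  auto Result = compareExchange(X, 1, 2);
  return (Result.first == 1 && Result.second && X.load() == 2) ? 0 : 1;
}
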
Modified: cfe/trunk/lib/Sema/SemaOpenMP.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Sema/SemaOpenMP.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/Sema/SemaOpenMP.cpp (original)
+++ cfe/trunk/lib/Sema/SemaOpenMP.cpp Mon Mar 30 00:20:59 2015
@@ -3282,21 +3282,24 @@ class OpenMPAtomicUpdateChecker {
Sema &SemaRef;
/// \brief A location for note diagnostics (when error is found).
SourceLocation NoteLoc;
- /// \brief Atomic operation supposed to be performed on source expression.
- BinaryOperatorKind OpKind;
/// \brief 'x' lvalue part of the source atomic expression.
Expr *X;
- /// \brief 'x' rvalue part of the source atomic expression, used in the right
- /// hand side of the expression. We need this to properly generate RHS part of
- /// the source expression (x = x'rval' binop expr or x = expr binop x'rval').
- Expr *XRVal;
/// \brief 'expr' rvalue part of the source atomic expression.
Expr *E;
+ /// \brief Helper expression of the form
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ Expr *UpdateExpr;
+ /// \brief Is 'x' a LHS in a RHS part of full update expression. It is
+ /// important for non-associative operations.
+ bool IsXLHSInRHSPart;
+ BinaryOperatorKind Op;
+ SourceLocation OpLoc;
public:
OpenMPAtomicUpdateChecker(Sema &SemaRef)
- : SemaRef(SemaRef), OpKind(BO_PtrMemD), X(nullptr), XRVal(nullptr),
- E(nullptr) {}
+ : SemaRef(SemaRef), X(nullptr), E(nullptr), UpdateExpr(nullptr),
+ IsXLHSInRHSPart(false), Op(BO_PtrMemD) {}
/// \brief Check specified statement that it is suitable for 'atomic update'
/// constructs and extract 'x', 'expr' and Operation from the original
/// expression.
@@ -3306,13 +3309,16 @@ public:
bool checkStatement(Stmt *S, unsigned DiagId, unsigned NoteId);
/// \brief Return the 'x' lvalue part of the source atomic expression.
Expr *getX() const { return X; }
- /// \brief Return the 'x' rvalue part of the source atomic expression, used in
- /// the RHS part of the source expression.
- Expr *getXRVal() const { return XRVal; }
/// \brief Return the 'expr' rvalue part of the source atomic expression.
Expr *getExpr() const { return E; }
- /// \brief Return required atomic operation.
- BinaryOperatorKind getOpKind() const {return OpKind;}
+ /// \brief Return the update expression used in calculation of the updated
+ /// value. Always has form 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ Expr *getUpdateExpr() const { return UpdateExpr; }
+ /// \brief Return true if 'x' is LHS in RHS part of full update expression,
+ /// false otherwise.
+ bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
+
private:
bool checkBinaryOperation(BinaryOperator *AtomicBinOp, unsigned DiagId,
unsigned NoteId);
@@ -3334,7 +3340,8 @@ bool OpenMPAtomicUpdateChecker::checkBin
if (AtomicInnerBinOp->isMultiplicativeOp() ||
AtomicInnerBinOp->isAdditiveOp() || AtomicInnerBinOp->isShiftOp() ||
AtomicInnerBinOp->isBitwiseOp()) {
- OpKind = AtomicInnerBinOp->getOpcode();
+ Op = AtomicInnerBinOp->getOpcode();
+ OpLoc = AtomicInnerBinOp->getOperatorLoc();
auto *LHS = AtomicInnerBinOp->getLHS();
auto *RHS = AtomicInnerBinOp->getRHS();
llvm::FoldingSetNodeID XId, LHSId, RHSId;
@@ -3346,10 +3353,10 @@ bool OpenMPAtomicUpdateChecker::checkBin
/*Canonical=*/true);
if (XId == LHSId) {
E = RHS;
- XRVal = LHS;
+ IsXLHSInRHSPart = true;
} else if (XId == RHSId) {
E = LHS;
- XRVal = RHS;
+ IsXLHSInRHSPart = false;
} else {
ErrorLoc = AtomicInnerBinOp->getExprLoc();
ErrorRange = AtomicInnerBinOp->getSourceRange();
@@ -3381,7 +3388,7 @@ bool OpenMPAtomicUpdateChecker::checkBin
SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
return true;
} else if (SemaRef.CurContext->isDependentContext())
- E = X = XRVal = nullptr;
+ E = X = UpdateExpr = nullptr;
return false;
}
@@ -3405,26 +3412,26 @@ bool OpenMPAtomicUpdateChecker::checkSta
if (auto *AtomicCompAssignOp = dyn_cast<CompoundAssignOperator>(
AtomicBody->IgnoreParenImpCasts())) {
// Check for Compound Assignment Operation
- OpKind = BinaryOperator::getOpForCompoundAssignment(
+ Op = BinaryOperator::getOpForCompoundAssignment(
AtomicCompAssignOp->getOpcode());
- X = AtomicCompAssignOp->getLHS();
- XRVal = SemaRef.PerformImplicitConversion(
- X, AtomicCompAssignOp->getComputationLHSType(),
- Sema::AA_Casting, /*AllowExplicit=*/true).get();
+ OpLoc = AtomicCompAssignOp->getOperatorLoc();
E = AtomicCompAssignOp->getRHS();
+ X = AtomicCompAssignOp->getLHS();
+ IsXLHSInRHSPart = true;
} else if (auto *AtomicBinOp = dyn_cast<BinaryOperator>(
AtomicBody->IgnoreParenImpCasts())) {
// Check for Binary Operation
- return checkBinaryOperation(AtomicBinOp, DiagId, NoteId);
+ if(checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
+ return true;
} else if (auto *AtomicUnaryOp =
- // Check for Binary Operation
dyn_cast<UnaryOperator>(AtomicBody->IgnoreParenImpCasts())) {
// Check for Unary Operation
if (AtomicUnaryOp->isIncrementDecrementOp()) {
- OpKind = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub;
- XRVal = X = AtomicUnaryOp->getSubExpr();
- E = SemaRef.ActOnIntegerConstant(AtomicUnaryOp->getOperatorLoc(), 1)
- .get();
+ Op = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub;
+ OpLoc = AtomicUnaryOp->getOperatorLoc();
+ X = AtomicUnaryOp->getSubExpr();
+ E = SemaRef.ActOnIntegerConstant(OpLoc, /*uint64_t Val=*/1).get();
+ IsXLHSInRHSPart = true;
} else {
ErrorFound = NotAnUnaryIncDecExpression;
ErrorLoc = AtomicUnaryOp->getExprLoc();
@@ -3452,7 +3459,26 @@ bool OpenMPAtomicUpdateChecker::checkSta
SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
return true;
} else if (SemaRef.CurContext->isDependentContext())
- E = X = XRVal = nullptr;
+ E = X = UpdateExpr = nullptr;
+ if (E && X) {
+ // Build an update expression of form 'OpaqueValueExpr(x) binop
+ // OpaqueValueExpr(expr)' or 'OpaqueValueExpr(expr) binop
+ // OpaqueValueExpr(x)' and then cast it to the type of the 'x' expression.
+ auto *OVEX = new (SemaRef.getASTContext())
+ OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_RValue);
+ auto *OVEExpr = new (SemaRef.getASTContext())
+ OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_RValue);
+ auto Update =
+ SemaRef.CreateBuiltinBinOp(OpLoc, Op, IsXLHSInRHSPart ? OVEX : OVEExpr,
+ IsXLHSInRHSPart ? OVEExpr : OVEX);
+ if (Update.isInvalid())
+ return true;
+ Update = SemaRef.PerformImplicitConversion(Update.get(), X->getType(),
+ Sema::AA_Casting);
+ if (Update.isInvalid())
+ return true;
+ UpdateExpr = Update.get();
+ }
return false;
}
@@ -3490,11 +3516,11 @@ StmtResult Sema::ActOnOpenMPAtomicDirect
if (auto *EWC = dyn_cast<ExprWithCleanups>(Body))
Body = EWC->getSubExpr();
- BinaryOperatorKind OpKind = BO_PtrMemD;
Expr *X = nullptr;
- Expr *XRVal = nullptr;
Expr *V = nullptr;
Expr *E = nullptr;
+ Expr *UE = nullptr;
+ bool IsXLHSInRHSPart = false;
// OpenMP [2.12.6, atomic Construct]
// In the next expressions:
// * x and v (as applicable) are both l-value expressions with scalar type.
@@ -3652,8 +3678,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirect
if (!CurContext->isDependentContext()) {
E = Checker.getExpr();
X = Checker.getX();
- XRVal = Checker.getXRVal();
- OpKind = Checker.getOpKind();
+ UE = Checker.getUpdateExpr();
+ IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
}
} else if (AtomicKind == OMPC_capture) {
if (isa<Expr>(Body) && !isa<BinaryOperator>(Body)) {
@@ -3670,7 +3696,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirect
getCurFunction()->setHasBranchProtectedScope();
return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- OpKind, X, XRVal, V, E);
+ X, V, E, UE, IsXLHSInRHSPart);
}
StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
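
Conceptually, the OpaqueValueExpr-based helper that Sema now builds behaves like a two-parameter function whose operands codegen later binds to the atomically loaded 'x' and the evaluated 'expr'. A plain C++ model with hypothetical names, shown for a '<<' update:

#include <functional>

// The two lambda parameters stand in for the OpaqueValueExpr placeholders.
std::function<int(int, int)> buildUpdateExpr(bool IsXLHSInRHSPart) {
  if (IsXLHSInRHSPart)
    return [](int XVal, int ExprVal) { return XVal << ExprVal; }; // x << expr
  return [](int XVal, int ExprVal) { return ExprVal << XVal; };   // expr << x
}

int main() {
  auto Update = buildUpdateExpr(/*IsXLHSInRHSPart=*/true);
  return Update(2, 3) == 16 ? 0 : 1;
}
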
Modified: cfe/trunk/lib/Serialization/ASTReaderStmt.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Serialization/ASTReaderStmt.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/Serialization/ASTReaderStmt.cpp (original)
+++ cfe/trunk/lib/Serialization/ASTReaderStmt.cpp Mon Mar 30 00:20:59 2015
@@ -2157,11 +2157,11 @@ void ASTStmtReader::VisitOMPAtomicDirect
// The NumClauses field was read in ReadStmtFromStream.
++Idx;
VisitOMPExecutableDirective(D);
- D->setOpKind(static_cast<BinaryOperatorKind>(Record[Idx++]));
D->setX(Reader.ReadSubExpr());
- D->setXRVal(Reader.ReadSubExpr());
D->setV(Reader.ReadSubExpr());
D->setExpr(Reader.ReadSubExpr());
+ D->setUpdateExpr(Reader.ReadSubExpr());
+ D->IsXLHSInRHSPart = Record[Idx++] != 0;
}
void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
Modified: cfe/trunk/lib/Serialization/ASTWriterStmt.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Serialization/ASTWriterStmt.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/lib/Serialization/ASTWriterStmt.cpp (original)
+++ cfe/trunk/lib/Serialization/ASTWriterStmt.cpp Mon Mar 30 00:20:59 2015
@@ -2005,11 +2005,11 @@ void ASTStmtWriter::VisitOMPAtomicDirect
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.push_back(D->getOpKind());
Writer.AddStmt(D->getX());
- Writer.AddStmt(D->getXRVal());
Writer.AddStmt(D->getV());
Writer.AddStmt(D->getExpr());
+ Writer.AddStmt(D->getUpdateExpr());
+ Record.push_back(D->isXLHSInRHSPart() ? 1 : 0);
Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
}
Modified: cfe/trunk/test/OpenMP/atomic_codegen.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/atomic_codegen.cpp?rev=233513&r1=233512&r2=233513&view=diff
==============================================================================
--- cfe/trunk/test/OpenMP/atomic_codegen.cpp (original)
+++ cfe/trunk/test/OpenMP/atomic_codegen.cpp Mon Mar 30 00:20:59 2015
@@ -29,6 +29,22 @@ void parallel_atomic_ewc() {
// CHECK: invoke void @_ZN2StD1Ev(%struct.St* [[TEMP_ST_ADDR]])
#pragma omp atomic write
St().get() = b;
+ // CHECK: invoke void @_ZN2StC1Ev(%struct.St* [[TEMP_ST_ADDR:%.+]])
+ // CHECK: [[SCALAR_ADDR:%.+]] = invoke dereferenceable(4) i32* @_ZN2St3getEv(%struct.St* [[TEMP_ST_ADDR]])
+ // CHECK: [[B_VAL:%.+]] = load i32, i32* @b
+ // CHECK: [[OLD_VAL:%.+]] = load atomic i32, i32* [[SCALAR_ADDR]] monotonic,
+ // CHECK: br label %[[OMP_UPDATE:.+]]
+ // CHECK: [[OMP_UPDATE]]
+ // CHECK: [[OLD_PHI_VAL:%.+]] = phi i32 [ [[OLD_VAL]], %{{.+}} ], [ [[NEW_OLD_VAL:%.+]], %[[OMP_UPDATE]] ]
+ // CHECK: [[NEW_VAL:%.+]] = srem i32 [[OLD_PHI_VAL]], [[B_VAL]]
+ // CHECK: [[RES:%.+]] = cmpxchg i32* [[SCALAR_ADDR]], i32 [[OLD_PHI_VAL]], i32 [[NEW_VAL]] monotonic monotonic
+ // CHECK: [[NEW_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+ // CHECK: [[COND:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+ // CHECK: br i1 [[COND]], label %[[OMP_DONE:.+]], label %[[OMP_UPDATE]]
+ // CHECK: [[OMP_DONE]]
+ // CHECK: invoke void @_ZN2StD1Ev(%struct.St* [[TEMP_ST_ADDR]])
+#pragma omp atomic
+ St().get() %= b;
}
}
@@ -50,11 +66,19 @@ void parallel_atomic() {
// TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: store atomic i32 {{%.+}}, i32* @{{.+}} monotonic, {{.*}}!dbg [[WRITE_LOC:![0-9]+]]
- // TERM_DEBUG: [[TERM_LPAD]]
- // TERM_DEBUG: call void @__clang_call_terminate
- // TERM_DEBUG: unreachable
a = foo();
+#pragma omp atomic update
+ // TERM_DEBUG-NOT: __kmpc_global_thread_num
+ // TERM_DEBUG: invoke {{.*}}foo{{.*}}()
+ // TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
+ // TERM_DEBUG-NOT: __kmpc_global_thread_num
+ // TERM_DEBUG: atomicrmw add i32* @{{.+}}, i32 %{{.+}} monotonic, {{.*}}!dbg [[UPDATE_LOC:![0-9]+]]
+ a += foo();
}
+ // TERM_DEBUG: [[TERM_LPAD]]
+ // TERM_DEBUG: call void @__clang_call_terminate
+ // TERM_DEBUG: unreachable
}
-// TERM_DEBUG-DAG: [[READ_LOC]] = !MDLocation(line: 41,
-// TERM_DEBUG-DAG: [[WRITE_LOC]] = !MDLocation(line: 47,
+// TERM_DEBUG-DAG: [[READ_LOC]] = !MDLocation(line: [[@LINE-25]],
+// TERM_DEBUG-DAG: [[WRITE_LOC]] = !MDLocation(line: [[@LINE-20]],
+// TERM_DEBUG-DAG: [[UPDATE_LOC]] = !MDLocation(line: [[@LINE-14]],
Added: cfe/trunk/test/OpenMP/atomic_update_codegen.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/atomic_update_codegen.cpp?rev=233513&view=auto
==============================================================================
--- cfe/trunk/test/OpenMP/atomic_update_codegen.cpp (added)
+++ cfe/trunk/test/OpenMP/atomic_update_codegen.cpp Mon Mar 30 00:20:59 2015
@@ -0,0 +1,1070 @@
+// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp=libiomp5 -x c -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp=libiomp5 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp=libiomp5 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+// expected-no-diagnostics
+
+#ifndef HEADER
+#define HEADER
+
+_Bool bv, bx;
+char cv, cx;
+unsigned char ucv, ucx;
+short sv, sx;
+unsigned short usv, usx;
+int iv, ix;
+unsigned int uiv, uix;
+long lv, lx;
+unsigned long ulv, ulx;
+long long llv, llx;
+unsigned long long ullv, ullx;
+float fv, fx;
+double dv, dx;
+long double ldv, ldx;
+_Complex int civ, cix;
+_Complex float cfv, cfx;
+_Complex double cdv, cdx;
+
+typedef int int4 __attribute__((__vector_size__(16)));
+int4 int4x;
+
+struct BitFields {
+ int : 32;
+ int a : 31;
+} bfx;
+
+struct BitFields_packed {
+ int : 32;
+ int a : 31;
+} __attribute__ ((__packed__)) bfx_packed;
+
+struct BitFields2 {
+ int : 31;
+ int a : 1;
+} bfx2;
+
+struct BitFields2_packed {
+ int : 31;
+ int a : 1;
+} __attribute__ ((__packed__)) bfx2_packed;
+
+struct BitFields3 {
+ int : 11;
+ int a : 14;
+} bfx3;
+
+struct BitFields3_packed {
+ int : 11;
+ int a : 14;
+} __attribute__ ((__packed__)) bfx3_packed;
+
+struct BitFields4 {
+ short : 16;
+ int a: 1;
+ long b : 7;
+} bfx4;
+
+struct BitFields4_packed {
+ short : 16;
+ int a: 1;
+ long b : 7;
+} __attribute__ ((__packed__)) bfx4_packed;
+
+typedef float float2 __attribute__((ext_vector_type(2)));
+float2 float2x;
+
+register int rix __asm__("0");
+
+int main() {
+// CHECK: atomicrmw add i8* @{{.+}}, i8 1 monotonic
+#pragma omp atomic
+ bx++;
+// CHECK: atomicrmw add i8* @{{.+}}, i8 1 monotonic
+#pragma omp atomic update
+ ++cx;
+// CHECK: atomicrmw sub i8* @{{.+}}, i8 1 monotonic
+#pragma omp atomic
+ ucx--;
+// CHECK: atomicrmw sub i16* @{{.+}}, i16 1 monotonic
+#pragma omp atomic update
+ --sx;
+// CHECK: [[USV:%.+]] = load i16, i16* @{{.+}},
+// CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i32
+// CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = zext i16 [[EXPECTED]] to i32
+// CHECK: [[ADD:%.+]] = add nsw i32 [[CONV]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = trunc i32 [[ADD]] to i16
+// CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ usx += usv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = mul nsw i32 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ ix *= iv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: atomicrmw sub i32* @{{.+}}, i32 [[EXPR]] monotonic
+#pragma omp atomic
+ uix -= uiv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = shl i32 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ ix <<= iv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = lshr i32 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ uix >>= uiv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = sdiv i64 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ lx /= lv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: atomicrmw and i64* @{{.+}}, i64 [[EXPR]] monotonic
+#pragma omp atomic
+ ulx &= ulv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: atomicrmw xor i64* @{{.+}}, i64 [[EXPR]] monotonic
+#pragma omp atomic update
+ llx ^= llv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: atomicrmw or i64* @{{.+}}, i64 [[EXPR]] monotonic
+#pragma omp atomic
+ ullx |= ullv;
+// CHECK: [[EXPR:%.+]] = load float, float* @{{.+}},
+// CHECK: [[OLD:%.+]] = load atomic i32, i32* bitcast (float* [[X_ADDR:@.+]] to i32*) monotonic
+// CHECK: [[X:%.+]] = bitcast i32 [[OLD]] to float
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi float [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[ADD:%.+]] = fadd float [[OLD]], [[EXPR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast float [[OLD]] to i32
+// CHECK: [[DESIRED:%.+]] = bitcast float [[ADD]] to i32
+// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (float* [[X_ADDR]] to i32*), i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i32 [[PREV]] to float
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ fx = fx + fv;
+// CHECK: [[EXPR:%.+]] = load double, double* @{{.+}},
+// CHECK: [[OLD:%.+]] = load atomic i64, i64* bitcast (double* [[X_ADDR:@.+]] to i64*) monotonic
+// CHECK: [[X:%.+]] = bitcast i64 [[OLD]] to double
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi double [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[SUB:%.+]] = fsub double [[EXPR]], [[OLD]]
+// CHECK: [[EXPECTED:%.+]] = bitcast double [[OLD]] to i64
+// CHECK: [[DESIRED:%.+]] = bitcast double [[SUB]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (double* [[X_ADDR]] to i64*), i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i64 [[PREV]] to double
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ dx = dv - dx;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}},
+// CHECK: [[OLD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* [[X_ADDR:@.+]] to i128*) monotonic
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[OLD]], i128* [[BITCAST]]
+// CHECK: [[X:%.+]] = load x86_fp80, x86_fp80* [[TEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi x86_fp80 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[OLD]], [[EXPR]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[OLD]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[EXPECTED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[MUL]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[DESIRED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (x86_fp80* [[X_ADDR]] to i128*), i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[PREV]], i128* [[BITCAST]]
+// CHECK: [[OLD_X]] = load x86_fp80, x86_fp80* [[TEMP]],
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ ldx = ldx * ldv;
+// CHECK: [[EXPR_RE:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[LD_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[LD_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store i32 %{{.+}}, i32* [[X_RE_ADDR]]
+// CHECK: store i32 %{{.+}}, i32* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ cix = civ / cix;
+// CHECK: [[EXPR_RE:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ float, float }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load float, float* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load float, float* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: store float [[LD_RE]], float* [[LD_RE_ADDR]]
+// CHECK: store float [[LD_IM]], float* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load float, float* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load float, float* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store float [[X_RE]], float* [[X_RE_ADDR]]
+// CHECK: store float [[X_IM]], float* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store float %{{.+}}, float* [[X_RE_ADDR]]
+// CHECK: store float %{{.+}}, float* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { float, float }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { float, float }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ float, float }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load float, float* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load float, float* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: store float [[X_RE]], float* [[LD_RE_ADDR]]
+// CHECK: store float [[X_IM]], float* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ cfx = cfv + cfx;
+// CHECK: [[EXPR_RE:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 16, i8* bitcast ({ double, double }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 5)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load double, double* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load double, double* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: store double [[LD_RE]], double* [[LD_RE_ADDR]]
+// CHECK: store double [[LD_IM]], double* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i128*
+// CHECK: [[X:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i128 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[OLD]], i128* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load double, double* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load double, double* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store double [[X_RE]], double* [[X_RE_ADDR]]
+// CHECK: store double [[X_IM]], double* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store double %{{.+}}, double* [[X_RE_ADDR]]
+// CHECK: store double %{{.+}}, double* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { double, double }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { double, double }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* bitcast ({ double, double }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load double, double* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load double, double* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: store double [[X_RE]], double* [[LD_RE_ADDR]]
+// CHECK: store double [[X_IM]], double* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i128*
+// CHECK: [[OLD_X]] = load i128, i128* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic seq_cst
+ cdx = cdx - cdv;
+// CHECK: [[BV:%.+]] = load i8, i8* @{{.+}}
+// CHECK: [[BOOL:%.+]] = trunc i8 [[BV]] to i1
+// CHECK: [[EXPR:%.+]] = zext i1 [[BOOL]] to i64
+// CHECK: atomicrmw and i64* @{{.+}}, i64 [[EXPR]] monotonic
+#pragma omp atomic update
+ ulx = ulx & bv;
+// CHECK: [[CV:%.+]] = load i8, i8* @{{.+}}, align 1
+// CHECK: [[EXPR:%.+]] = sext i8 [[CV]] to i32
+// CHECK: [[BX:%.+]] = load atomic i8, i8* [[BX_ADDR:@.+]] monotonic
+// CHECK: [[X:%.+]] = trunc i8 [[BX]] to i1
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i1 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = zext i1 [[OLD]] to i32
+// CHECK: [[AND:%.+]] = and i32 [[EXPR]], [[X_RVAL]]
+// CHECK: [[CAST:%.+]] = icmp ne i32 [[AND]], 0
+// CHECK: [[EXPECTED:%.+]] = zext i1 [[OLD]] to i8
+// CHECK: [[DESIRED:%.+]] = zext i1 [[CAST]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[BX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = trunc i8 [[OLD]] to i1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bx = cv & bx;
+// CHECK: [[UCV:%.+]] = load i8, i8* @{{.+}},
+// CHECK: [[EXPR:%.+]] = zext i8 [[UCV]] to i32
+// CHECK: [[X:%.+]] = load atomic i8, i8* [[CX_ADDR:@.+]] seq_cst
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i8 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = sext i8 [[EXPECTED]] to i32
+// CHECK: [[ASHR:%.+]] = ashr i32 [[X_RVAL]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = trunc i32 [[ASHR]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[CX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] seq_cst seq_cst
+// CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic update, seq_cst
+ cx = cx >> ucv;
+// CHECK: [[SV:%.+]] = load i16, i16* @{{.+}},
+// CHECK: [[EXPR:%.+]] = sext i16 [[SV]] to i32
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[ULX_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = trunc i64 [[EXPECTED]] to i32
+// CHECK: [[SHL:%.+]] = shl i32 [[EXPR]], [[X_RVAL]]
+// CHECK: [[DESIRED:%.+]] = sext i32 [[SHL]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[ULX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ ulx = sv << ulx;
+// CHECK: [[USV:%.+]] = load i16, i16* @{{.+}},
+// CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i64
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[LX_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = srem i64 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[LX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ lx = lx % usv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}
+// CHECK: atomicrmw or i32* @{{.+}}, i32 [[EXPR]] seq_cst
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic seq_cst, update
+ uix = iv | uix;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}
+// CHECK: atomicrmw and i32* @{{.+}}, i32 [[EXPR]] monotonic
+#pragma omp atomic
+ ix = ix & uiv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[LD_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[LD_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store i32 %{{.+}}, i32* [[X_RE_ADDR]]
+// CHECK: store i32 %{{.+}}, i32* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ cix = lv + cix;
+// CHECK: [[ULV:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[EXPR:%.+]] = uitofp i64 [[ULV]] to float
+// CHECK: [[OLD:%.+]] = load atomic i32, i32* bitcast (float* [[X_ADDR:@.+]] to i32*) monotonic
+// CHECK: [[X:%.+]] = bitcast i32 [[OLD]] to float
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi float [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[MUL:%.+]] = fmul float [[OLD]], [[EXPR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast float [[OLD]] to i32
+// CHECK: [[DESIRED:%.+]] = bitcast float [[MUL]] to i32
+// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (float* [[X_ADDR]] to i32*), i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i32 [[PREV]] to float
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ fx = fx * ulv;
+// CHECK: [[LLV:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[EXPR:%.+]] = sitofp i64 [[LLV]] to double
+// CHECK: [[OLD:%.+]] = load atomic i64, i64* bitcast (double* [[X_ADDR:@.+]] to i64*) monotonic
+// CHECK: [[X:%.+]] = bitcast i64 [[OLD]] to double
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi double [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DIV:%.+]] = fdiv double [[OLD]], [[EXPR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast double [[OLD]] to i64
+// CHECK: [[DESIRED:%.+]] = bitcast double [[DIV]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (double* [[X_ADDR]] to i64*), i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i64 [[PREV]] to double
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ dx /= llv;
+// CHECK: [[ULLV:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[EXPR:%.+]] = uitofp i64 [[ULLV]] to x86_fp80
+// CHECK: [[OLD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* [[X_ADDR:@.+]] to i128*) monotonic
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[OLD]], i128* [[BITCAST]]
+// CHECK: [[X:%.+]] = load x86_fp80, x86_fp80* [[TEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi x86_fp80 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[OLD]], [[EXPR]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[OLD]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[EXPECTED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[SUB]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[DESIRED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (x86_fp80* [[X_ADDR]] to i128*), i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[PREV]], i128* [[BITCAST]]
+// CHECK: [[OLD_X]] = load x86_fp80, x86_fp80* [[TEMP]],
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ ldx -= ullv;
+// CHECK: [[EXPR:%.+]] = load float, float* @{{.+}},
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[LD_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[LD_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store i32 %{{.+}}, i32* [[X_RE_ADDR]]
+// CHECK: store i32 %{{.+}}, i32* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ cix = fv / cix;
+// CHECK: [[EXPR:%.+]] = load double, double* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = sext i16 [[EXPECTED]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to double
+// CHECK: [[ADD:%.+]] = fadd double [[X_RVAL]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = fptosi double [[ADD]] to i16
+// CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ sx = sx + dv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}},
+// CHECK: [[XI8:%.+]] = load atomic i8, i8* [[X_ADDR:@.+]] monotonic
+// CHECK: [[X:%.+]] = trunc i8 [[XI8]] to i1
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[BOOL_EXPECTED:%.+]] = phi i1 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to x86_fp80
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[EXPR]], [[X_RVAL]]
+// CHECK: [[BOOL_DESIRED:%.+]] = fcmp une x86_fp80 [[MUL]], 0xK00000000000000000000
+// CHECK: [[EXPECTED:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8
+// CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_XI8:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = trunc i8 [[OLD_XI8]] to i1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ bx = ldv * bx;
+// CHECK: [[EXPR_RE:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* [[CIV_ADDR:@.+]], i32 0, i32 0),
+// CHECK: [[EXPR_IM:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* [[CIV_ADDR]], i32 0, i32 1),
+// CHECK: [[XI8:%.+]] = load atomic i8, i8* [[X_ADDR:@.+]] monotonic
+// CHECK: [[X:%.+]] = trunc i8 [[XI8]] to i1
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[BOOL_EXPECTED:%.+]] = phi i1 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32
+// CHECK: [[SUB_RE:%.+]] = sub i32 [[EXPR_RE:%.+]], [[X_RVAL]]
+// CHECK: [[SUB_IM:%.+]] = sub i32 [[EXPR_IM:%.+]], 0
+// CHECK: icmp ne i32 [[SUB_RE]], 0
+// CHECK: icmp ne i32 [[SUB_IM]], 0
+// CHECK: [[BOOL_DESIRED:%.+]] = or i1
+// CHECK: [[EXPECTED:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8
+// CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_XI8:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = trunc i8 [[OLD_XI8]] to i1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bx = civ - bx;
+// CHECK: [[EXPR_RE:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = zext i16 [[EXPECTED]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to float
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load float, float* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load float, float* [[X_IM_ADDR]]
+// CHECK: [[DESIRED:%.+]] = fptoui float [[X_RE]] to i16
+// CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ usx /= cfv;
+// CHECK: [[EXPR_RE:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = sitofp i64 [[EXPECTED]] to double
+// CHECK: [[ADD_RE:%.+]] = fadd double [[X_RVAL]], [[EXPR_RE]]
+// CHECK: [[ADD_IM:%.+]] = fadd double 0.000000e+00, [[EXPR_IM]]
+// CHECK: [[DESIRED:%.+]] = fptosi double [[ADD_RE]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ llx += cdv;
+// CHECK: [[IDX:%.+]] = load i16, i16* @{{.+}}
+// CHECK: load i8, i8*
+// CHECK: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
+// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic
+// CHECK: [[LD:%.+]] = bitcast i128 [[I128VAL]] to <4 x i32>
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_VEC_VAL:%.+]] = phi <4 x i32> [ [[LD]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store <4 x i32> [[OLD_VEC_VAL]], <4 x i32>* [[LDTEMP:%.+]],
+// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[ITEM:%.+]] = extractelement <4 x i32> [[VEC_VAL]], i16 [[IDX]]
+// CHECK: [[OR:%.+]] = or i32 [[ITEM]], [[VEC_ITEM_VAL]]
+// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[OR]], i16 [[IDX]]
+// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
+// CHECK: [[NEW_VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[OLD_I128:%.+]] = bitcast <4 x i32> [[OLD_VEC_VAL]] to i128
+// CHECK: [[NEW_I128:%.+]] = bitcast <4 x i32> [[NEW_VEC_VAL]] to i128
+// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic
+// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK: [[FAILED_OLD_VAL]] = bitcast i128 [[FAILED_I128_OLD_VAL]] to <4 x i32>
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ int4x[sv] |= bv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
+// CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647
+// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bfx.a = bfx.a - ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0)
+// CHECK: [[PREV_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[MUL]] to i32
+// CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647
+// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP_OLD_BF_ADDR:%.+]],
+// CHECK: store i32 [[NEW_BF_VALUE]], i32* [[TEMP_NEW_BF_ADDR:%.+]],
+// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[TEMP_OLD_BF_ADDR]] to i8*
+// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[TEMP_NEW_BF_ADDR]] to i8*
+// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
+// CHECK: [[FAILED_OLD_VAL]] = load i32, i32* [[TEMP_OLD_BF_ADDR]]
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ bfx_packed.a *= ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_LD]], 31
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
+// CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i32 [[CONV]], 1
+// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
+// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], 2147483647
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bfx2.a -= ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i8*
+// CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 7
+// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80
+// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[EXPR]], [[X_RVAL]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32
+// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
+// CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
+// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
+// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
+// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ bfx2_packed.a = ldv / bfx2_packed.a;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 7
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 18
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32
+// CHECK: [[BF_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
+// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
+// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bfx3.a /= ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
+// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0)
+// CHECK: [[PREV_VALUE:%.+]] = load i24, i24* [[LDTEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i24 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: store i24 [[OLD_BF_VALUE]], i24* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i24, i24* [[BITCAST]],
+// CHECK: [[A_SHL:%.+]] = shl i24 [[A_LD]], 7
+// CHECK: [[A_ASHR:%.+]] = ashr i24 [[A_SHL]], 10
+// CHECK: [[CAST:%.+]] = sext i24 [[A_ASHR]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80
+// CHECK: [[ADD:%.+]] = fadd x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i32
+// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
+// CHECK: [[BF_LD:%.+]] = load i24, i24* [[BITCAST]],
+// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
+// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
+// CHECK: [[BF_CLEAR:%.+]] = and i24 [[BF_LD]], -131065
+// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i24 %{{.+}}, i24* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i24, i24* [[LDTEMP]]
+// CHECK: [[TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: store i24 [[OLD_BF_VALUE]], i24* [[TEMP_OLD_BF_ADDR]]
+// CHECK: [[TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: store i24 [[NEW_BF_VALUE]], i24* [[TEMP_NEW_BF_ADDR]]
+// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[TEMP_OLD_BF_ADDR]] to i8*
+// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP_NEW_BF_ADDR]] to i8*
+// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
+// CHECK: [[FAILED_OLD_VAL]] = load i24, i24* [[TEMP_OLD_BF_ADDR]]
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ bfx3_packed.a += ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 47
+// CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 63
+// CHECK: [[A_CAST:%.+]] = trunc i64 [[A_ASHR:%.+]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST:%.+]] to x86_fp80
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[MUL]] to i32
+// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
+// CHECK: [[BF_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
+// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
+// CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -65537
+// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bfx4.a = bfx4.a * ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i8*
+// CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[A_SHL:%.+]] = shl i8 [[A_LD]], 7
+// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_SHL:%.+]], 7
+// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR:%.+]] to i32
+// CHECK: [[CONV:%.+]] = sitofp i32 [[CAST]] to x86_fp80
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[CONV]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB:%.+]] to i32
+// CHECK: [[NEW_VAL:%.+]] = trunc i32 [[CONV]] to i8
+// CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[BF_VALUE:%.+]] = and i8 [[NEW_VAL]], 1
+// CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], -2
+// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ bfx4_packed.a -= ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 40
+// CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 57
+// CHECK: [[CONV:%.+]] = sitofp i64 [[A_ASHR]] to x86_fp80
+// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[CONV]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[DIV]] to i64
+// CHECK: [[BF_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i64 [[CONV]], 127
+// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND:%.+]], 17
+// CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -16646145
+// CHECK: [[VAL:%.+]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i64 [[VAL]], i64* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ bfx4.b /= ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i64* %{{.+}} to i8*
+// CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 1
+// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i64
+// CHECK: [[CONV:%.+]] = sitofp i64 [[CAST]] to x86_fp80
+// CHECK: [[ADD:%.+]] = fadd x86_fp80 [[CONV]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i64
+// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
+// CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
+// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
+// CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], 1
+// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic update
+ bfx4_packed.b += ldv;
+// CHECK: load i64, i64*
+// CHECK: [[EXPR:%.+]] = uitofp i64 %{{.+}} to float
+// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic
+// CHECK: [[LD:%.+]] = bitcast i64 [[I64VAL]] to <2 x float>
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_VEC_VAL:%.+]] = phi <2 x float> [ [[LD]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store <2 x float> [[OLD_VEC_VAL]], <2 x float>* [[LDTEMP:%.+]],
+// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
+// CHECK: [[X:%.+]] = extractelement <2 x float> [[VEC_VAL]], i64 0
+// CHECK: [[VEC_ITEM_VAL:%.+]] = fsub float [[EXPR]], [[X]]
+// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]],
+// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
+// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
+// CHECK: [[NEW_VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
+// CHECK: [[OLD_I64:%.+]] = bitcast <2 x float> [[OLD_VEC_VAL]] to i64
+// CHECK: [[NEW_I64:%.+]] = bitcast <2 x float> [[NEW_VEC_VAL]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic
+// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: [[FAILED_OLD_VAL]] = bitcast i64 [[FAILED_I64_OLD_VAL]] to <2 x float>
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+#pragma omp atomic
+ float2x.x = ulv - float2x.x;
+// CHECK: [[EXPR:%.+]] = load double, double* @{{.+}},
+// CHECK: [[OLD_VAL:%.+]] = call i32 @llvm.read_register.i32([[REG:metadata ![0-9]+]])
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[OLD_VAL]] to double
+// CHECK: [[DIV:%.+]] = fdiv double [[EXPR]], [[X_RVAL]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi double [[DIV]] to i32
+// CHECK: call void @llvm.write_register.i32([[REG]], i32 [[NEW_VAL]])
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic seq_cst
+ rix = dv / rix;
+ return 0;
+}
+
+#endif
Propchange: cfe/trunk/test/OpenMP/atomic_update_codegen.cpp
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: cfe/trunk/test/OpenMP/atomic_update_codegen.cpp
------------------------------------------------------------------------------
svn:keywords = Author Date Id Rev URL
Propchange: cfe/trunk/test/OpenMP/atomic_update_codegen.cpp
------------------------------------------------------------------------------
svn:mime-type = text/plain
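
For anyone wanting to try the new checks by hand, below is a minimal standalone sketch, not part of the committed test, of the two lvalue forms exercised at the tail of atomic_update_codegen.cpp: a bit-field member and a vector element. The struct, typedef, and variable names (BF, bf, float2, v2, ld) are made up for illustration. As the CHECK lines in the patch show, both forms are lowered through an atomic load of the enclosing storage followed by a cmpxchg retry loop, since neither lvalue can be updated with a single atomicrmw.

/* Illustrative sketch only; names are not taken from the test.
   Build with OpenMP enabled, e.g. clang -fopenmp -S -emit-llvm sketch.c */
struct BF { int a : 31; } bf;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 v2;
long double ld;

void update(void) {
  /* Bit-field update: atomic load of the enclosing storage unit,
     then a cmpxchg retry loop (compare the BitFields checks above). */
#pragma omp atomic update
  bf.a -= ld;
  /* Vector-element update: the whole <2 x float> is loaded atomically,
     the element is rewritten, and the result is installed via cmpxchg. */
#pragma omp atomic
  v2.x = ld - v2.x;
}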