[llvm] 766ad7d - [OpenMP][OMPIRBuilder]Adding support for `omp atomic`
Fady Ghanim via llvm-commits
llvm-commits at lists.llvm.org
Sun May 23 15:10:04 PDT 2021
Author: Fady Ghanim
Date: 2021-05-23T17:44:09-04:00
New Revision: 766ad7d0aa6f4ebd325fbabfa39e15e11984c060
URL: https://github.com/llvm/llvm-project/commit/766ad7d0aa6f4ebd325fbabfa39e15e11984c060
DIFF: https://github.com/llvm/llvm-project/commit/766ad7d0aa6f4ebd325fbabfa39e15e11984c060.diff
LOG: [OpenMP][OMPIRBuilder]Adding support for `omp atomic`
This patch adds support for generating `omp atomic` constructs in the
OpenMPIRBuilder for all of the different atomic clauses: read, write,
update, and capture. An illustrative source-level example follows the
file list below.
Added:
Modified:
llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
Removed:
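For reference, these new entry points correspond to the following source-level
forms. This is an illustrative example only; the function and variable names
are arbitrary and not part of the patch:

  // Illustrative only: the four `omp atomic` forms the new entry points cover.
  void atomic_forms(int expr) {
    int x = 0, v = 0;

  #pragma omp atomic read
    v = x;                   // createAtomicRead

  #pragma omp atomic write
    x = expr;                // createAtomicWrite

  #pragma omp atomic update
    x = x + expr;            // createAtomicUpdate

  #pragma omp atomic capture
    { v = x; x = x + expr; } // createAtomicCapture (postfix capture)
  }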
################################################################################
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index d50ee3eb341c..2eeeef600b84 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -882,6 +882,167 @@ class OpenMPIRBuilder {
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
+ /// Callback type for Atomic Expression update
+ /// ex:
+ /// \code{.cpp}
+ /// unsigned x = 0;
+ /// #pragma omp atomic update
+ /// x = Expr(x_old); //Expr() is any legal operation
+ /// \endcode
+ ///
+ /// \param XOld the value of the atomic memory address to use for update
+ /// \param IRB reference to the IRBuilder to use
+ ///
+ /// \returns Value to update X to.
+ using AtomicUpdateCallbackTy =
+ const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
+
+private:
+ enum AtomicKind { Read, Write, Update, Capture };
+
+ /// Determine whether to emit flush or not
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AO The required atomic ordering
+ /// \param AK The OpenMP atomic operation kind used.
+ ///
+ /// \returns whether a flush was emitted or not
+ bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
+ AtomicOrdering AO, AtomicKind AK);
+
+ /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
+ /// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
+ /// Only scalar data types are supported.
+ ///
+ /// \param AllocIP Instruction to create AllocaInst before.
+ /// \param X The target atomic pointer to be updated
+ /// \param Expr The value to update X with.
+ /// \param AO Atomic ordering of the generated atomic
+ /// instructions.
+ /// \param RMWOp The binary operation used for the update. If the
+ /// operation is not supported by atomicrmw, or belongs to
+ /// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
+ /// atomic sequence will be generated.
+ /// \param UpdateOp Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \param VolatileX true if \a X is volatile.
+ /// \param IsXLHSInRHSPart true if \a X is the left-hand side of the BinOp in
+ /// the right-hand side of the update expression,
+ /// false otherwise (e.g. true for X = X BinOp Expr).
+ ///
+ /// \returns A pair of the old value of X before the update, and the value
+ /// used for the update.
+ std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
+ Value *Expr, AtomicOrdering AO,
+ AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp,
+ bool VolatileX,
+ bool IsXLHSInRHSPart);
+
+ /// Emit the binary op described by \p RMWOp, using \p Src1 and \p Src2.
+ ///
+ /// \return The resulting value.
+ Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
+ AtomicRMWInst::BinOp RMWOp);
+
+public:
+ /// A struct to pack relevant information while generating atomic Ops
+ struct AtomicOpValue {
+ Value *Var = nullptr;
+ bool IsSigned = false;
+ bool IsVolatile = false;
+ };
+
+ /// Emit atomic read for: V = X --- Only scalar data types are supported.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param X The target pointer to be atomically read
+ /// \param V Memory address in which to store the atomically
+ /// read value
+ /// \param AO Atomic ordering of the generated atomic
+ /// instructions.
+ ///
+ /// \return Insertion point after generated atomic read IR.
+ InsertPointTy createAtomicRead(const LocationDescription &Loc,
+ AtomicOpValue &X, AtomicOpValue &V,
+ AtomicOrdering AO);
+
+ /// Emit atomic write for: X = Expr --- Only scalar data types are supported.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param X The target pointer to be atomically written to
+ /// \param Expr The value to store.
+ /// \param AO Atomic ordering of the generated atomic
+ /// instructions.
+ ///
+ /// \return Insertion point after generated atomic Write IR.
+ InsertPointTy createAtomicWrite(const LocationDescription &Loc,
+ AtomicOpValue &X, Value *Expr,
+ AtomicOrdering AO);
+
+ /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
+ /// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
+ /// Only scalar data types are supported.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocIP Instruction to create AllocaInst before.
+ /// \param X The target atomic pointer to be updated
+ /// \param Expr The value to update X with.
+ /// \param AO Atomic ordering of the generated atomic instructions.
+ /// \param RMWOp The binary operation used for the update. If the operation
+ /// is not supported by atomicrmw, or belongs to
+ /// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
+ /// atomic sequence will be generated.
+ /// \param UpdateOp Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \param IsXLHSInRHSPart true if \a X is the left-hand side of the BinOp in
+ /// the right-hand side of the update expression,
+ /// false otherwise (e.g. true for X = X BinOp Expr).
+ ///
+ /// \return Insertion point after generated atomic update IR.
+ InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
+ Instruction *AllocIP, AtomicOpValue &X,
+ Value *Expr, AtomicOrdering AO,
+ AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp,
+ bool IsXLHSInRHSPart);
+
+ /// Emit atomic capture for the following constructs (only scalar data types):
+ /// V = X; X = X BinOp Expr ,
+ /// X = X BinOp Expr; V = X,
+ /// V = X; X = Expr BinOp X,
+ /// X = Expr BinOp X; V = X,
+ /// V = X; X = UpdateOp(X),
+ /// X = UpdateOp(X); V = X,
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocIP Instruction to create AllocaInst before.
+ /// \param X The target atomic pointer to be updated
+ /// \param V Memory address in which to store the captured value
+ /// \param Expr The value to update X with.
+ /// \param AO Atomic ordering of the generated atomic instructions
+ /// \param RMWOp The binary operation used for the update. If the
+ /// operation is not supported by atomicrmw, or belongs to
+ /// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
+ /// atomic sequence will be generated.
+ /// \param UpdateOp Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \param UpdateExpr true if X is an in-place update of the form
+ /// X = X BinOp Expr or X = Expr BinOp X
+ /// \param IsXLHSInRHSPart true if X is the left-hand side of the BinOp in
+ /// the right-hand side of the update expression,
+ /// false otherwise (e.g. true for X = X BinOp Expr)
+ /// \param IsPostfixUpdate true if the original value of 'x' must be stored
+ /// in 'v', not the updated one.
+ ///
+ /// \return Insertion point after generated atomic capture IR.
+ InsertPointTy
+ createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
+ AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
+ AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
+ bool IsPostfixUpdate, bool IsXLHSInRHSPart);
+
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
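As an editorial illustration of the declarations above, here is a minimal
sketch of how a frontend might drive the read/write entry points. The helper
name genAtomicLoadStore and the XAddr/VAddr/Expr arguments are hypothetical;
only the OpenMPIRBuilder calls and the LocationDescription/AtomicOpValue usage
mirror this header and the unit tests further down:

  #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Hypothetical frontend helper: emit `v = x` (atomic read) followed by
  // `x = expr` (atomic write), both with relaxed (Monotonic) ordering.
  static void genAtomicLoadStore(OpenMPIRBuilder &OMPBuilder,
                                 IRBuilder<> &Builder, Value *XAddr,
                                 Value *VAddr, Value *Expr, const DebugLoc &DL) {
    OpenMPIRBuilder::AtomicOpValue X = {XAddr, /*IsSigned=*/false,
                                        /*IsVolatile=*/false};
    OpenMPIRBuilder::AtomicOpValue V = {VAddr, /*IsSigned=*/false,
                                        /*IsVolatile=*/false};

    // #pragma omp atomic read   ->  v = x;
    OpenMPIRBuilder::LocationDescription ReadLoc({Builder.saveIP(), DL});
    Builder.restoreIP(
        OMPBuilder.createAtomicRead(ReadLoc, X, V, AtomicOrdering::Monotonic));

    // #pragma omp atomic write  ->  x = expr;
    OpenMPIRBuilder::LocationDescription WriteLoc({Builder.saveIP(), DL});
    Builder.restoreIP(OMPBuilder.createAtomicWrite(WriteLoc, X, Expr,
                                                   AtomicOrdering::Monotonic));
  }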
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index b379f85e9490..8dec37d9668b 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -2243,6 +2243,324 @@ OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
return MaptypesArrayGlobal;
}
+bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
+ const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
+ assert(!(AO == AtomicOrdering::NotAtomic ||
+ AO == llvm::AtomicOrdering::Unordered) &&
+ "Unexpected Atomic Ordering.");
+
+ bool Flush = false;
+ llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;
+
+ switch (AK) {
+ case Read:
+ if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
+ AO == AtomicOrdering::SequentiallyConsistent) {
+ FlushAO = AtomicOrdering::Acquire;
+ Flush = true;
+ }
+ break;
+ case Write:
+ case Update:
+ if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
+ AO == AtomicOrdering::SequentiallyConsistent) {
+ FlushAO = AtomicOrdering::Release;
+ Flush = true;
+ }
+ break;
+ case Capture:
+ switch (AO) {
+ case AtomicOrdering::Acquire:
+ FlushAO = AtomicOrdering::Acquire;
+ Flush = true;
+ break;
+ case AtomicOrdering::Release:
+ FlushAO = AtomicOrdering::Release;
+ Flush = true;
+ break;
+ case AtomicOrdering::AcquireRelease:
+ case AtomicOrdering::SequentiallyConsistent:
+ FlushAO = AtomicOrdering::AcquireRelease;
+ Flush = true;
+ break;
+ default:
+ // do nothing - leave silently.
+ break;
+ }
+ }
+
+ if (Flush) {
+ // The flush runtime call does not yet take a memory_ordering argument, so
+ // this resolves which atomic ordering the flush should use ahead of that
+ // support being added; for now it only issues the flush call.
+ // TODO: pass `FlushAO` after memory ordering support is added
+ (void)FlushAO;
+ emitFlush(Loc);
+ }
+
+ // for AO == AtomicOrdering::Monotonic and all other case combinations
+ // do nothing
+ return Flush;
+}
+
+OpenMPIRBuilder::InsertPointTy
+OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
+ AtomicOpValue &X, AtomicOpValue &V,
+ AtomicOrdering AO) {
+ if (!updateToLocation(Loc))
+ return Loc.IP;
+
+ Type *XTy = X.Var->getType();
+ assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+ Type *XElemTy = XTy->getPointerElementType();
+ assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+ XElemTy->isPointerTy()) &&
+ "OMP atomic read expected a scalar type");
+
+ Value *XRead = nullptr;
+
+ if (XElemTy->isIntegerTy()) {
+ LoadInst *XLD =
+ Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
+ XLD->setAtomic(AO);
+ XRead = cast<Value>(XLD);
+ } else {
+ // We need to bitcast and perform atomic op as integer
+ unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
+ IntegerType *IntCastTy =
+ IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+ Value *XBCast = Builder.CreateBitCast(
+ X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
+ LoadInst *XLoad =
+ Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
+ XLoad->setAtomic(AO);
+ if (XElemTy->isFloatingPointTy()) {
+ XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
+ } else {
+ XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
+ }
+ }
+ checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
+ Builder.CreateStore(XRead, V.Var, V.IsVolatile);
+ return Builder.saveIP();
+}
+
+OpenMPIRBuilder::InsertPointTy
+OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
+ AtomicOpValue &X, Value *Expr,
+ AtomicOrdering AO) {
+ if (!updateToLocation(Loc))
+ return Loc.IP;
+
+ Type *XTy = X.Var->getType();
+ assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+ Type *XElemTy = XTy->getPointerElementType();
+ assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+ XElemTy->isPointerTy()) &&
+ "OMP atomic write expected a scalar type");
+
+ if (XElemTy->isIntegerTy()) {
+ StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
+ XSt->setAtomic(AO);
+ } else {
+ // We need to bitcast and perform atomic op as integers
+ unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
+ IntegerType *IntCastTy =
+ IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+ Value *XBCast = Builder.CreateBitCast(
+ X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
+ Value *ExprCast =
+ Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
+ StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
+ XSt->setAtomic(AO);
+ }
+
+ checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
+ return Builder.saveIP();
+}
+
+OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
+ const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
+ Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) {
+ if (!updateToLocation(Loc))
+ return Loc.IP;
+
+ Type *XTy = X.Var->getType();
+ assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+ Type *XElemTy = XTy->getPointerElementType();
+ assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+ XElemTy->isPointerTy()) &&
+ "OMP atomic update expected a scalar type");
+ assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
+ (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&
+ "OpenMP atomic does not support LT or GT operations");
+
+ emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
+ IsXLHSInRHSPart);
+ checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
+ return Builder.saveIP();
+}
+
+Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
+ AtomicRMWInst::BinOp RMWOp) {
+ switch (RMWOp) {
+ case AtomicRMWInst::Add:
+ return Builder.CreateAdd(Src1, Src2);
+ case AtomicRMWInst::Sub:
+ return Builder.CreateSub(Src1, Src2);
+ case AtomicRMWInst::And:
+ return Builder.CreateAnd(Src1, Src2);
+ case AtomicRMWInst::Nand:
+ return Builder.CreateNot(Builder.CreateAnd(Src1, Src2));
+ case AtomicRMWInst::Or:
+ return Builder.CreateOr(Src1, Src2);
+ case AtomicRMWInst::Xor:
+ return Builder.CreateXor(Src1, Src2);
+ case AtomicRMWInst::Xchg:
+ case AtomicRMWInst::FAdd:
+ case AtomicRMWInst::FSub:
+ case AtomicRMWInst::BAD_BINOP:
+ case AtomicRMWInst::Max:
+ case AtomicRMWInst::Min:
+ case AtomicRMWInst::UMax:
+ case AtomicRMWInst::UMin:
+ llvm_unreachable("Unsupported atomic update operation");
+ }
+ llvm_unreachable("Unsupported atomic update operation");
+}
+
+std::pair<Value *, Value *>
+OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
+ AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp,
+ bool VolatileX, bool IsXLHSInRHSPart) {
+ Type *XElemTy = X->getType()->getPointerElementType();
+
+ bool DoCmpExch =
+ ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
+ (RMWOp == AtomicRMWInst::FSub) ||
+ (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart);
+
+ std::pair<Value *, Value *> Res;
+ if (XElemTy->isIntegerTy() && !DoCmpExch) {
+ Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
+ // not needed except in case of postfix captures. Generate anyway for
+ // consistency with the else part. Will be removed with any DCE pass.
+ Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
+ } else {
+ unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
+ IntegerType *IntCastTy =
+ IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+ Value *XBCast =
+ Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
+ LoadInst *OldVal =
+ Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
+ OldVal->setAtomic(AO);
+ // CurBB
+ // | /---\
+ // ContBB |
+ // | \---/
+ // ExitBB
+ BasicBlock *CurBB = Builder.GetInsertBlock();
+ Instruction *CurBBTI = CurBB->getTerminator();
+ CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
+ BasicBlock *ExitBB =
+ CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
+ BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
+ X->getName() + ".atomic.cont");
+ ContBB->getTerminator()->eraseFromParent();
+ Builder.SetInsertPoint(ContBB);
+ llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
+ PHI->addIncoming(OldVal, CurBB);
+ AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
+ NewAtomicAddr->setName(X->getName() + "x.new.val");
+ NewAtomicAddr->moveBefore(AllocIP);
+ IntegerType *NewAtomicCastTy =
+ IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+ bool IsIntTy = XElemTy->isIntegerTy();
+ Value *NewAtomicIntAddr =
+ (IsIntTy)
+ ? NewAtomicAddr
+ : Builder.CreateBitCast(NewAtomicAddr,
+ NewAtomicCastTy->getPointerTo(Addrspace));
+ Value *OldExprVal = PHI;
+ if (!IsIntTy) {
+ if (XElemTy->isFloatingPointTy()) {
+ OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
+ X->getName() + ".atomic.fltCast");
+ } else {
+ OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
+ X->getName() + ".atomic.ptrCast");
+ }
+ }
+
+ Value *Upd = UpdateOp(OldExprVal, Builder);
+ Builder.CreateStore(Upd, NewAtomicAddr);
+ LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr);
+ Value *XAddr =
+ (IsIntTy)
+ ? X
+ : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
+ AtomicOrdering Failure =
+ llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+ AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
+ XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure);
+ Result->setVolatile(VolatileX);
+ Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
+ Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
+ PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
+ Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
+
+ Res.first = OldExprVal;
+ Res.second = Upd;
+
+ // Set the insertion point in the exit block: if its terminator is an
+ // unreachable (e.g. the placeholder created above), drop it and insert at
+ // the end of the block; otherwise insert before the existing terminator.
+ if (isa<UnreachableInst>(ExitBB->getTerminator())) {
+ CurBBTI->eraseFromParent();
+ Builder.SetInsertPoint(ExitBB);
+ } else {
+ Builder.SetInsertPoint(ExitBB->getTerminator());
+ }
+ }
+
+ return Res;
+}
+
+OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
+ const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
+ AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
+ AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
+ bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) {
+ if (!updateToLocation(Loc))
+ return Loc.IP;
+
+ Type *XTy = X.Var->getType();
+ assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+ Type *XElemTy = XTy->getPointerElementType();
+ assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+ XElemTy->isPointerTy()) &&
+ "OMP atomic capture expected a scalar type");
+ assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
+ "OpenMP atomic does not support LT or GT operations");
+
+ // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
+ // 'x' is simply atomically rewritten with 'expr'.
+ AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
+ std::pair<Value *, Value *> Result =
+ emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp,
+ X.IsVolatile, IsXLHSInRHSPart);
+
+ Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
+ Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);
+
+ checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
+ return Builder.saveIP();
+}
+
GlobalVariable *
OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName) {
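To tie the doc comments to the implementation above, here is a sketch of an
update that atomicrmw cannot express, so emitAtomicUpdate falls back to the
load/cmpxchg retry loop (CurBB -> ContBB -> ExitBB) shown in its block
comment. The helper name genAtomicDivUpdate and the choice of a division
update are assumptions for illustration only:

  #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Hypothetical frontend helper for `#pragma omp atomic update` on
  // `x = expr / x;`. Division has no atomicrmw encoding, so BAD_BINOP plus an
  // UpdateOp callback is passed and the builder emits the cmpxchg retry loop.
  static void genAtomicDivUpdate(OpenMPIRBuilder &OMPBuilder,
                                 IRBuilder<> &Builder, Instruction *AllocIP,
                                 Value *XAddr, Value *Expr, const DebugLoc &DL) {
    OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
    OpenMPIRBuilder::AtomicOpValue X = {XAddr, /*IsSigned=*/true,
                                        /*IsVolatile=*/false};

    auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) {
      // x appears on the right-hand side but is not the LHS of the BinOp.
      return IRB.CreateSDiv(Expr, XOld);
    };

    Builder.restoreIP(OMPBuilder.createAtomicUpdate(
        Loc, AllocIP, X, Expr, AtomicOrdering::Monotonic,
        AtomicRMWInst::BAD_BINOP, UpdateOp, /*IsXLHSInRHSPart=*/false));
  }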
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index 35a52310d376..962dcc235983 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -2192,6 +2192,293 @@ TEST_F(OpenMPIRBuilderTest, SingleDirective) {
EXPECT_EQ(SingleEndCI->getArgOperand(1), SingleEntryCI->getArgOperand(1));
}
+TEST_F(OpenMPIRBuilderTest, OMPAtomicReadFlt) {
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ Type *Float32 = Type::getFloatTy(M->getContext());
+ AllocaInst *XVal = Builder.CreateAlloca(Float32);
+ XVal->setName("AtomicVar");
+ AllocaInst *VVal = Builder.CreateAlloca(Float32);
+ VVal->setName("AtomicRead");
+ AtomicOrdering AO = AtomicOrdering::Monotonic;
+ OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+ OpenMPIRBuilder::AtomicOpValue V = {VVal, false, false};
+
+ Builder.restoreIP(OMPBuilder.createAtomicRead(Loc, X, V, AO));
+
+ IntegerType *IntCastTy =
+ IntegerType::get(M->getContext(), Float32->getScalarSizeInBits());
+
+ BitCastInst *CastFrmFlt = cast<BitCastInst>(VVal->getNextNode());
+ EXPECT_EQ(CastFrmFlt->getSrcTy(), Float32->getPointerTo());
+ EXPECT_EQ(CastFrmFlt->getDestTy(), IntCastTy->getPointerTo());
+ EXPECT_EQ(CastFrmFlt->getOperand(0), XVal);
+
+ LoadInst *AtomicLoad = cast<LoadInst>(CastFrmFlt->getNextNode());
+ EXPECT_TRUE(AtomicLoad->isAtomic());
+ EXPECT_EQ(AtomicLoad->getPointerOperand(), CastFrmFlt);
+
+ BitCastInst *CastToFlt = cast<BitCastInst>(AtomicLoad->getNextNode());
+ EXPECT_EQ(CastToFlt->getSrcTy(), IntCastTy);
+ EXPECT_EQ(CastToFlt->getDestTy(), Float32);
+ EXPECT_EQ(CastToFlt->getOperand(0), AtomicLoad);
+
+ StoreInst *StoreofAtomic = cast<StoreInst>(CastToFlt->getNextNode());
+ EXPECT_EQ(StoreofAtomic->getValueOperand(), CastToFlt);
+ EXPECT_EQ(StoreofAtomic->getPointerOperand(), VVal);
+
+ Builder.CreateRetVoid();
+ OMPBuilder.finalize();
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicReadInt) {
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ IntegerType *Int32 = Type::getInt32Ty(M->getContext());
+ AllocaInst *XVal = Builder.CreateAlloca(Int32);
+ XVal->setName("AtomicVar");
+ AllocaInst *VVal = Builder.CreateAlloca(Int32);
+ VVal->setName("AtomicRead");
+ AtomicOrdering AO = AtomicOrdering::Monotonic;
+ OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+ OpenMPIRBuilder::AtomicOpValue V = {VVal, false, false};
+
+ BasicBlock *EntryBB = BB;
+
+ Builder.restoreIP(OMPBuilder.createAtomicRead(Loc, X, V, AO));
+ LoadInst *AtomicLoad = nullptr;
+ StoreInst *StoreofAtomic = nullptr;
+
+ for (Instruction &Cur : *EntryBB) {
+ if (isa<LoadInst>(Cur)) {
+ AtomicLoad = cast<LoadInst>(&Cur);
+ if (AtomicLoad->getPointerOperand() == XVal)
+ continue;
+ AtomicLoad = nullptr;
+ } else if (isa<StoreInst>(Cur)) {
+ StoreofAtomic = cast<StoreInst>(&Cur);
+ if (StoreofAtomic->getPointerOperand() == VVal)
+ continue;
+ StoreofAtomic = nullptr;
+ }
+ }
+
+ EXPECT_NE(AtomicLoad, nullptr);
+ EXPECT_TRUE(AtomicLoad->isAtomic());
+
+ EXPECT_NE(StoreofAtomic, nullptr);
+ EXPECT_EQ(StoreofAtomic->getValueOperand(), AtomicLoad);
+
+ Builder.CreateRetVoid();
+ OMPBuilder.finalize();
+
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicWriteFlt) {
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ LLVMContext &Ctx = M->getContext();
+ Type *Float32 = Type::getFloatTy(Ctx);
+ AllocaInst *XVal = Builder.CreateAlloca(Float32);
+ XVal->setName("AtomicVar");
+ OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+ AtomicOrdering AO = AtomicOrdering::Monotonic;
+ Constant *ValToWrite = ConstantFP::get(Float32, 1.0);
+
+ Builder.restoreIP(OMPBuilder.createAtomicWrite(Loc, X, ValToWrite, AO));
+
+ IntegerType *IntCastTy =
+ IntegerType::get(M->getContext(), Float32->getScalarSizeInBits());
+
+ BitCastInst *CastFrmFlt = cast<BitCastInst>(XVal->getNextNode());
+ EXPECT_EQ(CastFrmFlt->getSrcTy(), Float32->getPointerTo());
+ EXPECT_EQ(CastFrmFlt->getDestTy(), IntCastTy->getPointerTo());
+ EXPECT_EQ(CastFrmFlt->getOperand(0), XVal);
+
+ Value *ExprCast = Builder.CreateBitCast(ValToWrite, IntCastTy);
+
+ StoreInst *StoreofAtomic = cast<StoreInst>(CastFrmFlt->getNextNode());
+ EXPECT_EQ(StoreofAtomic->getValueOperand(), ExprCast);
+ EXPECT_EQ(StoreofAtomic->getPointerOperand(), CastFrmFlt);
+ EXPECT_TRUE(StoreofAtomic->isAtomic());
+
+ Builder.CreateRetVoid();
+ OMPBuilder.finalize();
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicWriteInt) {
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ LLVMContext &Ctx = M->getContext();
+ IntegerType *Int32 = Type::getInt32Ty(Ctx);
+ AllocaInst *XVal = Builder.CreateAlloca(Int32);
+ XVal->setName("AtomicVar");
+ OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+ AtomicOrdering AO = AtomicOrdering::Monotonic;
+ ConstantInt *ValToWrite = ConstantInt::get(Type::getInt32Ty(Ctx), 1U);
+
+ BasicBlock *EntryBB = BB;
+
+ Builder.restoreIP(OMPBuilder.createAtomicWrite(Loc, X, ValToWrite, AO));
+
+ StoreInst *StoreofAtomic = nullptr;
+
+ for (Instruction &Cur : *EntryBB) {
+ if (isa<StoreInst>(Cur)) {
+ StoreofAtomic = cast<StoreInst>(&Cur);
+ if (StoreofAtomic->getPointerOperand() == XVal)
+ continue;
+ StoreofAtomic = nullptr;
+ }
+ }
+
+ EXPECT_NE(StoreofAtomic, nullptr);
+ EXPECT_TRUE(StoreofAtomic->isAtomic());
+ EXPECT_EQ(StoreofAtomic->getValueOperand(), ValToWrite);
+
+ Builder.CreateRetVoid();
+ OMPBuilder.finalize();
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicUpdate) {
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ IntegerType *Int32 = Type::getInt32Ty(M->getContext());
+ AllocaInst *XVal = Builder.CreateAlloca(Int32);
+ XVal->setName("AtomicVar");
+ Builder.CreateStore(ConstantInt::get(Type::getInt32Ty(Ctx), 0U), XVal);
+ OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+ AtomicOrdering AO = AtomicOrdering::Monotonic;
+ ConstantInt *ConstVal = ConstantInt::get(Type::getInt32Ty(Ctx), 1U);
+ Value *Expr = nullptr;
+ AtomicRMWInst::BinOp RMWOp = AtomicRMWInst::Sub;
+ bool IsXLHSInRHSPart = false;
+
+ BasicBlock *EntryBB = BB;
+ Instruction *AllocIP = EntryBB->getFirstNonPHI();
+ Value *Sub = nullptr;
+
+ auto UpdateOp = [&](Value *Atomic, IRBuilder<> &IRB) {
+ Sub = IRB.CreateSub(ConstVal, Atomic);
+ return Sub;
+ };
+ Builder.restoreIP(OMPBuilder.createAtomicUpdate(
+ Builder, AllocIP, X, Expr, AO, RMWOp, UpdateOp, IsXLHSInRHSPart));
+ BasicBlock *ContBB = EntryBB->getSingleSuccessor();
+ BranchInst *ContTI = dyn_cast<BranchInst>(ContBB->getTerminator());
+ EXPECT_NE(ContTI, nullptr);
+ BasicBlock *EndBB = ContTI->getSuccessor(0);
+ EXPECT_TRUE(ContTI->isConditional());
+ EXPECT_EQ(ContTI->getSuccessor(1), ContBB);
+ EXPECT_NE(EndBB, nullptr);
+
+ PHINode *Phi = dyn_cast<PHINode>(&ContBB->front());
+ EXPECT_NE(Phi, nullptr);
+ EXPECT_EQ(Phi->getNumIncomingValues(), 2U);
+ EXPECT_EQ(Phi->getIncomingBlock(0), EntryBB);
+ EXPECT_EQ(Phi->getIncomingBlock(1), ContBB);
+
+ EXPECT_EQ(Sub->getNumUses(), 1U);
+ StoreInst *St = dyn_cast<StoreInst>(Sub->user_back());
+ AllocaInst *UpdateTemp = dyn_cast<AllocaInst>(St->getPointerOperand());
+
+ ExtractValueInst *ExVI1 =
+ dyn_cast<ExtractValueInst>(Phi->getIncomingValueForBlock(ContBB));
+ EXPECT_NE(ExVI1, nullptr);
+ AtomicCmpXchgInst *CmpExchg =
+ dyn_cast<AtomicCmpXchgInst>(ExVI1->getAggregateOperand());
+ EXPECT_NE(CmpExchg, nullptr);
+ EXPECT_EQ(CmpExchg->getPointerOperand(), XVal);
+ EXPECT_EQ(CmpExchg->getCompareOperand(), Phi);
+ EXPECT_EQ(CmpExchg->getSuccessOrdering(), AtomicOrdering::Monotonic);
+
+ LoadInst *Ld = dyn_cast<LoadInst>(CmpExchg->getNewValOperand());
+ EXPECT_NE(Ld, nullptr);
+ EXPECT_EQ(UpdateTemp, Ld->getPointerOperand());
+
+ Builder.CreateRetVoid();
+ OMPBuilder.finalize();
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicCapture) {
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ LLVMContext &Ctx = M->getContext();
+ IntegerType *Int32 = Type::getInt32Ty(Ctx);
+ AllocaInst *XVal = Builder.CreateAlloca(Int32);
+ XVal->setName("AtomicVar");
+ AllocaInst *VVal = Builder.CreateAlloca(Int32);
+ VVal->setName("AtomicCapTar");
+ StoreInst *Init =
+ Builder.CreateStore(ConstantInt::get(Type::getInt32Ty(Ctx), 0U), XVal);
+
+ OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+ OpenMPIRBuilder::AtomicOpValue V = {VVal, false, false};
+ AtomicOrdering AO = AtomicOrdering::Monotonic;
+ ConstantInt *Expr = ConstantInt::get(Type::getInt32Ty(Ctx), 1U);
+ AtomicRMWInst::BinOp RMWOp = AtomicRMWInst::Add;
+ bool IsXLHSInRHSPart = true;
+ bool IsPostfixUpdate = true;
+ bool UpdateExpr = true;
+
+ BasicBlock *EntryBB = BB;
+ Instruction *AllocIP = EntryBB->getFirstNonPHI();
+
+ // Integer update: UpdateOp is unused because the add maps to atomicrmw.
+ auto UpdateOp = [&](Value *Atomic, IRBuilder<> &IRB) { return nullptr; };
+
+ Builder.restoreIP(OMPBuilder.createAtomicCapture(
+ Builder, AllocIP, X, V, Expr, AO, RMWOp, UpdateOp, UpdateExpr,
+ IsPostfixUpdate, IsXLHSInRHSPart));
+ EXPECT_EQ(EntryBB->getParent()->size(), 1U);
+ AtomicRMWInst *ARWM = dyn_cast<AtomicRMWInst>(Init->getNextNode());
+ EXPECT_NE(ARWM, nullptr);
+ EXPECT_EQ(ARWM->getPointerOperand(), XVal);
+ EXPECT_EQ(ARWM->getOperation(), RMWOp);
+ StoreInst *St = dyn_cast<StoreInst>(ARWM->user_back());
+ EXPECT_NE(St, nullptr);
+ EXPECT_EQ(St->getPointerOperand(), VVal);
+
+ Builder.CreateRetVoid();
+ OMPBuilder.finalize();
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+}
+
TEST_F(OpenMPIRBuilderTest, CreateSections) {
using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;
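Finally, mirroring the OMPAtomicCapture test above, a sketch of a postfix
capture of the form { v = x; x = x + expr; }. The helper name
genAtomicPostfixCapture and its arguments are hypothetical; with a plain
integer add the builder lowers this to a single atomicrmw and stores the old
value of x into v:

  #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Hypothetical frontend helper for `#pragma omp atomic capture`,
  // postfix form { v = x; x = x + expr; }.
  static void genAtomicPostfixCapture(OpenMPIRBuilder &OMPBuilder,
                                      IRBuilder<> &Builder, Instruction *AllocIP,
                                      Value *XAddr, Value *VAddr, Value *Expr,
                                      const DebugLoc &DL) {
    OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
    OpenMPIRBuilder::AtomicOpValue X = {XAddr, /*IsSigned=*/false,
                                        /*IsVolatile=*/false};
    OpenMPIRBuilder::AtomicOpValue V = {VAddr, /*IsSigned=*/false,
                                        /*IsVolatile=*/false};

    // The callback is only consulted when the cmpxchg path is taken.
    auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) {
      return IRB.CreateAdd(XOld, Expr);
    };

    Builder.restoreIP(OMPBuilder.createAtomicCapture(
        Loc, AllocIP, X, V, Expr, AtomicOrdering::Monotonic, AtomicRMWInst::Add,
        UpdateOp, /*UpdateExpr=*/true, /*IsPostfixUpdate=*/true,
        /*IsXLHSInRHSPart=*/true));
  }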