[llvm] [Transforms] Introduce BuildBuiltins.h atomic helpers (PR #134455)
Michael Kruse via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 7 03:25:48 PDT 2025
https://github.com/Meinersbur updated https://github.com/llvm/llvm-project/pull/134455
>From 70c65b33bba3d482fbdf68f37c398a2bcb4e24ec Mon Sep 17 00:00:00 2001
From: Michael Kruse <llvm-project at meinersbur.de>
Date: Fri, 4 Apr 2025 23:47:35 +0200
Subject: [PATCH 1/2] Add BuildBuiltins utilities for atomic load,store,cmpxchg
---
.../llvm/Analysis/TargetLibraryInfo.def | 80 +
.../llvm/Frontend/OpenMP/OMPIRBuilder.h | 48 -
llvm/include/llvm/Support/AtomicOrdering.h | 22 +
llvm/include/llvm/Testing/Support/Error.h | 49 +
.../llvm/Transforms/Utils/BasicBlockUtils.h | 49 +
.../llvm/Transforms/Utils/BuildBuiltins.h | 278 +
.../llvm/Transforms/Utils/BuildLibCalls.h | 47 +
llvm/lib/Analysis/TargetLibraryInfo.cpp | 21 +
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 75 -
llvm/lib/Transforms/Utils/BasicBlockUtils.cpp | 75 +
llvm/lib/Transforms/Utils/BuildBuiltins.cpp | 850 ++++
llvm/lib/Transforms/Utils/BuildLibCalls.cpp | 216 +-
llvm/lib/Transforms/Utils/CMakeLists.txt | 1 +
.../tools/llvm-tli-checker/ps4-tli-check.yaml | 4 +-
.../Analysis/TargetLibraryInfoTest.cpp | 18 +
.../Transforms/Utils/BuildBuiltinsTest.cpp | 4462 +++++++++++++++++
.../unittests/Transforms/Utils/CMakeLists.txt | 2 +
17 files changed, 6171 insertions(+), 126 deletions(-)
create mode 100644 llvm/include/llvm/Transforms/Utils/BuildBuiltins.h
create mode 100644 llvm/lib/Transforms/Utils/BuildBuiltins.cpp
create mode 100644 llvm/unittests/Transforms/Utils/BuildBuiltinsTest.cpp
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
index db566b8ee610e..53fb11aff8a44 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -462,11 +462,91 @@ TLI_DEFINE_ENUM_INTERNAL(atomic_load)
TLI_DEFINE_STRING_INTERNAL("__atomic_load")
TLI_DEFINE_SIG_INTERNAL(Void, SizeT, Ptr, Ptr, Int)
+/// int8_t __atomic_load_1(void *ptr, int memorder);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load_1)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load_1")
+TLI_DEFINE_SIG_INTERNAL(Int8, Ptr, Int)
+
+/// int16_t __atomic_load_2(void *ptr, int memorder);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load_2)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load_2")
+TLI_DEFINE_SIG_INTERNAL(Int16, Ptr, Int)
+
+/// int32_t __atomic_load_4(void *ptr, int memorder);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load_4)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load_4")
+TLI_DEFINE_SIG_INTERNAL(Int32, Ptr, Int)
+
+/// int64_t __atomic_load_8(void *ptr, int memorder);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load_8)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load_8")
+TLI_DEFINE_SIG_INTERNAL(Int64, Ptr, Int)
+
+/// int128_t __atomic_load_16(void *ptr, int memorder);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load_16)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load_16")
+TLI_DEFINE_SIG_INTERNAL(Int128, Ptr, Int)
+
/// void __atomic_store(size_t size, void *mptr, void *vptr, int smodel);
TLI_DEFINE_ENUM_INTERNAL(atomic_store)
TLI_DEFINE_STRING_INTERNAL("__atomic_store")
TLI_DEFINE_SIG_INTERNAL(Void, SizeT, Ptr, Ptr, Int)
+/// void __atomic_store_1(void *ptr, int8_t val, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store_1)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store_1")
+TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int8, Int)
+
+/// void __atomic_store_2(void *ptr, int16_t val, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store_2)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store_2")
+TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int16, Int)
+
+/// void __atomic_store_4(void *ptr, int32_t val, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store_4)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store_4")
+TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int32, Int)
+
+/// void __atomic_store_8(void *ptr, int64_t val, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store_8)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store_8")
+TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int64, Int)
+
+/// void __atomic_store_16(void *ptr, int128_t val, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store_16)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store_16")
+TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int128, Int)
+
+/// bool __atomic_compare_exchange(size_t size, void *ptr, void *expected, void *desired, int success, int failure);
+TLI_DEFINE_ENUM_INTERNAL(atomic_compare_exchange)
+TLI_DEFINE_STRING_INTERNAL("__atomic_compare_exchange")
+TLI_DEFINE_SIG_INTERNAL(Bool, SizeT, Ptr, Ptr, Ptr, Int, Int)
+
+/// bool __atomic_compare_exchange_1(void *ptr, void *expected, uint8_t desired, int success, int failure);
+TLI_DEFINE_ENUM_INTERNAL(atomic_compare_exchange_1)
+TLI_DEFINE_STRING_INTERNAL("__atomic_compare_exchange_1")
+TLI_DEFINE_SIG_INTERNAL(Bool, Ptr, Ptr, Int8, Int, Int)
+
+/// bool __atomic_compare_exchange_2(void *ptr, void *expected, uint16_t desired, int success, int failure);
+TLI_DEFINE_ENUM_INTERNAL(atomic_compare_exchange_2)
+TLI_DEFINE_STRING_INTERNAL("__atomic_compare_exchange_2")
+TLI_DEFINE_SIG_INTERNAL(Bool, Ptr, Ptr, Int16, Int, Int)
+
+/// bool __atomic_compare_exchange_4(void *ptr, void *expected, uint32_t desired, int success, int failure);
+TLI_DEFINE_ENUM_INTERNAL(atomic_compare_exchange_4)
+TLI_DEFINE_STRING_INTERNAL("__atomic_compare_exchange_4")
+TLI_DEFINE_SIG_INTERNAL(Bool, Ptr, Ptr, Int32, Int, Int)
+
+/// bool __atomic_compare_exchange_8(void *ptr, void *expected, uint64_t desired, int success, int failure);
+TLI_DEFINE_ENUM_INTERNAL(atomic_compare_exchange_8)
+TLI_DEFINE_STRING_INTERNAL("__atomic_compare_exchange_8")
+TLI_DEFINE_SIG_INTERNAL(Bool, Ptr, Ptr, Int64, Int, Int)
+
+/// bool __atomic_compare_exchange_16(void *ptr, void *expected, uint128_t desired, int success, int failure);
+TLI_DEFINE_ENUM_INTERNAL(atomic_compare_exchange_16)
+TLI_DEFINE_STRING_INTERNAL("__atomic_compare_exchange_16")
+TLI_DEFINE_SIG_INTERNAL(Bool, Ptr, Ptr, Int128, Int, Int)
+
/// double __cosh_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(cosh_finite)
TLI_DEFINE_STRING_INTERNAL("__cosh_finite")
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 28909cef4748d..2c2c1a8c6166b 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -33,54 +33,6 @@ struct TargetRegionEntryInfo;
class OffloadEntriesInfoManager;
class OpenMPIRBuilder;
-/// Move the instruction after an InsertPoint to the beginning of another
-/// BasicBlock.
-///
-/// The instructions after \p IP are moved to the beginning of \p New which must
-/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
-/// \p New will be added such that there is no semantic change. Otherwise, the
-/// \p IP insert block remains degenerate and it is up to the caller to insert a
-/// terminator. \p DL is used as the debug location for the branch instruction
-/// if one is created.
-void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch,
- DebugLoc DL);
-
-/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
-/// insert location will stick to after the instruction before the insertion
-/// point (instead of moving with the instruction the InsertPoint stores
-/// internally).
-void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
-
-/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
-/// (missing the terminator).
-///
-/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
-/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
-/// is true, a branch to the new successor will new created such that
-/// semantically there is no change; otherwise the block of the insertion point
-/// remains degenerate and it is the caller's responsibility to insert a
-/// terminator. \p DL is used as the debug location for the branch instruction
-/// if one is created. Returns the new successor block.
-BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
- DebugLoc DL, llvm::Twine Name = {});
-
-/// Split a BasicBlock at \p Builder's insertion point, even if the block is
-/// degenerate (missing the terminator). Its new insert location will stick to
-/// after the instruction before the insertion point (instead of moving with the
-/// instruction the InsertPoint stores internally).
-BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Name = {});
-
-/// Split a BasicBlock at \p Builder's insertion point, even if the block is
-/// degenerate (missing the terminator). Its new insert location will stick to
-/// after the instruction before the insertion point (instead of moving with the
-/// instruction the InsertPoint stores internally).
-BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
-
-/// Like splitBB, but reuses the current block's name for the new name.
-BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Suffix = ".split");
-
/// Captures attributes that affect generating LLVM-IR using the
/// OpenMPIRBuilder and related classes. Note that not all attributes are
/// required for all classes or functions. In some use cases the configuration
diff --git a/llvm/include/llvm/Support/AtomicOrdering.h b/llvm/include/llvm/Support/AtomicOrdering.h
index e08c1b262a92b..010bc06bb8570 100644
--- a/llvm/include/llvm/Support/AtomicOrdering.h
+++ b/llvm/include/llvm/Support/AtomicOrdering.h
@@ -158,6 +158,28 @@ inline AtomicOrderingCABI toCABI(AtomicOrdering AO) {
return lookup[static_cast<size_t>(AO)];
}
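+/// Map a C ABI memory ordering (as used by the __atomic_* builtins) to the
+/// corresponding LLVM AtomicOrdering; the inverse of \c toCABI. Note that
+/// memory_order_consume is mapped to Acquire because LLVM has no consume
+/// ordering.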
+inline AtomicOrdering fromCABI(AtomicOrderingCABI AO) {
+  // Acquire is the closest, but still stronger, ordering for consume.
+ static const AtomicOrdering lookup[8] = {
+ /* relaxed */ AtomicOrdering::Monotonic,
+ /* consume */ AtomicOrdering::Acquire,
+ /* acquire */ AtomicOrdering::Acquire,
+ /* release */ AtomicOrdering::Release,
+ /* acq_rel */ AtomicOrdering::AcquireRelease,
+      /* seq_cst */ AtomicOrdering::SequentiallyConsistent,
+ };
+ return lookup[static_cast<size_t>(AO)];
+}
+
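+/// Overload of \c fromCABI for a raw integer value, e.g. obtained from a
+/// ConstantInt. Values that are not valid C ABI orderings fall back to
+/// Monotonic, matching Clang's CGAtomic behaviour.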
+inline AtomicOrdering fromCABI(int64_t AO) {
+ if (!isValidAtomicOrderingCABI(AO)) {
+ // This fallback is what CGAtomic does
+ return AtomicOrdering::Monotonic;
+ }
+ assert(isValidAtomicOrderingCABI(AO));
+ return fromCABI(static_cast<AtomicOrderingCABI>(AO));
+}
+
} // end namespace llvm
#endif // LLVM_SUPPORT_ATOMICORDERING_H
diff --git a/llvm/include/llvm/Testing/Support/Error.h b/llvm/include/llvm/Testing/Support/Error.h
index 5ed8f11e6189b..05a50a74c06ec 100644
--- a/llvm/include/llvm/Testing/Support/Error.h
+++ b/llvm/include/llvm/Testing/Support/Error.h
@@ -80,6 +80,48 @@ class ValueMatchesPoly {
M Matcher;
};
+template <typename RefT> class StoreResultMatcher {
+ class Impl : public testing::MatcherInterface<
+ const llvm::detail::ExpectedHolder<RefT> &> {
+ public:
+ explicit Impl(RefT &Ref) : Ref(Ref) {}
+
+ bool
+ MatchAndExplain(const llvm::detail::ExpectedHolder<RefT> &Holder,
+ testing::MatchResultListener *listener) const override {
+ // If failed to get a value, fail the ASSERT/EXPECT and do not store any
+ // value
+ if (!Holder.Success())
+ return false;
+
+ // Succeeded with a value, remember it
+ Ref = *Holder.Exp;
+
+ return true;
+ }
+
+ void DescribeTo(std::ostream *OS) const override { *OS << "succeeded"; }
+
+ void DescribeNegationTo(std::ostream *OS) const override {
+ *OS << "failed";
+ }
+
+ private:
+ RefT &Ref;
+ };
+
+public:
+ explicit StoreResultMatcher(RefT &Ref) : Ref(Ref) {}
+
+ template <typename T>
+ operator testing::Matcher<const llvm::detail::ExpectedHolder<T> &>() const {
+ return MakeMatcher(new Impl(Ref));
+ }
+
+private:
+ RefT &Ref;
+};
+
template <typename InfoT>
class ErrorMatchesMono : public testing::MatcherInterface<const ErrorHolder &> {
public:
@@ -222,6 +264,13 @@ detail::ValueMatchesPoly<M> HasValue(M Matcher) {
return detail::ValueMatchesPoly<M>(Matcher);
}
+/// Matches an Expected<T> value that succeeds, and also stores the value into
+/// a given variable.
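+///
+/// A minimal sketch of the intended use in a unit test (assuming a function
+/// \c mayFail that returns \c Expected<int>; the names are illustrative):
+/// \code
+///   int Result = 0;
+///   ASSERT_THAT_EXPECTED(mayFail(), StoreResult(Result));
+///   // Result now holds the value returned by mayFail().
+/// \endcode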
+template <typename RefT>
+detail::StoreResultMatcher<RefT> StoreResult(RefT &Ref) {
+ return detail::StoreResultMatcher<RefT>(Ref);
+}
+
} // namespace llvm
#endif
diff --git a/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 6faff3d1fd8e3..7746313c82209 100644
--- a/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
#include <cassert>
namespace llvm {
@@ -384,6 +385,54 @@ void SplitLandingPadPredecessors(
DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
MemorySSAUpdater *MSSAU = nullptr, bool PreserveLCSSA = false);
+/// Move the instruction after an InsertPoint to the beginning of another
+/// BasicBlock.
+///
+/// The instructions after \p IP are moved to the beginning of \p New which must
+/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
+/// \p New will be added such that there is no semantic change. Otherwise, the
+/// \p IP insert block remains degenerate and it is up to the caller to insert a
+/// terminator. \p DL is used as the debug location for the branch instruction
+/// if one is created.
+void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch,
+ DebugLoc DL);
+
+/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
+/// insert location will stick to after the instruction before the insertion
+/// point (instead of moving with the instruction the InsertPoint stores
+/// internally).
+void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
+
+/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
+/// (missing the terminator).
+///
+/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
+/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
+/// is true, a branch to the new successor will be created such that
+/// semantically there is no change; otherwise the block of the insertion point
+/// remains degenerate and it is the caller's responsibility to insert a
+/// terminator. \p DL is used as the debug location for the branch instruction
+/// if one is created. Returns the new successor block.
+BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
+ DebugLoc DL, llvm::Twine Name = {});
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Name = {});
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
+
+/// Like splitBB, but reuses the current block's name for the new name.
+BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Suffix = ".split");
+
/// This method duplicates the specified return instruction into a predecessor
/// which ends in an unconditional branch. If the return instruction returns a
/// value defined by a PHI, propagate the right value into the return. It
diff --git a/llvm/include/llvm/Transforms/Utils/BuildBuiltins.h b/llvm/include/llvm/Transforms/Utils/BuildBuiltins.h
new file mode 100644
index 0000000000000..65765adc297ea
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Utils/BuildBuiltins.h
@@ -0,0 +1,278 @@
+//===- BuildBuiltins.h - Utility builder for builtins ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some functions for lowering compiler builtins,
+// specifically for atomics. Currently, LLVM-IR has no representation of atomics
+// that can be used independently of their arguments:
+//
+// * The instructions load atomic, store atomic, atomicrmw, and cmpxchg can only
+//   be used with a compile-time-constant memory ordering, sync scope, data size
+//   (which must be a power of two), and volatile/weak property, and should not
+//   be used with atypically large data types, which may slow down the compiler.
+//
+// * libcall (in GCC's case: libatomic; LLVM: Compiler-RT) functions work with
+// any data size, but are slower. Specialized functions for a selected number
+//   of data sizes exist as well. They support neither sync scopes nor the
+//   volatile or weak properties. These functions may be implemented using a
+//   lock, and their
+// availability depends on the target triple (e.g. GPU devices cannot
+// implement a global lock by design).
+//
+// We want to mimic Clang's behaviour:
+//
+// * Prefer atomic instructions over libcall functions whenever possible. When a
+// target backend does not support atomic instructions natively,
+//   AtomicExpandPass, LowerAtomicPass, or some backend-specific pass will
+//   convert such instructions to libcall function calls. The reverse is not
+//   the case, i.e. once a libcall function is emitted, no pass optimizes it
+//   back into an instruction.
+//
+// * When passed a non-constant enum argument that the instruction requires to
+//   be constant, emit a switch with one case per possible enum value.
+//
+// Clang currently doesn't check whether the target actually supports the
+// atomic libcall functions, so it always falls back to a libcall even if the
+// target does not provide one. That is, emitting an atomic builtin may fail,
+// and a frontend needs to handle this case.
+//
+// Clang also assumes that the maximum supported data size of an atomic
+// instruction is 16 bytes, even though this is target-dependent and should be
+// queried using TargetLowering::getMaxAtomicSizeInBitsSupported(). However,
+// TargetMachine (which is a factory for TargetLowering) is not available
+// during Clang's CodeGen phase; it is only created for the LLVM pass pipeline.
+//
+// The functions in this file are intended to handle the complexity of builtins
+// so frontends do not need to care about the details. A major difference
+// between the cases is that the IR instructions take values directly as an
+// llvm::Value (except the atomic address, of course), but the libcall functions
+// almost always take pointers to those values. Since we cannot assume that
+// everything can be passed as an llvm::Value (LLVM does not handle large types
+// such as i4096 well), our abstraction passes everything as a pointer that is
+// loaded when needed. The caller is responsible for emitting a temporary
+// AllocaInst and a store if it needs to pass an llvm::Value. Mem2Reg/SROA will
+// easily remove any unnecessary store/load pairs.
+//
+// In the future LLVM may introduce more generic atomic constructs that are
+// lowered by an LLVM pass, such as AtomicExpandPass. Once these exist, the
+// emitBuiltin functions in this file become trivial.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BUILDBUILTINS_H
+#define LLVM_TRANSFORMS_UTILS_BUILDBUILTINS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <variant>
+
+namespace llvm {
+class Value;
+class TargetLibraryInfo;
+class DataLayout;
+class IRBuilderBase;
+class Type;
+class TargetLowering;
+
+namespace SyncScope {
+typedef uint8_t ID;
+}
+
+/// Options for controlling atomic builtins.
+struct AtomicEmitOptions {
+ AtomicEmitOptions(const DataLayout &DL, const TargetLibraryInfo *TLI,
+ const TargetLowering *TL = nullptr)
+ : DL(DL), TLI(TLI), TL(TL) {}
+
+ /// The target's data layout.
+ const DataLayout &DL;
+
+ /// The target's libcall library availability.
+ const TargetLibraryInfo *TLI;
+
+  /// Used to determine which instructions the target supports. If omitted,
+ /// assumes all accesses up to a size of 16 bytes are supported.
+ const TargetLowering *TL = nullptr;
+
+ /// Whether an LLVM instruction can be emitted. LLVM instructions include:
+ /// * load atomic
+ /// * store atomic
+ /// * cmpxchg
+ /// * atomicrmw
+ ///
+  /// Atomic LLVM instructions have several restrictions on when they can be
+ /// used, including:
+  /// * Properties such as IsWeak, Memorder, and Scope must be constant.
+ /// * Must be an integer or pointer type. Some cases also allow float types.
+ /// * Size must be a power-of-two number of bytes.
+ /// * Size must be at most the size of atomics supported by the target.
+  /// * Size should not be too large (e.g. i4096) since LLVM does not scale
+  ///   well with huge types.
+ ///
+ /// Even with all these limitations adhered to, AtomicExpandPass may still
+ /// lower the instruction to a libcall function if the target does not support
+ /// it.
+ ///
+ /// See also:
+ /// * https://llvm.org/docs/Atomics.html
+ /// * https://llvm.org/docs/LangRef.html#i-load
+ /// * https://llvm.org/docs/LangRef.html#i-store
+ /// * https://llvm.org/docs/LangRef.html#cmpxchg-instruction
+ /// * https://llvm.org/docs/LangRef.html#i-atomicrmw
+ bool AllowInstruction = true;
+
+  /// Whether a switch can be emitted to work around the requirement that some
+  /// properties of an instruction must be constant. That is, for each possible
+ /// value of the property, jump to a version of that instruction encoding that
+ /// property.
+ bool AllowSwitch = true;
+
+ /// Allow emitting calls to constant-sized libcall functions, such as
+ /// * __atomic_load_n
+ /// * __atomic_store_n
+ /// * __atomic_compare_exchange_n
+ ///
+  /// where n is a size supported by the target, typically 1, 2, 4, 8, or 16.
+ ///
+ /// See also:
+ /// * https://llvm.org/docs/Atomics.html
+ /// * https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#GCC_intrinsics
+ bool AllowSizedLibcall = true;
+
+  /// Allow emitting calls to variable-sized libcall functions, such as
+  /// * __atomic_load
+ /// * __atomic_store
+ /// * __atomic_compare_exchange
+ ///
+ /// Note that the signatures of these libcall functions are different from the
+ /// compiler builtins of the same name.
+ ///
+ /// See also:
+ /// * https://llvm.org/docs/Atomics.html
+ /// * https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#GCC_intrinsics
+ bool AllowLibcall = true;
+
+ // TODO: Add additional lowerings:
+ // * __sync_* libcalls
+ // * Differently named atomic primitives
+ // (e.g. InterlockedCompareExchange, C11 primitives on Windows)
+  //   * Using a lock implementation as a last resort
+};
+
+/// Emit the __atomic_load builtin. This may either be lowered to the load LLVM
+/// instruction, or to one of the following libcall functions: __atomic_load_1,
+/// __atomic_load_2, __atomic_load_4, __atomic_load_8, __atomic_load_16,
+/// __atomic_load.
+///
+/// Also see:
+/// * https://llvm.org/docs/Atomics.html
+/// * https://llvm.org/docs/LangRef.html#load-instruction
+/// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+/// * https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#GCC_intrinsics
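+///
+/// A minimal usage sketch (assuming an existing \c Builder, an atomic pointer
+/// \c Ptr, plus \c DL and \c TLI; names and error handling are illustrative):
+/// \code
+///   AllocaInst *RetSlot = Builder.CreateAlloca(Builder.getInt32Ty());
+///   if (Error E = emitAtomicLoadBuiltin(
+///           Ptr, RetSlot, Builder.getInt32Ty(), /*IsVolatile=*/false,
+///           AtomicOrdering::SequentiallyConsistent, SyncScope::System,
+///           /*Align=*/{}, Builder, AtomicEmitOptions(DL, &TLI)))
+///     report_fatal_error(std::move(E));
+///   Value *Loaded = Builder.CreateLoad(Builder.getInt32Ty(), RetSlot);
+/// \endcode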
+Error emitAtomicLoadBuiltin(
+ Value *AtomicPtr, Value *RetPtr, std::variant<Type *, uint64_t> TypeOrSize,
+ bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> Memorder,
+ SyncScope::ID Scope, MaybeAlign Align, IRBuilderBase &Builder,
+ AtomicEmitOptions EmitOptions, const Twine &Name = Twine());
+
+/// Emit the __atomic_store builtin. It may either be lowered to the store LLVM
+/// instruction, or to one of the following libcall functions: __atomic_store_1,
+/// __atomic_store_2, __atomic_store_4, __atomic_store_8, __atomic_store_16,
+/// __atomic_store.
+///
+/// Also see:
+/// * https://llvm.org/docs/Atomics.html
+/// * https://llvm.org/docs/LangRef.html#store-instruction
+/// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+/// * https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#GCC_intrinsics
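+///
+/// A minimal usage sketch (same assumptions as for emitAtomicLoadBuiltin; the
+/// value to store, here \c NewVal, is passed through a stack slot):
+/// \code
+///   AllocaInst *ValSlot = Builder.CreateAlloca(Builder.getInt32Ty());
+///   Builder.CreateStore(NewVal, ValSlot);
+///   if (Error E = emitAtomicStoreBuiltin(
+///           Ptr, ValSlot, Builder.getInt32Ty(), /*IsVolatile=*/false,
+///           AtomicOrdering::Release, SyncScope::System, /*Align=*/{},
+///           Builder, AtomicEmitOptions(DL, &TLI)))
+///     report_fatal_error(std::move(E));
+/// \endcode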
+Error emitAtomicStoreBuiltin(
+ Value *AtomicPtr, Value *ValPtr, std::variant<Type *, uint64_t> TypeOrSize,
+ bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> Memorder,
+ SyncScope::ID Scope, MaybeAlign Align, IRBuilderBase &Builder,
+ AtomicEmitOptions EmitOptions, const Twine &Name = Twine());
+
+/// Emit the __atomic_compare_exchange builtin. This may either be
+/// lowered to the cmpxchg LLVM instruction, or to one of the following libcall
+/// functions: __atomic_compare_exchange_1, __atomic_compare_exchange_2,
+/// __atomic_compare_exchange_4, __atomic_compare_exchange_8,
+/// __atomic_compare_exchange_16, __atomic_compare_exchange.
+///
+/// Also see:
+/// * https://llvm.org/docs/Atomics.html
+/// * https://llvm.org/docs/LangRef.html#cmpxchg-instruction
+/// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+/// * https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#GCC_intrinsics
+///
+/// @param AtomicPtr The memory location accessed atomically.
+/// @param ExpectedPtr Pointer to the data expected at \p AtomicPtr. The
+///                    exchange will only happen if the value at \p AtomicPtr
+///                    is equal to this (unless IsWeak is set). Data at
+///                    \p ExpectedPtr may or may not be overwritten, so do not
+///                    use it after this call.
+/// @param DesiredPtr  Pointer to the data that the data at \p AtomicPtr is
+///                    replaced with.
+/// @param TypeOrSize  Type of the value to be accessed. cmpxchg supports
+///                    integers and pointers only, other atomics also support
+///                    floats. Any other type is type-punned to an integer
+///                    that holds at least as many bytes. Alternatively, the
+///                    number of bytes can be specified directly, in which
+///                    case an integer is used as well.
+/// @param IsWeak      If true, the exchange may fail spuriously even if the
+///                    data at \p AtomicPtr equals the data at \p ExpectedPtr.
+/// @param IsVolatile Whether to mark the access as volatile.
+/// @param SuccessMemorder If the exchange succeeds, memory is affected
+/// according to the memory model.
+/// @param FailureMemorder If the exchange fails, memory is affected according
+/// to the memory model. It is considered an atomic "read"
+/// for the purpose of identifying release sequences. Must
+///                    not be release or acquire-release, and must be at most
+///                    as strong as \p SuccessMemorder.
+/// @param Scope (optional) The synchronization scope (domain of threads
+/// where this access has to be atomic, e.g. CUDA
+/// warp/block/grid-level atomics) of this access. Defaults
+/// to system scope.
+/// @param PrevPtr     (optional) Receives the value at \p AtomicPtr before the
+///                    atomic exchange is attempted. This means:
+///                    In case of success:
+///                      The value at \p AtomicPtr before the update. That is,
+///                      the value passed behind \p ExpectedPtr.
+///                    In case of failure
+///                    (including spurious failures if IsWeak):
+///                      The current value at \p AtomicPtr, i.e. the operation
+///                      effectively was an atomic load of that value using
+///                      FailureMemorder semantics.
+///                    Can be the same as \p ExpectedPtr, in which case, after
+///                    the call returns, \p ExpectedPtr/\p PrevPtr will hold
+///                    the value as defined above (instead of being undefined).
+/// @param Align       (optional) Known alignment of \p AtomicPtr. If omitted,
+///                    the alignment is inferred from \p AtomicPtr itself and
+///                    falls back to no alignment.
+/// @param Builder     Used to emit instructions.
+/// @param EmitOptions For controlling what IR is emitted.
+/// @param Name (optional) Stem for generated instruction names.
+///
+/// @return A boolean value that indicates whether the exchange has happened
+/// (true) or not (false), or an error if the atomic operation could not
+/// be emitted.
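+///
+/// A minimal usage sketch (assuming \c Builder, an atomic pointer \c Ptr, the
+/// values \c OldVal and \c NewVal, plus \c DL and \c TLI; names and error
+/// handling are illustrative):
+/// \code
+///   AllocaInst *ExpectedSlot = Builder.CreateAlloca(Builder.getInt32Ty());
+///   AllocaInst *DesiredSlot = Builder.CreateAlloca(Builder.getInt32Ty());
+///   Builder.CreateStore(OldVal, ExpectedSlot);
+///   Builder.CreateStore(NewVal, DesiredSlot);
+///   Expected<Value *> Success = emitAtomicCompareExchangeBuiltin(
+///       Ptr, ExpectedSlot, DesiredSlot, Builder.getInt32Ty(),
+///       /*IsWeak=*/false, /*IsVolatile=*/false,
+///       AtomicOrdering::SequentiallyConsistent,
+///       AtomicOrdering::SequentiallyConsistent, SyncScope::System,
+///       /*PrevPtr=*/nullptr, /*Align=*/{}, Builder,
+///       AtomicEmitOptions(DL, &TLI));
+///   if (!Success)
+///     report_fatal_error(Success.takeError());
+///   Value *DidExchange = *Success; // i1: true iff the exchange happened
+/// \endcode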
+Expected<Value *> emitAtomicCompareExchangeBuiltin(
+ Value *AtomicPtr, Value *ExpectedPtr, Value *DesiredPtr,
+ std::variant<Type *, uint64_t> TypeOrSize,
+ std::variant<Value *, bool> IsWeak, bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> SuccessMemorder,
+ std::variant<std::monostate, Value *, AtomicOrdering, AtomicOrderingCABI>
+ FailureMemorder,
+ SyncScope::ID Scope, Value *PrevPtr, MaybeAlign Align,
+ IRBuilderBase &Builder, AtomicEmitOptions EmitOptions,
+ const Twine &Name = Twine());
+
+} // namespace llvm
+
+#endif /* LLVM_TRANSFORMS_UTILS_BUILDBUILTINS_H */
diff --git a/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index 50f695dbe6c07..2bd30554644c1 100644
--- a/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -198,6 +198,53 @@ namespace llvm {
Value *emitVSPrintf(Value *Dest, Value *Fmt, Value *VAList, IRBuilderBase &B,
const TargetLibraryInfo *TLI);
+ /// Emit a call to the __atomic_load function.
+ /// Defined here:
+ /// https://llvm.org/docs/Atomics.html#libcalls-atomic
+ /// https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#list_of_library_routines
+ Value *emitAtomicLoad(Value *Size, Value *Ptr, Value *Ret, Value *Memorder,
+ IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Variant of __atomic_load where \p Size is either 1, 2, 4, 8, or 16.
+ Value *emitAtomicLoadN(size_t Size, Value *Ptr, Value *Memorder,
+ IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the __atomic_store function.
+ /// Defined here:
+ /// https://llvm.org/docs/Atomics.html#libcalls-atomic
+ /// https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#list_of_library_routines
+ Value *emitAtomicStore(Value *Size, Value *Ptr, Value *ValPtr,
+ Value *Memorder, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Variant of __atomic_store where \p Size is either 1, 2, 4, 8, or 16.
+ Value *emitAtomicStoreN(size_t Size, Value *Ptr, Value *Val, Value *Memorder,
+ IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the __atomic_compare_exchange function.
+ /// Defined here:
+ /// https://llvm.org/docs/Atomics.html#libcalls-atomic
+ /// https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#list_of_library_routines
+ ///
+  /// NOTE: The signature differs from the builtins defined here:
+ /// https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#GCC_intrinsics
+ Value *emitAtomicCompareExchange(Value *Size, Value *Ptr, Value *Expected,
+ Value *Desired, Value *SuccessMemorder,
+ Value *FailureMemorder, IRBuilderBase &B,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Variant of __atomic_compare_exchange where \p Size is either 1, 2, 4, 8,
+ /// or 16.
+ Value *emitAtomicCompareExchangeN(size_t Size, Value *Ptr, Value *Expected,
+ Value *Desired, Value *SuccessMemorder,
+ Value *FailureMemorder, IRBuilderBase &B,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
/// Emit a call to the unary function named 'Name' (e.g. 'floor'). This
/// function is known to take a single of type matching 'Op' and returns one
/// value with the same type. If 'Op' is a long double, 'l' is added as the
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 3a8cdf946da37..a7a6b6d64ebf7 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -60,6 +60,7 @@ std::string VecDesc::getVectorFunctionABIVariantString() const {
enum FuncArgTypeID : char {
Void = 0, // Must be zero.
Bool, // 8 bits on all targets
+ Int8,
Int16,
Int32,
Int,
@@ -67,6 +68,7 @@ enum FuncArgTypeID : char {
Long, // Either 32 or 64 bits.
IntX, // Any integer type.
Int64,
+ Int128,
LLong, // 64 bits on all targets.
SizeT, // size_t.
SSizeT, // POSIX ssize_t.
@@ -828,7 +830,23 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
// Miscellaneous other functions not provided.
TLI.setUnavailable(LibFunc_atomic_load);
+ TLI.setUnavailable(LibFunc_atomic_load_1);
+ TLI.setUnavailable(LibFunc_atomic_load_2);
+ TLI.setUnavailable(LibFunc_atomic_load_4);
+ TLI.setUnavailable(LibFunc_atomic_load_8);
+ TLI.setUnavailable(LibFunc_atomic_load_16);
TLI.setUnavailable(LibFunc_atomic_store);
+ TLI.setUnavailable(LibFunc_atomic_store_1);
+ TLI.setUnavailable(LibFunc_atomic_store_2);
+ TLI.setUnavailable(LibFunc_atomic_store_4);
+ TLI.setUnavailable(LibFunc_atomic_store_8);
+ TLI.setUnavailable(LibFunc_atomic_store_16);
+ TLI.setUnavailable(LibFunc_atomic_compare_exchange);
+ TLI.setUnavailable(LibFunc_atomic_compare_exchange_1);
+ TLI.setUnavailable(LibFunc_atomic_compare_exchange_2);
+ TLI.setUnavailable(LibFunc_atomic_compare_exchange_4);
+ TLI.setUnavailable(LibFunc_atomic_compare_exchange_8);
+ TLI.setUnavailable(LibFunc_atomic_compare_exchange_16);
TLI.setUnavailable(LibFunc___kmpc_alloc_shared);
TLI.setUnavailable(LibFunc___kmpc_free_shared);
TLI.setUnavailable(LibFunc_dunder_strndup);
@@ -1024,6 +1042,7 @@ static bool matchType(FuncArgTypeID ArgTy, const Type *Ty, unsigned IntBits,
case Void:
return Ty->isVoidTy();
case Bool:
+ case Int8:
return Ty->isIntegerTy(8);
case Int16:
return Ty->isIntegerTy(16);
@@ -1040,6 +1059,8 @@ static bool matchType(FuncArgTypeID ArgTy, const Type *Ty, unsigned IntBits,
return Ty->isIntegerTy() && Ty->getPrimitiveSizeInBits() >= IntBits;
case Int64:
return Ty->isIntegerTy(64);
+ case Int128:
+ return Ty->isIntegerTy(128);
case LLong:
return Ty->isIntegerTy(64);
case SizeT:
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 2e5ce5308eea5..1096ccab52c77 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -316,81 +316,6 @@ static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
NewBr->setDebugLoc(DL);
}
-void llvm::spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
- bool CreateBranch, DebugLoc DL) {
- assert(New->getFirstInsertionPt() == New->begin() &&
- "Target BB must not have PHI nodes");
-
- // Move instructions to new block.
- BasicBlock *Old = IP.getBlock();
- New->splice(New->begin(), Old, IP.getPoint(), Old->end());
-
- if (CreateBranch) {
- auto *NewBr = BranchInst::Create(New, Old);
- NewBr->setDebugLoc(DL);
- }
-}
-
-void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
- DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
- BasicBlock *Old = Builder.GetInsertBlock();
-
- spliceBB(Builder.saveIP(), New, CreateBranch, DebugLoc);
- if (CreateBranch)
- Builder.SetInsertPoint(Old->getTerminator());
- else
- Builder.SetInsertPoint(Old);
-
- // SetInsertPoint also updates the Builder's debug location, but we want to
- // keep the one the Builder was configured to use.
- Builder.SetCurrentDebugLocation(DebugLoc);
-}
-
-BasicBlock *llvm::splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
- DebugLoc DL, llvm::Twine Name) {
- BasicBlock *Old = IP.getBlock();
- BasicBlock *New = BasicBlock::Create(
- Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
- Old->getParent(), Old->getNextNode());
- spliceBB(IP, New, CreateBranch, DL);
- New->replaceSuccessorsPhiUsesWith(Old, New);
- return New;
-}
-
-BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Name) {
- DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
- BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
- if (CreateBranch)
- Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
- else
- Builder.SetInsertPoint(Builder.GetInsertBlock());
- // SetInsertPoint also updates the Builder's debug location, but we want to
- // keep the one the Builder was configured to use.
- Builder.SetCurrentDebugLocation(DebugLoc);
- return New;
-}
-
-BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch,
- llvm::Twine Name) {
- DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
- BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
- if (CreateBranch)
- Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
- else
- Builder.SetInsertPoint(Builder.GetInsertBlock());
- // SetInsertPoint also updates the Builder's debug location, but we want to
- // keep the one the Builder was configured to use.
- Builder.SetCurrentDebugLocation(DebugLoc);
- return New;
-}
-
-BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Suffix) {
- BasicBlock *Old = Builder.GetInsertBlock();
- return splitBB(Builder, CreateBranch, Old->getName() + Suffix);
-}
-
// This function creates a fake integer value and a fake use for the integer
// value. It returns the fake value created. This is useful in modeling the
// extra arguments to the outlined functions.
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index ce5bf0c7207c7..edf59054c9599 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -1548,6 +1548,81 @@ void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
PreserveLCSSA);
}
+void llvm::spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
+ bool CreateBranch, DebugLoc DL) {
+ assert(New->getFirstInsertionPt() == New->begin() &&
+ "Target BB must not have PHI nodes");
+
+ // Move instructions to new block.
+ BasicBlock *Old = IP.getBlock();
+ New->splice(New->begin(), Old, IP.getPoint(), Old->end());
+
+ if (CreateBranch) {
+ auto *NewBr = BranchInst::Create(New, Old);
+ NewBr->setDebugLoc(DL);
+ }
+}
+
+void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *Old = Builder.GetInsertBlock();
+
+ spliceBB(Builder.saveIP(), New, CreateBranch, DebugLoc);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Old->getTerminator());
+ else
+ Builder.SetInsertPoint(Old);
+
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+}
+
+BasicBlock *llvm::splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
+ DebugLoc DL, llvm::Twine Name) {
+ BasicBlock *Old = IP.getBlock();
+ BasicBlock *New = BasicBlock::Create(
+ Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
+ Old->getParent(), Old->getNextNode());
+ spliceBB(IP, New, CreateBranch, DL);
+ New->replaceSuccessorsPhiUsesWith(Old, New);
+ return New;
+}
+
+BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Name) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ else
+ Builder.SetInsertPoint(Builder.GetInsertBlock());
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+ return New;
+}
+
+BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch,
+ llvm::Twine Name) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ else
+ Builder.SetInsertPoint(Builder.GetInsertBlock());
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+ return New;
+}
+
+BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Suffix) {
+ BasicBlock *Old = Builder.GetInsertBlock();
+ return splitBB(Builder, CreateBranch, Old->getName() + Suffix);
+}
+
ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
BasicBlock *Pred,
DomTreeUpdater *DTU) {
diff --git a/llvm/lib/Transforms/Utils/BuildBuiltins.cpp b/llvm/lib/Transforms/Utils/BuildBuiltins.cpp
new file mode 100644
index 0000000000000..f290583b1d14b
--- /dev/null
+++ b/llvm/lib/Transforms/Utils/BuildBuiltins.cpp
@@ -0,0 +1,850 @@
+//===- BuildBuiltins.cpp - Utility builder for builtins -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/BuildBuiltins.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
+
+using namespace llvm;
+
+namespace {
+static IntegerType *getIntTy(IRBuilderBase &B, const TargetLibraryInfo *TLI) {
+ return B.getIntNTy(TLI->getIntSize());
+}
+
+static IntegerType *getSizeTTy(IRBuilderBase &B, const TargetLibraryInfo *TLI) {
+ const Module *M = B.GetInsertBlock()->getModule();
+ return B.getIntNTy(TLI->getSizeTSize(*M));
+}
+
+/// In order to use one of the sized library calls such as
+/// __atomic_fetch_add_4, the alignment must be sufficient, the size
+/// must be one of the potentially-specialized sizes, and the value
+/// type must actually exist in C on the target (otherwise, the
+/// function wouldn't actually be defined.)
+static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
+ const DataLayout &DL) {
+ // TODO: "LargestSize" is an approximation for "largest type that
+ // you can express in C". It seems to be the case that int128 is
+ // supported on all 64-bit platforms, otherwise only up to 64-bit
+ // integers are supported. If we get this wrong, then we'll try to
+ // call a sized libcall that doesn't actually exist. There should
+ // really be some more reliable way in LLVM of determining integer
+ // sizes which are valid in the target's C ABI...
+ unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
+ return Alignment >= Size &&
+ (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
+ Size <= LargestSize;
+}
+
+// Helper to check if a type is in a variant
+template <typename T, typename Variant> struct is_in_variant;
+
+template <typename T, typename... Types>
+struct is_in_variant<T, std::variant<Types...>>
+ : std::disjunction<std::is_same<T, Types>...> {};
+
+/// Alternative to std::holds_alternative that works even if the std::variant
+/// cannot hold T.
+template <typename T, typename Variant>
+constexpr bool holds_alternative_if_exists(const Variant &v) {
+ if constexpr (is_in_variant<T, Variant>::value) {
+ return std::holds_alternative<T>(v);
+ } else {
+    // Type T is not in the variant, so it can never be the active alternative.
+ return false;
+ }
+}
+
+/// Common code for emitting an atomic builtin (load, store, cmpxchg).
+class AtomicEmitter {
+public:
+ AtomicEmitter(
+ Value *Ptr, std::variant<Type *, uint64_t> TypeOrSize,
+ std::variant<Value *, bool> IsWeak, bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> SuccessMemorder,
+ std::variant<std::monostate, Value *, AtomicOrdering, AtomicOrderingCABI>
+ FailureMemorder,
+ SyncScope::ID Scope, MaybeAlign Align, IRBuilderBase &Builder,
+ AtomicEmitOptions EmitOptions, const llvm::Twine &Name)
+ : Ctx(Builder.getContext()), CurFn(Builder.GetInsertBlock()->getParent()),
+ AtomicPtr(Ptr), TypeOrSize(TypeOrSize), IsWeak(IsWeak),
+ IsVolatile(IsVolatile), SuccessMemorder(SuccessMemorder),
+ FailureMemorder(FailureMemorder), Scope(Scope), Align(Align),
+ Builder(Builder), EmitOptions(std::move(EmitOptions)), Name(Name) {}
+ virtual ~AtomicEmitter() = default;
+
+protected:
+ LLVMContext &Ctx;
+ Function *CurFn;
+
+ Value *AtomicPtr;
+ std::variant<Type *, uint64_t> TypeOrSize;
+ std::variant<Value *, bool> IsWeak;
+ bool IsVolatile;
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> SuccessMemorder;
+ std::variant<std::monostate, Value *, AtomicOrdering, AtomicOrderingCABI>
+ FailureMemorder;
+ SyncScope::ID Scope;
+ MaybeAlign Align;
+ IRBuilderBase &Builder;
+ AtomicEmitOptions EmitOptions;
+ const Twine &Name;
+
+ uint64_t DataSize;
+ Type *CoercedTy = nullptr;
+ Type *InstCoercedTy = nullptr;
+
+ llvm::Align EffectiveAlign;
+ std::optional<AtomicOrdering> SuccessMemorderConst;
+ Value *SuccessMemorderCABI;
+ std::optional<AtomicOrdering> FailureMemorderConst;
+ Value *FailureMemorderCABI;
+ std::optional<bool> IsWeakConst;
+ Value *IsWeakVal;
+
+ BasicBlock *createBasicBlock(const Twine &BBName) {
+ return BasicBlock::Create(Ctx, Name + "." + getBuiltinSig() + "." + BBName,
+ CurFn);
+ };
+
+ virtual const char *getBuiltinSig() const { return "atomic"; }
+ virtual bool supportsInstOnFloat() const { return true; }
+ virtual bool supportsAcquireOrdering() const { return true; }
+ virtual bool supportsReleaseOrdering() const { return true; }
+
+ virtual void prepareInst() {}
+
+ virtual Value *emitInst(bool IsWeak, AtomicOrdering SuccessMemorder,
+ AtomicOrdering FailureMemorder) = 0;
+
+ Value *emitFailureMemorderSwitch(bool IsWeak,
+ AtomicOrdering SuccessMemorder) {
+ if (FailureMemorderConst) {
+ // FIXME: (from CGAtomic)
+ // 31.7.2.18: "The failure argument shall not be memory_order_release
+ // nor memory_order_acq_rel". Fallback to monotonic.
+ //
+ // Prior to c++17, "the failure argument shall be no stronger than the
+ // success argument". This condition has been lifted and the only
+ // precondition is 31.7.2.18. Effectively treat this as a DR and skip
+ // language version checks.
+ return emitInst(IsWeak, SuccessMemorder, *FailureMemorderConst);
+ }
+
+ // Create all the relevant BB's
+ BasicBlock *ContBB =
+ splitBB(Builder, /*CreateBranch=*/false,
+ Name + "." + getBuiltinSig() + ".failorder.continue");
+ BasicBlock *MonotonicBB = createBasicBlock("monotonic_fail");
+ BasicBlock *AcquireBB = createBasicBlock("acquire_fail");
+ BasicBlock *SeqCstBB = createBasicBlock("seqcst_fail");
+
+ // MonotonicBB is arbitrarily chosen as the default case; in practice,
+ // this doesn't matter unless someone is crazy enough to use something
+ // that doesn't fold to a constant for the ordering.
+ SwitchInst *SI = Builder.CreateSwitch(FailureMemorderCABI, MonotonicBB);
+ // Implemented as acquire, since it's the closest in LLVM.
+ SI->addCase(
+ Builder.getInt32(static_cast<int32_t>(AtomicOrderingCABI::consume)),
+ AcquireBB);
+ SI->addCase(
+ Builder.getInt32(static_cast<int32_t>(AtomicOrderingCABI::acquire)),
+ AcquireBB);
+ SI->addCase(
+ Builder.getInt32(static_cast<int32_t>(AtomicOrderingCABI::seq_cst)),
+ SeqCstBB);
+
+ // TODO: Do not insert PHINode if operation cannot fail
+ Builder.SetInsertPoint(ContBB, ContBB->begin());
+ PHINode *Result =
+ Builder.CreatePHI(Builder.getInt1Ty(), 3,
+ Name + "." + getBuiltinSig() + ".failorder.success");
+ IRBuilderBase::InsertPoint ContIP = Builder.saveIP();
+
+ // Emit all the different atomics
+ Builder.SetInsertPoint(MonotonicBB);
+ Value *MonotonicResult =
+ emitInst(IsWeak, SuccessMemorder, AtomicOrdering::Monotonic);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(MonotonicResult, Builder.GetInsertBlock());
+
+ Builder.SetInsertPoint(AcquireBB);
+ Value *AcquireResult =
+ emitInst(IsWeak, SuccessMemorder, AtomicOrdering::Acquire);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(AcquireResult, Builder.GetInsertBlock());
+
+ Builder.SetInsertPoint(SeqCstBB);
+ Value *SeqCstResult = emitInst(IsWeak, SuccessMemorder,
+ AtomicOrdering::SequentiallyConsistent);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(SeqCstResult, Builder.GetInsertBlock());
+
+ Builder.restoreIP(ContIP);
+ return Result;
+ };
+
+ Value *emitSuccessMemorderSwitch(bool IsWeak) {
+ if (SuccessMemorderConst)
+ return emitFailureMemorderSwitch(IsWeak, *SuccessMemorderConst);
+
+ Type *BoolTy = Builder.getInt1Ty();
+
+ // Create all the relevant BB's
+ BasicBlock *ContBB =
+ splitBB(Builder, /*CreateBranch=*/false,
+ Name + "." + getBuiltinSig() + ".memorder.continue");
+ BasicBlock *MonotonicBB = createBasicBlock("monotonic");
+ BasicBlock *AcquireBB =
+ supportsAcquireOrdering() ? createBasicBlock("acquire") : nullptr;
+ BasicBlock *ReleaseBB =
+ supportsReleaseOrdering() ? createBasicBlock("release") : nullptr;
+ BasicBlock *AcqRelBB =
+ supportsAcquireOrdering() && supportsReleaseOrdering()
+ ? createBasicBlock("acqrel")
+ : nullptr;
+ BasicBlock *SeqCstBB = createBasicBlock("seqcst");
+
+ // Create the switch for the split
+ // MonotonicBB is arbitrarily chosen as the default case; in practice,
+ // this doesn't matter unless someone is crazy enough to use something
+ // that doesn't fold to a constant for the ordering.
+ IntegerType *IntTy = getIntTy(Builder, EmitOptions.TLI);
+ Value *Order = Builder.CreateIntCast(SuccessMemorderCABI, IntTy, false);
+ SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+ // TODO: No PHI if operation cannot fail
+ Builder.SetInsertPoint(ContBB, ContBB->begin());
+ PHINode *Result = Builder.CreatePHI(
+ BoolTy, 5, Name + "." + getBuiltinSig() + ".memorder.success");
+ IRBuilderBase::InsertPoint ContIP = Builder.saveIP();
+
+ // Emit all the different atomics
+ Builder.SetInsertPoint(MonotonicBB);
+ Value *MonotonicResult =
+ emitFailureMemorderSwitch(IsWeak, AtomicOrdering::Monotonic);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(MonotonicResult, Builder.GetInsertBlock());
+
+ if (AcquireBB) {
+ SI->addCase(
+ Builder.getInt32(static_cast<uint32_t>(AtomicOrderingCABI::consume)),
+ AcquireBB);
+ SI->addCase(
+ Builder.getInt32(static_cast<uint32_t>(AtomicOrderingCABI::acquire)),
+ AcquireBB);
+ Builder.SetInsertPoint(AcquireBB);
+ Value *AcquireResult =
+ emitFailureMemorderSwitch(IsWeak, AtomicOrdering::Acquire);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(AcquireResult, Builder.GetInsertBlock());
+ }
+
+ if (ReleaseBB) {
+ SI->addCase(
+ Builder.getInt32(static_cast<uint32_t>(AtomicOrderingCABI::release)),
+ ReleaseBB);
+ Builder.SetInsertPoint(ReleaseBB);
+ Value *ReleaseResult =
+ emitFailureMemorderSwitch(IsWeak, AtomicOrdering::Release);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(ReleaseResult, Builder.GetInsertBlock());
+ }
+
+ if (AcqRelBB) {
+ SI->addCase(
+ Builder.getInt32(static_cast<uint32_t>(AtomicOrderingCABI::acq_rel)),
+ AcqRelBB);
+ Builder.SetInsertPoint(AcqRelBB);
+ Value *AcqRelResult =
+ emitFailureMemorderSwitch(IsWeak, AtomicOrdering::AcquireRelease);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(AcqRelResult, Builder.GetInsertBlock());
+ }
+
+ SI->addCase(
+ Builder.getInt32(static_cast<uint32_t>(AtomicOrderingCABI::seq_cst)),
+ SeqCstBB);
+ Builder.SetInsertPoint(SeqCstBB);
+ Value *SeqCstResult = emitFailureMemorderSwitch(
+ IsWeak, AtomicOrdering::SequentiallyConsistent);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(SeqCstResult, Builder.GetInsertBlock());
+
+ Builder.restoreIP(ContIP);
+ return Result;
+ };
+
+ Value *emitWeakSwitch() {
+ if (IsWeakConst)
+ return emitSuccessMemorderSwitch(*IsWeakConst);
+
+ // Create all the relevant BBs
+ BasicBlock *ContBB =
+ splitBB(Builder, /*CreateBranch=*/false,
+ Name + "." + getBuiltinSig() + ".weak.continue");
+ BasicBlock *StrongBB = createBasicBlock("strong");
+ BasicBlock *WeakBB = createBasicBlock("weak");
+
+    // FIXME: Originally copied from CGAtomic. Why does it use a switch?
+ SwitchInst *SI = Builder.CreateSwitch(IsWeakVal, WeakBB);
+ SI->addCase(Builder.getInt1(false), StrongBB);
+
+ Builder.SetInsertPoint(ContBB, ContBB->begin());
+ PHINode *Result =
+ Builder.CreatePHI(Builder.getInt1Ty(), 2,
+ Name + "." + getBuiltinSig() + ".isweak.success");
+ IRBuilderBase::InsertPoint ContIP = Builder.saveIP();
+
+ Builder.SetInsertPoint(StrongBB);
+ Value *StrongResult = emitSuccessMemorderSwitch(false);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(StrongResult, Builder.GetInsertBlock());
+
+ Builder.SetInsertPoint(WeakBB);
+ Value *WeakResult = emitSuccessMemorderSwitch(true);
+ Builder.CreateBr(ContBB);
+ Result->addIncoming(WeakResult, Builder.GetInsertBlock());
+
+ Builder.restoreIP(ContIP);
+ return Result;
+ };
+
+ virtual Expected<Value *> emitSizedLibcall() = 0;
+
+ virtual Expected<Value *> emitLibcall() = 0;
+
+ virtual Expected<Value *> makeFallbackError() = 0;
+
+ Expected<Value *> emit() {
+ assert(AtomicPtr->getType()->isPointerTy() &&
+ "Atomic must apply on pointer");
+ assert(EmitOptions.TLI && "TargetLibraryInfo is mandatory");
+
+ unsigned MaxAtomicSizeSupported = 16;
+ if (EmitOptions.TL)
+ MaxAtomicSizeSupported =
+ EmitOptions.TL->getMaxAtomicSizeInBitsSupported() / 8;
+
+    // Determine the data size. It could still be unknown after this with
+    // scalable (SVE) types, but neither atomic instructions nor libcall
+    // functions support those. After this, DataSize can be assumed to have a
+    // value.
+ Type *DataType = nullptr;
+ if (std::holds_alternative<Type *>(TypeOrSize)) {
+ DataType = std::get<Type *>(TypeOrSize);
+ TypeSize DS = EmitOptions.DL.getTypeStoreSize(DataType);
+ assert(DS.isFixed() && "Atomics on scalable types are invalid");
+ DataSize = DS.getFixedValue();
+ } else {
+ DataSize = std::get<uint64_t>(TypeOrSize);
+ }
+
+#ifndef NDEBUG
+ if (DataType) {
+      // 'long double' (80-bit extended precision) behaves strangely here.
+ // DL.getTypeStoreSize says it is 10 bytes
+ // Clang assumes it is 12 bytes
+ // So AtomicExpandPass will disagree with CGAtomic (except for cmpxchg
+ // which does not support floats, so AtomicExpandPass doesn't even know it
+ // originally was an FP80)
+ TypeSize DS = EmitOptions.DL.getTypeStoreSize(DataType);
+ assert(DS.getKnownMinValue() <= DataSize &&
+ "Must access at least all the relevant bits of the data, possibly "
+ "some more for padding");
+ }
+#endif
+
+ if (Align) {
+ EffectiveAlign = *Align;
+ } else {
+ // https://llvm.org/docs/LangRef.html#cmpxchg-instruction
+ //
+ // The alignment is only optional when parsing textual IR; for in-memory
+ // IR, it is always present. If unspecified, the alignment is assumed to
+ // be equal to the size of the ‘<value>’ type.
+ //
+ // We prefer safety here and assume no alignment, unless
+ // getPointerAlignment() can determine the actual alignment.
+ // TODO: Would be great if this could determine alignment through a GEP
+ EffectiveAlign = AtomicPtr->getPointerAlignment(EmitOptions.DL);
+ }
+
+ // Only use the original data type if it is compatible with the atomic
+ // instruction (and sized libcall function) and matches the preferred size.
+    // No type punning is needed when using the libcall function, which only
+    // takes pointers.
+ if (!DataType)
+ DataType = IntegerType::get(Ctx, DataSize * 8);
+
+ // Additional type requirements when using an atomic instruction.
+    // Since we don't know the size of scalable (SVE) types, we can only keep
+    // the original type. If the type is too large, we must not attempt to pass
+    // it by value if it wasn't an integer already.
+ if (DataType->isIntegerTy() || DataType->isPointerTy() ||
+ (supportsInstOnFloat() && DataType->isFloatingPointTy()))
+ InstCoercedTy = DataType;
+ else if (DataSize > MaxAtomicSizeSupported)
+ InstCoercedTy = nullptr;
+ else
+ InstCoercedTy = IntegerType::get(Ctx, DataSize * 8);
+
+ Type *IntTy = getIntTy(Builder, EmitOptions.TLI);
+
+ // For resolving the SuccessMemorder/FailureMemorder arguments. If it is
+ // constant, determine the AtomicOrdering for use with the cmpxchg
+ // instruction. Also determines the llvm::Value to be passed to
+ // __atomic_compare_exchange in case cmpxchg is not legal.
+ auto processMemorder = [&](auto MemorderVariant)
+ -> std::pair<std::optional<AtomicOrdering>, Value *> {
+ if (holds_alternative_if_exists<std::monostate>(MemorderVariant)) {
+        // Derive FailureMemorder from SuccessMemorder
+ if (SuccessMemorderConst) {
+ MemorderVariant = AtomicCmpXchgInst::getStrongestFailureOrdering(
+ *SuccessMemorderConst);
+ } else {
+        // TODO: If SuccessMemorder is not constant, emit logic that derives
+        // the failure ordering from SuccessMemorderCABI as
+ // getStrongestFailureOrdering() would do. For now use the strongest
+ // possible ordering
+ MemorderVariant = AtomicOrderingCABI::seq_cst;
+ }
+ }
+
+ if (std::holds_alternative<AtomicOrdering>(MemorderVariant)) {
+ auto Memorder = std::get<AtomicOrdering>(MemorderVariant);
+ return std::make_pair(
+ Memorder,
+ ConstantInt::get(IntTy, static_cast<uint64_t>(toCABI(Memorder))));
+ }
+
+ if (std::holds_alternative<AtomicOrderingCABI>(MemorderVariant)) {
+ auto MemorderCABI = std::get<AtomicOrderingCABI>(MemorderVariant);
+ return std::make_pair(
+ fromCABI(MemorderCABI),
+ ConstantInt::get(IntTy, static_cast<uint64_t>(MemorderCABI)));
+ }
+
+ auto *MemorderCABI = std::get<Value *>(MemorderVariant);
+ if (auto *MO = dyn_cast<ConstantInt>(MemorderCABI)) {
+ uint64_t MOInt = MO->getZExtValue();
+ return std::make_pair(fromCABI(MOInt), MO);
+ }
+
+ return std::make_pair(std::nullopt, MemorderCABI);
+ };
+
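+ // Same as processMemorder, but for the IsWeak flag: determine the
+ // compile-time value if known, and the i1 llvm::Value for the dynamic case.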
+ auto processIsWeak =
+ [&](auto WeakVariant) -> std::pair<std::optional<bool>, Value *> {
+ if (std::holds_alternative<bool>(WeakVariant)) {
+ bool IsWeakBool = std::get<bool>(WeakVariant);
+ return std::make_pair(IsWeakBool, Builder.getInt1(IsWeakBool));
+ }
+
+ auto *BoolVal = std::get<Value *>(WeakVariant);
+ if (auto *BoolConst = dyn_cast<ConstantInt>(BoolVal)) {
+ uint64_t IsWeakBool = BoolConst->getZExtValue();
+ return std::make_pair(IsWeakBool != 0, BoolVal);
+ }
+
+ return std::make_pair(std::nullopt, BoolVal);
+ };
+
+ std::tie(IsWeakConst, IsWeakVal) = processIsWeak(IsWeak);
+ std::tie(SuccessMemorderConst, SuccessMemorderCABI) =
+ processMemorder(SuccessMemorder);
+ std::tie(FailureMemorderConst, FailureMemorderCABI) =
+ processMemorder(FailureMemorder);
+
+ // Fix malformed inputs. We do not want to emit illegal IR.
+ //
+ // https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+ //
+ // [failure_memorder] This memory order cannot be __ATOMIC_RELEASE nor
+ // __ATOMIC_ACQ_REL. It also cannot be a stronger order than that
+ // specified by success_memorder.
+ //
+ // https://llvm.org/docs/LangRef.html#cmpxchg-instruction
+ //
+ // Both ordering parameters must be at least monotonic, the failure
+ // ordering cannot be either release or acq_rel.
+ //
+ if (FailureMemorderConst &&
+ ((*FailureMemorderConst == AtomicOrdering::Release) ||
+ (*FailureMemorderConst == AtomicOrdering::AcquireRelease))) {
+ // Fall back to monotonic ordering when an illegal value is passed. As with
+ // the dynamic case below, it is an arbitrary choice.
+ FailureMemorderConst = AtomicOrdering::Monotonic;
+ }
+ if (FailureMemorderConst && SuccessMemorderConst &&
+ !isAtLeastOrStrongerThan(*SuccessMemorderConst,
+ *FailureMemorderConst)) {
+ // Make SuccessMemorder at least as strong as FailureMemorder.
+ SuccessMemorderConst =
+ getMergedAtomicOrdering(*SuccessMemorderConst, *FailureMemorderConst);
+ }
+
+ // https://llvm.org/docs/LangRef.html#cmpxchg-instruction
+ //
+ // The type of ‘<cmp>’ must be an integer or pointer type whose bit width
+ // is a power of two greater than or equal to eight and less than or equal
+ // to a target-specific size limit.
+ bool CanUseInst = DataSize <= MaxAtomicSizeSupported &&
+ llvm::isPowerOf2_64(DataSize) && InstCoercedTy;
+ bool CanUseSingleInst = CanUseInst && SuccessMemorderConst &&
+ FailureMemorderConst && IsWeakConst;
+ bool CanUseSizedLibcall =
+ canUseSizedAtomicCall(DataSize, EffectiveAlign, EmitOptions.DL) &&
+ Scope == SyncScope::System;
+ bool CanUseLibcall = Scope == SyncScope::System;
+
+ if (CanUseSingleInst && EmitOptions.AllowInstruction) {
+ prepareInst();
+ return emitInst(*IsWeakConst, *SuccessMemorderConst,
+ *FailureMemorderConst);
+ }
+
+ // Switching is only needed for the cmpxchg instruction, which requires
+ // constant arguments.
+ // FIXME: If AtomicExpandPass later considers the cmpxchg not lowerable for
+ // the given target, it will also generate a call to the
+ // __atomic_compare_exchange function. In that case the switching was
+ // unnecessary but cannot be undone.
+ if (CanUseInst && EmitOptions.AllowSwitch && EmitOptions.AllowInstruction) {
+ prepareInst();
+ return emitWeakSwitch();
+ }
+
+ // Fall back to a libcall function. From here on, IsWeak/Scope/IsVolatile
+ // are ignored. IsWeak is assumed to be false, Scope is assumed to be
+ // SyncScope::System (strongest possible assumption synchronizing with
+ // everything, instead of just a subset of sibling threads), and volatile
+ // does not apply to function calls.
+
+ if (CanUseSizedLibcall && EmitOptions.AllowSizedLibcall) {
+ Expected<Value *> SizedLibcallResult = emitSizedLibcall();
+ if (SizedLibcallResult)
+ return SizedLibcallResult;
+ consumeError(SizedLibcallResult.takeError());
+ }
+
+ if (CanUseLibcall && EmitOptions.AllowLibcall) {
+ Expected<Value *> LibcallResult = emitLibcall();
+ if (LibcallResult)
+ return LibcallResult;
+ consumeError(LibcallResult.takeError());
+ }
+
+ return makeFallbackError();
+ }
+};
+
+class AtomicLoadEmitter final : public AtomicEmitter {
+public:
+ using AtomicEmitter::AtomicEmitter;
+
+ Error emitLoad(Value *RetPtr) {
+ assert(RetPtr->getType()->isPointerTy());
+ this->RetPtr = RetPtr;
+ return emit().takeError();
+ }
+
+protected:
+ Value *RetPtr;
+
+ bool supportsReleaseOrdering() const override { return false; }
+
+ Value *emitInst(bool IsWeak, AtomicOrdering SuccessMemorder,
+ AtomicOrdering FailureMemorder) override {
+ LoadInst *AtomicInst = Builder.CreateLoad(
+ InstCoercedTy, AtomicPtr, IsVolatile, Name + ".atomic.load");
+ AtomicInst->setAtomic(SuccessMemorder, Scope);
+ AtomicInst->setAlignment(EffectiveAlign);
+
+ // Store loaded result to where the caller expects it.
+ // FIXME: Do we need to zero the padding, if any?
+ Builder.CreateStore(AtomicInst, RetPtr, IsVolatile);
+ return Builder.getTrue();
+ }
+
+ Expected<Value *> emitSizedLibcall() override {
+ Value *LoadResult =
+ emitAtomicLoadN(DataSize, AtomicPtr, SuccessMemorderCABI, Builder,
+ EmitOptions.DL, EmitOptions.TLI);
+ if (LoadResult) {
+ LoadResult->setName(Name);
+ Builder.CreateStore(LoadResult, RetPtr);
+ return Builder.getTrue();
+ }
+
+ // emitAtomicLoadN can return nullptr if the backend does not support
+ // sized libcalls. Fall back to the non-sized libcall.
+ return make_error<StringError>("__atomic_load_N libcall absent",
+ inconvertibleErrorCode());
+ }
+
+ Expected<Value *> emitLibcall() override {
+ // Fall back to a libcall function. From here on, IsWeak/Scope/IsVolatile
+ // are ignored. IsWeak is assumed to be false, Scope is assumed to be
+ // SyncScope::System (strongest possible assumption synchronizing with
+ // everything, instead of just a subset of sibling threads), and volatile
+ // does not apply to function calls.
+
+ Value *DataSizeVal =
+ ConstantInt::get(getSizeTTy(Builder, EmitOptions.TLI), DataSize);
+ Value *LoadCall =
+ emitAtomicLoad(DataSizeVal, AtomicPtr, RetPtr, SuccessMemorderCABI,
+ Builder, EmitOptions.DL, EmitOptions.TLI);
+ if (!LoadCall)
+ return make_error<StringError>("__atomic_load libcall absent",
+ inconvertibleErrorCode());
+
+ if (!LoadCall->getType()->isVoidTy())
+ LoadCall->setName(Name);
+ return Builder.getTrue();
+ }
+
+ Expected<Value *> makeFallbackError() override {
+ return make_error<StringError>(
+ "__atomic_load builtin not supported by any available means",
+ inconvertibleErrorCode());
+ }
+};
+
+class AtomicStoreEmitter final : public AtomicEmitter {
+public:
+ using AtomicEmitter::AtomicEmitter;
+
+ Error emitStore(Value *ValPtr) {
+ assert(ValPtr->getType()->isPointerTy());
+ this->ValPtr = ValPtr;
+ return emit().takeError();
+ }
+
+protected:
+ Value *ValPtr;
+ Value *Val;
+
+ bool supportsAcquireOrdering() const override { return false; }
+
+ void prepareInst() override {
+ Val = Builder.CreateLoad(InstCoercedTy, ValPtr, Name + ".atomic.val");
+ }
+
+ Value *emitInst(bool IsWeak, AtomicOrdering SuccessMemorder,
+ AtomicOrdering FailureMemorder) override {
+ StoreInst *AtomicInst = Builder.CreateStore(Val, AtomicPtr, IsVolatile);
+ AtomicInst->setAtomic(SuccessMemorder, Scope);
+ AtomicInst->setAlignment(EffectiveAlign);
+ return Builder.getTrue();
+ }
+
+ Expected<Value *> emitSizedLibcall() override {
+ Val = Builder.CreateLoad(CoercedTy, ValPtr, Name + ".atomic.val");
+ Value *StoreCall =
+ emitAtomicStoreN(DataSize, AtomicPtr, Val, SuccessMemorderCABI, Builder,
+ EmitOptions.DL, EmitOptions.TLI);
+ if (StoreCall)
+ return Builder.getTrue();
+
+ // emitAtomicStoreN can return nullptr if the backend does not support
+ // sized libcalls. Fall back to the non-sized libcall.
+ return make_error<StringError>("__atomic_store_N libcall absent",
+ inconvertibleErrorCode());
+ }
+
+ Expected<Value *> emitLibcall() override {
+ // Fall back to a libcall function. From here on, IsWeak/Scope/IsVolatile
+ // are ignored. IsWeak is assumed to be false, Scope is assumed to be
+ // SyncScope::System (strongest possible assumption synchronizing with
+ // everything, instead of just a subset of sibling threads), and volatile
+ // does not apply to function calls.
+
+ Value *DataSizeVal =
+ ConstantInt::get(getSizeTTy(Builder, EmitOptions.TLI), DataSize);
+ Value *StoreCall =
+ emitAtomicStore(DataSizeVal, AtomicPtr, ValPtr, SuccessMemorderCABI,
+ Builder, EmitOptions.DL, EmitOptions.TLI);
+ if (!StoreCall)
+ return make_error<StringError>("__atomic_store libcall absent",
+ inconvertibleErrorCode());
+
+ return Builder.getTrue();
+ }
+
+ Expected<Value *> makeFallbackError() override {
+ return make_error<StringError>(
+ "__atomic_store builtin not supported by any available means",
+ inconvertibleErrorCode());
+ }
+};
+
+class AtomicCompareExchangeEmitter final : public AtomicEmitter {
+public:
+ using AtomicEmitter::AtomicEmitter;
+
+ Expected<Value *> emitCmpXchg(Value *ExpectedPtr, Value *DesiredPtr,
+ Value *ActualPtr) {
+ assert(ExpectedPtr->getType()->isPointerTy());
+ assert(DesiredPtr->getType()->isPointerTy());
+ assert(!ActualPtr || ActualPtr->getType()->isPointerTy());
+ assert(AtomicPtr != ExpectedPtr);
+ assert(AtomicPtr != DesiredPtr);
+ assert(AtomicPtr != ActualPtr);
+ assert(ActualPtr != DesiredPtr);
+
+ this->ExpectedPtr = ExpectedPtr;
+ this->DesiredPtr = DesiredPtr;
+ this->ActualPtr = ActualPtr;
+ return emit();
+ }
+
+protected:
+ Value *ExpectedPtr;
+ Value *DesiredPtr;
+ Value *ActualPtr;
+ Value *ExpectedVal;
+ Value *DesiredVal;
+
+ const char *getBuiltinSig() const override { return "cmpxchg"; }
+
+ bool supportsInstOnFloat() const override { return false; }
+
+ void prepareInst() override {
+ ExpectedVal = Builder.CreateLoad(InstCoercedTy, ExpectedPtr,
+ Name + ".cmpxchg.expected");
+ DesiredVal = Builder.CreateLoad(InstCoercedTy, DesiredPtr,
+ Name + ".cmpxchg.desired");
+ }
+
+ Value *emitInst(bool IsWeak, AtomicOrdering SuccessMemorder,
+ AtomicOrdering FailureMemorder) override {
+ AtomicCmpXchgInst *AtomicInst =
+ Builder.CreateAtomicCmpXchg(AtomicPtr, ExpectedVal, DesiredVal, Align,
+ SuccessMemorder, FailureMemorder, Scope);
+ AtomicInst->setName(Name + ".cmpxchg.pair");
+ AtomicInst->setAlignment(EffectiveAlign);
+ AtomicInst->setWeak(IsWeak);
+ AtomicInst->setVolatile(IsVolatile);
+
+ if (ActualPtr) {
+ Value *ActualVal = Builder.CreateExtractValue(AtomicInst, /*Idxs=*/0,
+ Name + ".cmpxchg.prev");
+ Builder.CreateStore(ActualVal, ActualPtr);
+ }
+ Value *SuccessFailureVal = Builder.CreateExtractValue(
+ AtomicInst, /*Idxs=*/1, Name + ".cmpxchg.success");
+
+ assert(SuccessFailureVal->getType()->isIntegerTy(1));
+ return SuccessFailureVal;
+ }
+
+ Expected<Value *> emitSizedLibcall() override {
+ LoadInst *DesiredVal =
+ Builder.CreateLoad(IntegerType::get(Ctx, DataSize * 8), DesiredPtr,
+ Name + ".cmpxchg.desired");
+ Value *SuccessResult = emitAtomicCompareExchangeN(
+ DataSize, AtomicPtr, ExpectedPtr, DesiredVal, SuccessMemorderCABI,
+ FailureMemorderCABI, Builder, EmitOptions.DL, EmitOptions.TLI);
+ if (SuccessResult) {
+ Value *SuccessBool =
+ Builder.CreateCmp(CmpInst::Predicate::ICMP_EQ, SuccessResult,
+ Builder.getInt8(0), Name + ".cmpxchg.success");
+
+ if (ActualPtr && ActualPtr != ExpectedPtr)
+ Builder.CreateMemCpy(ActualPtr, {}, ExpectedPtr, {}, DataSize);
+ return SuccessBool;
+ }
+
+ // emitAtomicCompareExchangeN can return nullptr if the backend does not
+ // support sized libcalls. Fall back to the non-sized libcall and remove the
+ // unused load again.
+ DesiredVal->eraseFromParent();
+ return make_error<StringError>("__atomic_compare_exchange_N libcall absent",
+ inconvertibleErrorCode());
+ }
+
+ Expected<Value *> emitLibcall() override {
+ // FIXME: Some AMDGCN regression tests use non-zero address spaces, but
+ // __atomic_compare_exchange by definition takes addrspace(0) pointers and
+ // emitAtomicCompareExchange will complain about it.
+ if (AtomicPtr->getType()->getPointerAddressSpace() ||
+ ExpectedPtr->getType()->getPointerAddressSpace() ||
+ DesiredPtr->getType()->getPointerAddressSpace())
+ return Builder.getInt1(false);
+
+ Value *SuccessResult = emitAtomicCompareExchange(
+ ConstantInt::get(getSizeTTy(Builder, EmitOptions.TLI), DataSize),
+ AtomicPtr, ExpectedPtr, DesiredPtr, SuccessMemorderCABI,
+ FailureMemorderCABI, Builder, EmitOptions.DL, EmitOptions.TLI);
+ if (!SuccessResult)
+ return make_error<StringError>("__atomic_compare_exchange libcall absent",
+ inconvertibleErrorCode());
+
+ Value *SuccessBool =
+ Builder.CreateCmp(CmpInst::Predicate::ICMP_EQ, SuccessResult,
+ Builder.getInt8(0), Name + ".cmpxchg.success");
+
+ if (ActualPtr && ActualPtr != ExpectedPtr)
+ Builder.CreateMemCpy(ActualPtr, {}, ExpectedPtr, {}, DataSize);
+ return SuccessBool;
+ }
+
+ Expected<Value *> makeFallbackError() override {
+ return make_error<StringError>("__atomic_compare_exchange builtin not "
+ "supported by any available means",
+ inconvertibleErrorCode());
+ }
+};
+
+} // namespace
+
+Error llvm::emitAtomicLoadBuiltin(
+ Value *AtomicPtr, Value *RetPtr, std::variant<Type *, uint64_t> TypeOrSize,
+ bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> Memorder,
+ SyncScope::ID Scope, MaybeAlign Align, IRBuilderBase &Builder,
+ AtomicEmitOptions EmitOptions, const Twine &Name) {
+ AtomicLoadEmitter Emitter(AtomicPtr, TypeOrSize, false, IsVolatile, Memorder,
+ {}, Scope, Align, Builder, EmitOptions, Name);
+ return Emitter.emitLoad(RetPtr);
+}
+
+Error llvm::emitAtomicStoreBuiltin(
+ Value *AtomicPtr, Value *ValPtr, std::variant<Type *, uint64_t> TypeOrSize,
+ bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> Memorder,
+ SyncScope::ID Scope, MaybeAlign Align, IRBuilderBase &Builder,
+ AtomicEmitOptions EmitOptions, const Twine &Name) {
+ AtomicStoreEmitter Emitter(AtomicPtr, TypeOrSize, false, IsVolatile, Memorder,
+ {}, Scope, Align, Builder, EmitOptions, Name);
+ return Emitter.emitStore(ValPtr);
+}
+
+Expected<Value *> llvm::emitAtomicCompareExchangeBuiltin(
+ Value *AtomicPtr, Value *ExpectedPtr, Value *DesiredPtr,
+ std::variant<Type *, uint64_t> TypeOrSize,
+ std::variant<Value *, bool> IsWeak, bool IsVolatile,
+ std::variant<Value *, AtomicOrdering, AtomicOrderingCABI> SuccessMemorder,
+ std::variant<std::monostate, Value *, AtomicOrdering, AtomicOrderingCABI>
+ FailureMemorder,
+ SyncScope::ID Scope, Value *PrevPtr, MaybeAlign Align,
+ IRBuilderBase &Builder, AtomicEmitOptions EmitOptions, const Twine &Name) {
+ AtomicCompareExchangeEmitter Emitter(
+ AtomicPtr, TypeOrSize, IsWeak, IsVolatile, SuccessMemorder,
+ FailureMemorder, Scope, Align, Builder, EmitOptions, Name);
+ return Emitter.emitCmpXchg(ExpectedPtr, DesiredPtr, PrevPtr);
+}
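For illustration, a minimal sketch of how a caller could drive the new compare-exchange helper (assuming an already positioned IRBuilder `Builder`, a `DataLayout DL`, a `TargetLibraryInfo TLI`, and placeholder pointer Values `Ptr`, `ExpectedPtr`, and `DesiredPtr`):

  // Strong i32 compare-exchange with seq_cst/acquire orderings (sketch only;
  // Ptr, ExpectedPtr, DesiredPtr are placeholder ptr-typed Values).
  Expected<Value *> Success = emitAtomicCompareExchangeBuiltin(
      /*AtomicPtr=*/Ptr, /*ExpectedPtr=*/ExpectedPtr, /*DesiredPtr=*/DesiredPtr,
      /*TypeOrSize=*/Builder.getInt32Ty(),
      /*IsWeak=*/false, /*IsVolatile=*/false,
      /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
      /*FailureMemorder=*/AtomicOrdering::Acquire,
      /*Scope=*/SyncScope::System, /*PrevPtr=*/nullptr, /*Align=*/{},
      Builder, AtomicEmitOptions(DL, &TLI), "cmpxchg");
  if (!Success)
    report_fatal_error(Success.takeError());

Depending on AtomicEmitOptions and what the target supports, this emits either a cmpxchg instruction, a sized __atomic_compare_exchange_N call, or a generic __atomic_compare_exchange call; the returned Value is the i1 success flag.
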
diff --git a/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 24eefc91117b4..47b7a16de7aa4 100644
--- a/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -1346,6 +1346,29 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
Changed |= setWillReturn(F);
Changed |= setOnlyWritesArgMemOrErrnoMem(F);
break;
+ case LibFunc_atomic_load:
+ case LibFunc_atomic_load_1:
+ case LibFunc_atomic_load_2:
+ case LibFunc_atomic_load_4:
+ case LibFunc_atomic_load_8:
+ case LibFunc_atomic_load_16:
+ case LibFunc_atomic_store:
+ case LibFunc_atomic_store_1:
+ case LibFunc_atomic_store_2:
+ case LibFunc_atomic_store_4:
+ case LibFunc_atomic_store_8:
+ case LibFunc_atomic_store_16:
+ case LibFunc_atomic_compare_exchange:
+ case LibFunc_atomic_compare_exchange_1:
+ case LibFunc_atomic_compare_exchange_2:
+ case LibFunc_atomic_compare_exchange_4:
+ case LibFunc_atomic_compare_exchange_8:
+ case LibFunc_atomic_compare_exchange_16:
+ Changed |= setArgsNoUndef(F);
+ Changed |= setDoesNotThrow(F);
+ Changed |= setWillReturn(F);
+ Changed |= setOnlyAccessesInaccessibleMemOrArgMem(F);
+ break;
default:
// FIXME: It'd be really nice to cover all the library functions we're
// aware of here.
@@ -1443,6 +1466,49 @@ FunctionCallee llvm::getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI,
setArgExtAttr(*F, 2, TLI);
break;
+ case LibFunc_atomic_load:
+ setArgExtAttr(*F, 3, TLI); // Memorder
+ break;
+
+ case LibFunc_atomic_load_1:
+ case LibFunc_atomic_load_2:
+ case LibFunc_atomic_load_4:
+ case LibFunc_atomic_load_8:
+ case LibFunc_atomic_load_16:
+ setRetExtAttr(*F, TLI); // return
+ setArgExtAttr(*F, 1, TLI); // Memorder
+ break;
+
+ case LibFunc_atomic_store:
+ setArgExtAttr(*F, 3, TLI); // Memorder
+ break;
+
+ case LibFunc_atomic_store_1:
+ case LibFunc_atomic_store_2:
+ case LibFunc_atomic_store_4:
+ case LibFunc_atomic_store_8:
+ case LibFunc_atomic_store_16:
+ setArgExtAttr(*F, 1, TLI); // Val
+ setArgExtAttr(*F, 2, TLI); // Memorder
+ break;
+
+ case LibFunc_atomic_compare_exchange:
+ setRetExtAttr(*F, TLI); // return
+ setArgExtAttr(*F, 4, TLI); // SuccessMemorder
+ setArgExtAttr(*F, 5, TLI); // FailureMemorder
+ break;
+
+ case LibFunc_atomic_compare_exchange_1:
+ case LibFunc_atomic_compare_exchange_2:
+ case LibFunc_atomic_compare_exchange_4:
+ case LibFunc_atomic_compare_exchange_8:
+ case LibFunc_atomic_compare_exchange_16:
+ setRetExtAttr(*F, TLI); // return
+ setArgExtAttr(*F, 2, TLI); // Desired
+ setArgExtAttr(*F, 3, TLI); // SuccessMemorder
+ setArgExtAttr(*F, 4, TLI); // FailureMemorder
+ break;
+
// These are functions that are known to not need any argument extension
// on any target: A size_t argument (which may be an i32 on some targets)
// should not trigger the assert below.
@@ -1568,7 +1634,8 @@ static Value *emitLibCall(LibFunc TheLibFunc, Type *ReturnType,
FunctionType *FuncType = FunctionType::get(ReturnType, ParamTypes, IsVaArgs);
FunctionCallee Callee = getOrInsertLibFunc(M, *TLI, TheLibFunc, FuncType);
inferNonMandatoryLibFuncAttrs(M, FuncName, *TLI);
- CallInst *CI = B.CreateCall(Callee, Operands, FuncName);
+ CallInst *CI = B.CreateCall(Callee, Operands,
+ ReturnType->isVoidTy() ? Twine() : FuncName);
if (const Function *F =
dyn_cast<Function>(Callee.getCallee()->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
@@ -1807,6 +1874,153 @@ Value *llvm::emitVSPrintf(Value *Dest, Value *Fmt, Value *VAList,
{Dest, Fmt, VAList}, B, TLI);
}
+Value *llvm::emitAtomicLoad(Value *Size, Value *Ptr, Value *Ret,
+ Value *Memorder, IRBuilderBase &B,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
+ Type *VoidTy = B.getVoidTy();
+ Type *SizeTTy = getSizeTTy(B, TLI);
+ Type *PtrTy = B.getPtrTy();
+ Type *IntTy = getIntTy(B, TLI);
+ return emitLibCall(LibFunc_atomic_load, VoidTy,
+ {SizeTTy, PtrTy, PtrTy, IntTy}, {Size, Ptr, Ret, Memorder},
+ B, TLI);
+}
+
+Value *llvm::emitAtomicLoadN(size_t Size, Value *Ptr, Value *Memorder,
+ IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
+ LibFunc TheLibFunc;
+ switch (Size) {
+ case 1:
+ TheLibFunc = LibFunc_atomic_load_1;
+ break;
+ case 2:
+ TheLibFunc = LibFunc_atomic_load_2;
+ break;
+ case 4:
+ TheLibFunc = LibFunc_atomic_load_4;
+ break;
+ case 8:
+ TheLibFunc = LibFunc_atomic_load_8;
+ break;
+ case 16:
+ TheLibFunc = LibFunc_atomic_load_16;
+ break;
+ default:
+ // emitLibCall below is also allowed to return nullptr, e.g. if
+ // TargetLibraryInfo says the backend does not support the libcall function.
+ return nullptr;
+ }
+
+ Type *PtrTy = B.getPtrTy();
+ Type *ValTy = B.getIntNTy(Size * 8);
+ Type *IntTy = getIntTy(B, TLI);
+ return emitLibCall(TheLibFunc, ValTy, {PtrTy, IntTy}, {Ptr, Memorder}, B,
+ TLI);
+}
+
+Value *llvm::emitAtomicStore(Value *Size, Value *Ptr, Value *ValPtr,
+ Value *Memorder, IRBuilderBase &B,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
+ Type *VoidTy = B.getVoidTy();
+ Type *SizeTTy = getSizeTTy(B, TLI);
+ Type *PtrTy = B.getPtrTy();
+ Type *IntTy = getIntTy(B, TLI);
+ return emitLibCall(LibFunc_atomic_store, VoidTy,
+ {SizeTTy, PtrTy, PtrTy, IntTy},
+ {Size, Ptr, ValPtr, Memorder}, B, TLI);
+}
+
+Value *llvm::emitAtomicStoreN(size_t Size, Value *Ptr, Value *Val,
+ Value *Memorder, IRBuilderBase &B,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
+ LibFunc TheLibFunc;
+ switch (Size) {
+ case 1:
+ TheLibFunc = LibFunc_atomic_store_1;
+ break;
+ case 2:
+ TheLibFunc = LibFunc_atomic_store_2;
+ break;
+ case 4:
+ TheLibFunc = LibFunc_atomic_store_4;
+ break;
+ case 8:
+ TheLibFunc = LibFunc_atomic_store_8;
+ break;
+ case 16:
+ TheLibFunc = LibFunc_atomic_store_16;
+ break;
+ default:
+ // emitLibCall below is also allowed to return nullptr, e.g. if
+ // TargetLibraryInfo says the backend does not support the libcall function.
+ return nullptr;
+ }
+
+ Type *VoidTy = B.getVoidTy();
+ Type *PtrTy = B.getPtrTy();
+ Type *ValTy = B.getIntNTy(Size * 8);
+ Type *IntTy = getIntTy(B, TLI);
+ return emitLibCall(TheLibFunc, VoidTy, {PtrTy, ValTy, IntTy},
+ {Ptr, Val, Memorder}, B, TLI);
+}
+
+Value *llvm::emitAtomicCompareExchange(Value *Size, Value *Ptr, Value *Expected,
+ Value *Desired, Value *SuccessMemorder,
+ Value *FailureMemorder, IRBuilderBase &B,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
+ Type *BoolTy = B.getInt8Ty();
+ Type *SizeTTy = getSizeTTy(B, TLI);
+ Type *PtrTy = B.getPtrTy();
+ Type *IntTy = getIntTy(B, TLI);
+ return emitLibCall(
+ LibFunc_atomic_compare_exchange, BoolTy,
+ {SizeTTy, PtrTy, PtrTy, PtrTy, IntTy, IntTy},
+ {Size, Ptr, Expected, Desired, SuccessMemorder, FailureMemorder}, B, TLI);
+}
+
+Value *llvm::emitAtomicCompareExchangeN(size_t Size, Value *Ptr,
+ Value *Expected, Value *Desired,
+ Value *SuccessMemorder,
+ Value *FailureMemorder,
+ IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
+ LibFunc TheLibFunc;
+ switch (Size) {
+ case 1:
+ TheLibFunc = LibFunc_atomic_compare_exchange_1;
+ break;
+ case 2:
+ TheLibFunc = LibFunc_atomic_compare_exchange_2;
+ break;
+ case 4:
+ TheLibFunc = LibFunc_atomic_compare_exchange_4;
+ break;
+ case 8:
+ TheLibFunc = LibFunc_atomic_compare_exchange_8;
+ break;
+ case 16:
+ TheLibFunc = LibFunc_atomic_compare_exchange_16;
+ break;
+ default:
+ // emitLibCall below is also allowed to return nullptr, e.g. if
+ // TargetLibraryInfo says the backend does not support the libcall function.
+ return nullptr;
+ }
+
+ Type *BoolTy = B.getInt8Ty();
+ Type *PtrTy = B.getPtrTy();
+ Type *ValTy = B.getIntNTy(Size * 8);
+ Type *IntTy = getIntTy(B, TLI);
+ return emitLibCall(TheLibFunc, BoolTy, {PtrTy, PtrTy, ValTy, IntTy, IntTy},
+ {Ptr, Expected, Desired, SuccessMemorder, FailureMemorder},
+ B, TLI);
+}
+
/// Append a suffix to the function name according to the type of 'Op'.
static void appendTypeSuffix(Value *Op, StringRef &Name,
SmallString<20> &NameBuffer) {
diff --git a/llvm/lib/Transforms/Utils/CMakeLists.txt b/llvm/lib/Transforms/Utils/CMakeLists.txt
index 78cad0d253be8..cc85126911d32 100644
--- a/llvm/lib/Transforms/Utils/CMakeLists.txt
+++ b/llvm/lib/Transforms/Utils/CMakeLists.txt
@@ -5,6 +5,7 @@ add_llvm_component_library(LLVMTransformUtils
AssumeBundleBuilder.cpp
BasicBlockUtils.cpp
BreakCriticalEdges.cpp
+ BuildBuiltins.cpp
BuildLibCalls.cpp
BypassSlowDivision.cpp
CallPromotionUtils.cpp
diff --git a/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml b/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
index 2d23b15d74b17..a9e5b8bbf67bf 100644
--- a/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
+++ b/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
@@ -54,10 +54,10 @@
## the exact count first; the two directives should add up to that.
## Yes, this means additions to TLI will fail this test, but the argument
## to -COUNT can't be an expression.
-# AVAIL: TLI knows 523 symbols, 289 available
+# AVAIL: TLI knows 539 symbols, 289 available
# AVAIL-COUNT-289: {{^}} available
# AVAIL-NOT: {{^}} available
-# UNAVAIL-COUNT-234: not available
+# UNAVAIL-COUNT-250: not available
# UNAVAIL-NOT: not available
## This is a large file so it's worth telling lit to stop here.
diff --git a/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp b/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
index 97722483aefe0..4f77a9017fc51 100644
--- a/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
+++ b/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
@@ -592,7 +592,25 @@ TEST_F(TargetLibraryInfoTest, ValidProto) {
"declare i8* @memrchr(i8*, i32, i64)\n"
"declare void @__atomic_load(i64, i8*, i8*, i32)\n"
+ "declare i8 @__atomic_load_1(ptr, i32)\n"
+ "declare i16 @__atomic_load_2(ptr, i32)\n"
+ "declare i32 @__atomic_load_4(ptr, i32)\n"
+ "declare i64 @__atomic_load_8(ptr, i32)\n"
+ "declare i128 @__atomic_load_16(ptr, i32)\n"
+
"declare void @__atomic_store(i64, i8*, i8*, i32)\n"
+ "declare void @__atomic_store_1(ptr, i8, i32)\n"
+ "declare void @__atomic_store_2(ptr, i16, i32)\n"
+ "declare void @__atomic_store_4(ptr, i32, i32)\n"
+ "declare void @__atomic_store_8(ptr, i64, i32)\n"
+ "declare void @__atomic_store_16(ptr, i128, i32)\n"
+
+ "declare i8 @__atomic_compare_exchange(i64, ptr, ptr, ptr, i32, i32)\n"
+ "declare i8 @__atomic_compare_exchange_1(ptr, ptr, i8, i32, i32)\n"
+ "declare i8 @__atomic_compare_exchange_2(ptr, ptr, i16, i32, i32)\n"
+ "declare i8 @__atomic_compare_exchange_4(ptr, ptr, i32, i32, i32)\n"
+ "declare i8 @__atomic_compare_exchange_8(ptr, ptr, i64, i32, i32)\n"
+ "declare i8 @__atomic_compare_exchange_16(ptr, ptr, i128, i32, i32)\n"
// These are similar to the FILE* fgetc/fputc.
"declare i32 @_IO_getc(%struct*)\n"
diff --git a/llvm/unittests/Transforms/Utils/BuildBuiltinsTest.cpp b/llvm/unittests/Transforms/Utils/BuildBuiltinsTest.cpp
new file mode 100644
index 0000000000000..26ae255e43006
--- /dev/null
+++ b/llvm/unittests/Transforms/Utils/BuildBuiltinsTest.cpp
@@ -0,0 +1,4462 @@
+//===- BuildBuiltinsTest.cpp - Unit tests for BuildBuiltins ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/BuildBuiltins.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/Testing/Support/Error.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
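+// Walk backwards from StartFromIt in StartFromBB (and recursively through its
+// predecessors) and record, for each path, the closest preceding instruction
+// that may write through Ptr.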
+static void
+followBackwardsLookForWrites(Value *Ptr, BasicBlock *StartFromBB,
+ BasicBlock::reverse_iterator StartFromIt,
+ DenseSet<BasicBlock *> &Visited,
+ SmallVectorImpl<Instruction *> &WriteAccs) {
+ for (auto &&I : make_range(StartFromIt, StartFromBB->rend())) {
+ if (!I.mayHaveSideEffects())
+ continue;
+ if (isa<LoadInst>(I))
+ continue;
+
+ if (auto *SI = dyn_cast<StoreInst>(&I)) {
+ if (SI->getPointerOperand() == Ptr) {
+ WriteAccs.push_back(SI);
+ return;
+ }
+ continue;
+ }
+ if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ if (CmpXchg->getPointerOperand() == Ptr) {
+ WriteAccs.push_back(CmpXchg);
+ return;
+ }
+ continue;
+ }
+
+ if (auto *ARMW = dyn_cast<AtomicRMWInst>(&I)) {
+ if (ARMW->getPointerOperand() == Ptr) {
+ WriteAccs.push_back(ARMW);
+ return;
+ }
+ continue;
+ }
+
+ if (auto *CI = dyn_cast<CallInst>(&I)) {
+ MemoryEffects ME = CI->getMemoryEffects();
+
+ if (isModSet(ME.getModRef(IRMemLocation::Other))) {
+ WriteAccs.push_back(CI);
+ return;
+ }
+
+ if (isModSet(ME.getModRef(IRMemLocation::ArgMem))) {
+ for (auto &&Ops : CI->args()) {
+ if (Ops.get() == Ptr) {
+ WriteAccs.push_back(CI);
+ return;
+ }
+ }
+ }
+ continue;
+ }
+
+ llvm_unreachable("TODO: Can this instruction access the ptr?");
+ }
+
+ Visited.insert(StartFromBB);
+ for (BasicBlock *Pred : predecessors(StartFromBB)) {
+ if (Visited.contains(Pred))
+ continue;
+
+ followBackwardsLookForWrites(Ptr, Pred, Pred->rbegin(), Visited, WriteAccs);
+ }
+}
+
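+// Return the unique instruction that may have written through Ptr before the
+// end of FromBB, or nullptr if there is none or more than one.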
+static Instruction *getUniquePreviousStore(Value *Ptr, BasicBlock *FromBB) {
+ SmallVector<Instruction *, 1> WriteAccs;
+ DenseSet<BasicBlock *> Visited;
+ followBackwardsLookForWrites(Ptr, FromBB, FromBB->rbegin(), Visited,
+ WriteAccs);
+ if (WriteAccs.size() == 1)
+ return WriteAccs.front();
+ return nullptr;
+}
+
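+// Common test fixture: creates a module with a single function whose arguments
+// serve as the atomic, result, expected, and desired pointers as well as
+// dynamic (non-constant) integer, predicate, and memory-order values.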
+class BuildBuiltinsTests : public testing::Test {
+protected:
+ LLVMContext Ctx;
+ std::unique_ptr<Module> M;
+ DataLayout DL;
+ std::unique_ptr<TargetLibraryInfoImpl> TLII;
+ std::unique_ptr<TargetLibraryInfo> TLI;
+ Function *F = nullptr;
+ Argument *PtrArg = nullptr;
+ Argument *RetArg = nullptr;
+ Argument *ExpectedArg = nullptr;
+ Argument *DesiredArg = nullptr;
+
+ Argument *ValArg = nullptr;
+ Argument *PredArg = nullptr;
+ Argument *MemorderArg = nullptr;
+ Argument *FailMemorderArg = nullptr;
+
+ BasicBlock *EntryBB = nullptr;
+ IRBuilder<> Builder;
+
+ BuildBuiltinsTests() : Builder(Ctx) {}
+
+ void SetUp() override {
+ M.reset(new Module("TestModule", Ctx));
+ DL = M->getDataLayout();
+
+ Triple T(M->getTargetTriple());
+ TLII.reset(new TargetLibraryInfoImpl(T));
+ TLI.reset(new TargetLibraryInfo(*TLII));
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Ctx),
+ {PointerType::get(Ctx, 0), PointerType::get(Ctx, 0),
+ PointerType::get(Ctx, 0), PointerType::get(Ctx, 0),
+ Type::getInt32Ty(Ctx), Type::getInt1Ty(Ctx),
+ Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx)},
+ /*isVarArg=*/false);
+ F = Function::Create(FTy, Function::ExternalLinkage, "TestFunction",
+ M.get());
+ PtrArg = F->getArg(0);
+ PtrArg->setName("atomic_ptr");
+ RetArg = F->getArg(1);
+ RetArg->setName("ret_ptr");
+
+ ExpectedArg = F->getArg(2);
+ ExpectedArg->setName("expected_ptr");
+ DesiredArg = F->getArg(3);
+ DesiredArg->setName("desired_ptr");
+
+ ValArg = F->getArg(4);
+ ValArg->setName("valarg");
+ PredArg = F->getArg(5);
+ PredArg->setName("predarg");
+
+ MemorderArg = F->getArg(6);
+ MemorderArg->setName("memorderarg_success");
+ FailMemorderArg = F->getArg(7);
+ FailMemorderArg->setName("memorderarg_failure");
+
+ EntryBB = BasicBlock::Create(Ctx, "entry", F);
+ Builder.SetInsertPoint(EntryBB);
+ ReturnInst *RetInst = Builder.CreateRetVoid();
+ Builder.SetInsertPoint(RetInst);
+ }
+
+ void TearDown() override {
+ EntryBB = nullptr;
+ F = nullptr;
+ M.reset();
+ }
+};
+
+TEST_F(BuildBuiltinsTests, AtomicLoad) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr seq_cst, align 1
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr seq_cst, a...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_SizedLibcall) {
+ AtomicEmitOptions EO(DL, TLI.get());
+ EO.AllowInstruction = false;
+
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/EO,
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_load(i64 4, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Call->getName(), "");
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 4);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_load"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Libcall) {
+ AtomicEmitOptions EO(DL, TLI.get());
+ EO.AllowInstruction = false;
+ EO.AllowSizedLibcall = false;
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/EO,
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_load(i64 4, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Call->getName(), "");
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 4);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_load"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Volatile) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/true,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic volatile i32, ptr %atomic_ptr seq_cst, align 1
+ // store volatile i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic volatile i32, ptr %atomic_ptr s...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store volatile i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 4);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::NotAtomic);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Memorder) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::Monotonic,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr monotonic, align 1
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr monotonic,...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(), AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Memorder_CABI) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrderingCABI::relaxed,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr monotonic, align 1
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr monotonic,...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(), AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Memorder_Switch) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/MemorderArg,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+ BasicBlock *ExitBB = Builder.GetInsertBlock();
+
+ // clang-format off
+ // entry:
+ // switch i32 %memorderarg_success, label %atomic_load.atomic.monotonic [
+ // i32 1, label %atomic_load.atomic.acquire
+ // i32 2, label %atomic_load.atomic.acquire
+ // i32 5, label %atomic_load.atomic.seqcst
+ // ]
+ //
+ // atomic_load.atomic.monotonic: ; preds = %entry
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr monotonic, align 1
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // br label %atomic_load.atomic.memorder.continue
+ //
+ // atomic_load.atomic.acquire: ; preds = %entry, %entry
+ // %atomic_load.atomic.load1 = load atomic i32, ptr %atomic_ptr acquire, align 1
+ // store i32 %atomic_load.atomic.load1, ptr %ret_ptr, align 4
+ // br label %atomic_load.atomic.memorder.continue
+ //
+ // atomic_load.atomic.seqcst: ; preds = %entry
+ // %atomic_load.atomic.load2 = load atomic i32, ptr %atomic_ptr seq_cst, align 1
+ // store i32 %atomic_load.atomic.load2, ptr %ret_ptr, align 4
+ // br label %atomic_load.atomic.memorder.continue
+ //
+ // atomic_load.atomic.memorder.continue: ; preds = %atomic_load.atomic.seqcst, %atomic_load.atomic.acquire, %atomic_load.atomic.monotonic
+ // %atomic_load.atomic.memorder.success = phi i1 [ true, %atomic_load.atomic.monotonic ], [ true, %atomic_load.atomic.acquire ], [ true, %atomic_load.atomic.seqcst ]
+ // ret void
+ // clang-format on
+
+ // Discover control flow graph
+ SwitchInst *Switch = cast<SwitchInst>(EntryBB->getTerminator());
+ BasicBlock *AtomicLoadAtomicAcquire =
+ cast<BasicBlock>(Switch->getSuccessor(1));
+ BasicBlock *AtomicLoadAtomicSeqcst =
+ cast<BasicBlock>(Switch->getSuccessor(3));
+ BasicBlock *AtomicLoadAtomicMonotonic =
+ cast<BasicBlock>(Switch->getDefaultDest());
+ BranchInst *Branch1 =
+ cast<BranchInst>(AtomicLoadAtomicMonotonic->getTerminator());
+ BranchInst *Branch2 =
+ cast<BranchInst>(AtomicLoadAtomicAcquire->getTerminator());
+ BranchInst *Branch3 =
+ cast<BranchInst>(AtomicLoadAtomicSeqcst->getTerminator());
+ ReturnInst *Return = cast<ReturnInst>(ExitBB->getTerminator());
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store1 =
+ cast<StoreInst>(getUniquePreviousStore(RetArg, AtomicLoadAtomicSeqcst));
+ LoadInst *AtomicLoadAtomicLoad2 = cast<LoadInst>(Store1->getValueOperand());
+ StoreInst *Store2 = cast<StoreInst>(
+ getUniquePreviousStore(RetArg, AtomicLoadAtomicMonotonic));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store2->getValueOperand());
+ StoreInst *Store3 =
+ cast<StoreInst>(getUniquePreviousStore(RetArg, AtomicLoadAtomicAcquire));
+ LoadInst *AtomicLoadAtomicLoad1 = cast<LoadInst>(Store3->getValueOperand());
+
+ // switch i32 %memorderarg_success, label %atomic_load.atomic.monotonic [
+ // i32 1, label %atomic_load.atomic.acquire
+ // i32 2, label %atomic_load.atomic.acquire
+ // i32 5, label %atomic_load.atomic.seqcst
+ // ]
+ EXPECT_TRUE(Switch->getName().empty());
+ EXPECT_EQ(Switch->getParent(), EntryBB);
+ EXPECT_EQ(Switch->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch->getDefaultDest(), AtomicLoadAtomicMonotonic);
+ EXPECT_EQ(cast<ConstantInt>(Switch->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch->getOperand(3), AtomicLoadAtomicAcquire);
+ EXPECT_EQ(cast<ConstantInt>(Switch->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch->getOperand(5), AtomicLoadAtomicAcquire);
+ EXPECT_EQ(cast<ConstantInt>(Switch->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch->getOperand(7), AtomicLoadAtomicSeqcst);
+
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr monotonic,...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), AtomicLoadAtomicMonotonic);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(), AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store2->getName().empty());
+ EXPECT_EQ(Store2->getParent(), AtomicLoadAtomicMonotonic);
+ EXPECT_TRUE(Store2->isSimple());
+ EXPECT_EQ(Store2->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store2->getPointerOperand(), RetArg);
+
+ // br label %atomic_load.atomic.memorder.continue
+ EXPECT_TRUE(Branch1->getName().empty());
+ EXPECT_EQ(Branch1->getParent(), AtomicLoadAtomicMonotonic);
+ EXPECT_EQ(Branch1->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch1->isUnconditional());
+ EXPECT_EQ(Branch1->getOperand(0), ExitBB);
+
+ // %atomic_load.atomic.load1 = load atomic i32, ptr %atomic_ptr acquire, ...
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getName(), "atomic_load.atomic.load1");
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getParent(), AtomicLoadAtomicAcquire);
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad1->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getOrdering(), AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad1->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load1, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store3->getName().empty());
+ EXPECT_EQ(Store3->getParent(), AtomicLoadAtomicAcquire);
+ EXPECT_TRUE(Store3->isSimple());
+ EXPECT_EQ(Store3->getValueOperand(), AtomicLoadAtomicLoad1);
+ EXPECT_EQ(Store3->getPointerOperand(), RetArg);
+
+ // br label %atomic_load.atomic.memorder.continue
+ EXPECT_TRUE(Branch2->getName().empty());
+ EXPECT_EQ(Branch2->getParent(), AtomicLoadAtomicAcquire);
+ EXPECT_EQ(Branch2->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch2->isUnconditional());
+ EXPECT_EQ(Branch2->getOperand(0), ExitBB);
+
+ // %atomic_load.atomic.load2 = load atomic i32, ptr %atomic_ptr seq_cst, ...
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getName(), "atomic_load.atomic.load2");
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getParent(), AtomicLoadAtomicSeqcst);
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad2->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad2->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load2, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store1->getName().empty());
+ EXPECT_EQ(Store1->getParent(), AtomicLoadAtomicSeqcst);
+ EXPECT_TRUE(Store1->isSimple());
+ EXPECT_EQ(Store1->getValueOperand(), AtomicLoadAtomicLoad2);
+ EXPECT_EQ(Store1->getPointerOperand(), RetArg);
+
+ // br label %atomic_load.atomic.memorder.continue
+ EXPECT_TRUE(Branch3->getName().empty());
+ EXPECT_EQ(Branch3->getParent(), AtomicLoadAtomicSeqcst);
+ EXPECT_EQ(Branch3->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch3->isUnconditional());
+ EXPECT_EQ(Branch3->getOperand(0), ExitBB);
+
+ // ret void
+ EXPECT_TRUE(Return->getName().empty());
+ EXPECT_EQ(Return->getParent(), ExitBB);
+ EXPECT_EQ(Return->getType(), Type::getVoidTy(Ctx));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_SyncScope) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::SingleThread,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr syncscope("singlethread") seq_cst, align 1
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr syncscope(...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::SingleThread);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Float) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getFloatTy(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic float, ptr %atomic_ptr seq_cst, align 1
+ // store float %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic float, ptr %atomic_ptr seq_cst,...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getFloatTy(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store float %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_FP80) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Type::getX86_FP80Ty(Ctx),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_load(i64 10, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Call->getName(), "");
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 10);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_load"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Ptr) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getPtrTy(), /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic ptr, ptr %atomic_ptr seq_cst, align 1
+ // store ptr %atomic_load.atomic.load, ptr %ret_ptr, align 8
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic ptr, ptr %atomic_ptr seq_cst, a...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), PointerType::get(Ctx, 0));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store ptr %atomic_load.atomic.load, ptr %ret_ptr, align 8
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Struct) {
+ // A struct that is small enough to be covered with a single instruction
+ StructType *STy =
+ StructType::get(Ctx, {Builder.getFloatTy(), Builder.getFloatTy()});
+
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/STy,
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic i64, ptr %atomic_ptr seq_cst, align 1
+ // store i64 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic i64, ptr %atomic_ptr seq_cst, a...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt64Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 1);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i64 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Array) {
+ // A type that is too large for atomic instructions
+ ArrayType *ATy = ArrayType::get(Builder.getFloatTy(), 19);
+
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/ATy,
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_load(i64 76, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 76);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_load"));
+}
+
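+// If the type is too large for an atomic instruction and the target provides
+// no libatomic (the PS4 triple below, per initializeLibCalls in
+// TargetLibraryInfo.cpp), the emitter is expected to fail with a diagnostic
+// instead of emitting IR.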
+TEST_F(BuildBuiltinsTests, AtomicLoad_Array_NoLibatomic) {
+ // Use a triple that does not support libatomic (according to
+ // initializeLibCalls in TargetLibraryInfo.cpp)
+ Triple T("x86_64-scei-ps4");
+ TLII.reset(new TargetLibraryInfoImpl(T));
+ TLI.reset(new TargetLibraryInfo(*TLII));
+
+ // A type that is too large for atomic instructions
+ ArrayType *ATy = ArrayType::get(Builder.getFloatTy(), 19);
+
+ ASSERT_THAT_ERROR(
+ emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg, /*TypeOrSize=*/ATy,
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ FailedWithMessage(
+ "__atomic_load builtin not supported by any available means"));
+}
+
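+// TypeOrSize can also be a raw byte count; for the 6-byte access below (not a
+// power of two) no atomic instruction applies, so the test expects the generic
+// __atomic_load libcall with the size as its first argument.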
+TEST_F(BuildBuiltinsTests, AtomicLoad_DataSize) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/static_cast<uint64_t>(6),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System, /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_load(i64 6, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 6);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_load"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicLoad_Align) {
+ ASSERT_THAT_ERROR(emitAtomicLoadBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/Align(8),
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_load"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr seq_cst, align 8
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(RetArg, EntryBB));
+ LoadInst *AtomicLoadAtomicLoad = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_load.atomic.load = load atomic i32, ptr %atomic_ptr seq_cst, a...
+ EXPECT_EQ(AtomicLoadAtomicLoad->getName(), "atomic_load.atomic.load");
+ EXPECT_EQ(AtomicLoadAtomicLoad->getParent(), EntryBB);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_FALSE(AtomicLoadAtomicLoad->isVolatile());
+ EXPECT_EQ(AtomicLoadAtomicLoad->getAlign(), 8);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicLoadAtomicLoad->getPointerOperand(), PtrArg);
+
+ // store i32 %atomic_load.atomic.load, ptr %ret_ptr, align 4
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isSimple());
+ EXPECT_EQ(Store->getValueOperand(), AtomicLoadAtomicLoad);
+ EXPECT_EQ(Store->getPointerOperand(), RetArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, al...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_SizedLibcall) {
+ AtomicEmitOptions EO(DL, TLI.get());
+ EO.AllowInstruction = false;
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/EO,
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_store(i64 4, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 4);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_store"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Libcall) {
+ AtomicEmitOptions EO(DL, TLI.get());
+ EO.AllowInstruction = false;
+ EO.AllowSizedLibcall = false;
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/EO,
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_store(i64 4, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 4);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_store"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Volatile) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/true,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // store atomic volatile i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic volatile i32 %atomic_store.atomic.val, ptr %atomic_ptr se...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_TRUE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Memorder) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::Monotonic,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr monotonic, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr monotonic, ...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::Monotonic);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
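+// Memory orders can also be given as C ABI constants (AtomicOrderingCABI);
+// 'relaxed' is expected to lower to the same monotonic store as the
+// AtomicOrdering::Monotonic variant above.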
+TEST_F(BuildBuiltinsTests, AtomicStore_Memorder_CABI) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrderingCABI::relaxed,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr monotonic, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr monotonic, ...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::Monotonic);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
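+// A memory order that is only known at run time (MemorderArg) is expected to
+// be lowered to a switch over the C ABI ordering values, with one atomic store
+// per ordering that is valid for a store (monotonic, release, seq_cst) and a
+// common continuation block.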
+TEST_F(BuildBuiltinsTests, AtomicStore_Memorder_Switch) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/MemorderArg,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+ BasicBlock *ExitBB = Builder.GetInsertBlock();
+
+ // clang-format off
+ // entry:
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // switch i32 %memorderarg_success, label %atomic_store.atomic.monotonic [
+ // i32 3, label %atomic_store.atomic.release
+ // i32 5, label %atomic_store.atomic.seqcst
+ // ]
+ //
+ // atomic_store.atomic.monotonic: ; preds = %entry
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr monotonic, align 1
+ // br label %atomic_store.atomic.memorder.continue
+ //
+ // atomic_store.atomic.release: ; preds = %entry
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr release, align 1
+ // br label %atomic_store.atomic.memorder.continue
+ //
+ // atomic_store.atomic.seqcst: ; preds = %entry
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 1
+ // br label %atomic_store.atomic.memorder.continue
+ //
+ // atomic_store.atomic.memorder.continue: ; preds = %atomic_store.atomic.seqcst, %atomic_store.atomic.release, %atomic_store.atomic.monotonic
+ // %atomic_store.atomic.memorder.success = phi i1 [ true, %atomic_store.atomic.monotonic ], [ true, %atomic_store.atomic.release ], [ true, %atomic_store.atomic.seqcst ]
+ // ret void
+ // clang-format on
+
+ // Discover control flow graph
+ SwitchInst *Switch = cast<SwitchInst>(EntryBB->getTerminator());
+  BasicBlock *AtomicStoreAtomicRelease = Switch->getSuccessor(1);
+  BasicBlock *AtomicStoreAtomicSeqcst = Switch->getSuccessor(2);
+  BasicBlock *AtomicStoreAtomicMonotonic = Switch->getDefaultDest();
+ BranchInst *Branch1 =
+ cast<BranchInst>(AtomicStoreAtomicMonotonic->getTerminator());
+ BranchInst *Branch2 =
+ cast<BranchInst>(AtomicStoreAtomicRelease->getTerminator());
+ BranchInst *Branch3 =
+ cast<BranchInst>(AtomicStoreAtomicSeqcst->getTerminator());
+ ReturnInst *Return = cast<ReturnInst>(ExitBB->getTerminator());
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store1 =
+ cast<StoreInst>(getUniquePreviousStore(PtrArg, AtomicStoreAtomicSeqcst));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store1->getValueOperand());
+ StoreInst *Store2 = cast<StoreInst>(
+ getUniquePreviousStore(PtrArg, AtomicStoreAtomicMonotonic));
+ StoreInst *Store3 =
+ cast<StoreInst>(getUniquePreviousStore(PtrArg, AtomicStoreAtomicRelease));
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // switch i32 %memorderarg_success, label %atomic_store.atomic.monotonic [
+ // i32 3, label %atomic_store.atomic.release
+ // i32 5, label %atomic_store.atomic.seqcst
+ // ]
+ EXPECT_TRUE(Switch->getName().empty());
+ EXPECT_EQ(Switch->getParent(), EntryBB);
+ EXPECT_EQ(Switch->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch->getDefaultDest(), AtomicStoreAtomicMonotonic);
+ EXPECT_EQ(cast<ConstantInt>(Switch->getOperand(2))->getZExtValue(), 3);
+ EXPECT_EQ(Switch->getOperand(3), AtomicStoreAtomicRelease);
+ EXPECT_EQ(cast<ConstantInt>(Switch->getOperand(4))->getZExtValue(), 5);
+ EXPECT_EQ(Switch->getOperand(5), AtomicStoreAtomicSeqcst);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr monotonic, ...
+ EXPECT_TRUE(Store2->getName().empty());
+ EXPECT_EQ(Store2->getParent(), AtomicStoreAtomicMonotonic);
+ EXPECT_FALSE(Store2->isVolatile());
+ EXPECT_EQ(Store2->getAlign(), 1);
+ EXPECT_EQ(Store2->getOrdering(), AtomicOrdering::Monotonic);
+ EXPECT_EQ(Store2->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store2->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store2->getPointerOperand(), PtrArg);
+
+ // br label %atomic_store.atomic.memorder.continue
+ EXPECT_TRUE(Branch1->getName().empty());
+ EXPECT_EQ(Branch1->getParent(), AtomicStoreAtomicMonotonic);
+ EXPECT_EQ(Branch1->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch1->isUnconditional());
+ EXPECT_EQ(Branch1->getOperand(0), ExitBB);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr release, al...
+ EXPECT_TRUE(Store3->getName().empty());
+ EXPECT_EQ(Store3->getParent(), AtomicStoreAtomicRelease);
+ EXPECT_FALSE(Store3->isVolatile());
+ EXPECT_EQ(Store3->getAlign(), 1);
+ EXPECT_EQ(Store3->getOrdering(), AtomicOrdering::Release);
+ EXPECT_EQ(Store3->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store3->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store3->getPointerOperand(), PtrArg);
+
+ // br label %atomic_store.atomic.memorder.continue
+ EXPECT_TRUE(Branch2->getName().empty());
+ EXPECT_EQ(Branch2->getParent(), AtomicStoreAtomicRelease);
+ EXPECT_EQ(Branch2->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch2->isUnconditional());
+ EXPECT_EQ(Branch2->getOperand(0), ExitBB);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, al...
+ EXPECT_TRUE(Store1->getName().empty());
+ EXPECT_EQ(Store1->getParent(), AtomicStoreAtomicSeqcst);
+ EXPECT_FALSE(Store1->isVolatile());
+ EXPECT_EQ(Store1->getAlign(), 1);
+ EXPECT_EQ(Store1->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store1->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store1->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store1->getPointerOperand(), PtrArg);
+
+ // br label %atomic_store.atomic.memorder.continue
+ EXPECT_TRUE(Branch3->getName().empty());
+ EXPECT_EQ(Branch3->getParent(), AtomicStoreAtomicSeqcst);
+ EXPECT_EQ(Branch3->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch3->isUnconditional());
+ EXPECT_EQ(Branch3->getOperand(0), ExitBB);
+
+ // ret void
+ EXPECT_TRUE(Return->getName().empty());
+ EXPECT_EQ(Return->getParent(), ExitBB);
+ EXPECT_EQ(Return->getType(), Type::getVoidTy(Ctx));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_SyncScope) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::SingleThread,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr syncscope("singlethread") seq_cst, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr syncscope("...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::SingleThread);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Float) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getFloatTy(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load float, ptr %ret_ptr, align 4
+ // store atomic float %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load float, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getFloatTy(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic float %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, ...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
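+// x86_fp80 has a 10-byte store size that no atomic instruction covers, so the
+// test below expects a fallback to the generic __atomic_store libcall with
+// size 10.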
+TEST_F(BuildBuiltinsTests, AtomicStore_FP80) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Type::getX86_FP80Ty(Ctx),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_store(i64 10, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 10);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_store"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Ptr) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getPtrTy(), /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load ptr, ptr %ret_ptr, align 8
+ // store atomic ptr %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load ptr, ptr %ret_ptr, align 8
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), PointerType::get(Ctx, 0));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic ptr %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, al...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Struct) {
+  // A struct small enough to be stored by a single atomic instruction
+ StructType *STy =
+ StructType::get(Ctx, {Builder.getFloatTy(), Builder.getFloatTy()});
+
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/STy,
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i64, ptr %ret_ptr, align 4
+ // store atomic i64 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i64, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt64Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic i64 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, al...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 1);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
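+// Same oversized-aggregate fallback as AtomicLoad_Array, but in the store
+// direction: 19 floats (76 bytes) are expected to go through the generic
+// __atomic_store libcall.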
+TEST_F(BuildBuiltinsTests, AtomicStore_Array) {
+ // A type that is too large for atomic instructions
+ ArrayType *ATy = ArrayType::get(Builder.getFloatTy(), 19);
+
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/ATy,
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_store(i64 76, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 76);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_store"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Array_NoLibatomic) {
+ // Use a triple that does not support libatomic (according to
+ // initializeLibCalls in TargetLibraryInfo.cpp)
+ Triple T("x86_64-scei-ps4");
+ TLII.reset(new TargetLibraryInfoImpl(T));
+ TLI.reset(new TargetLibraryInfo(*TLII));
+
+ // A type that is too large for atomic instructions
+ ArrayType *ATy = ArrayType::get(Builder.getFloatTy(), 19);
+
+ ASSERT_THAT_ERROR(
+ emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg, /*TypeOrSize=*/ATy,
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/{},
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ FailedWithMessage(
+ "__atomic_store builtin not supported by any available means"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_DataSize) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/static_cast<uint64_t>(6),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+                            /*Scope=*/SyncScope::System, /*Align=*/{},
+                            /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *Call = cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // call void @__atomic_store(i64 6, ptr %atomic_ptr, ptr %ret_ptr, i32 5)
+ EXPECT_TRUE(Call->getName().empty());
+ EXPECT_EQ(Call->getParent(), EntryBB);
+ EXPECT_EQ(Call->getType(), Type::getVoidTy(Ctx));
+ EXPECT_FALSE(Call->isMustTailCall());
+ EXPECT_FALSE(Call->isTailCall());
+ EXPECT_EQ(Call->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(0))->getZExtValue(), 6);
+ EXPECT_EQ(Call->getArgOperand(1), PtrArg);
+ EXPECT_EQ(Call->getArgOperand(2), RetArg);
+ EXPECT_EQ(cast<ConstantInt>(Call->getArgOperand(3))->getZExtValue(), 5);
+ EXPECT_EQ(Call->getCalledFunction(), M->getFunction("__atomic_store"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicStore_Align) {
+ ASSERT_THAT_ERROR(emitAtomicStoreBuiltin(
+ /*AtomicPtr=*/PtrArg, /*RetPtr=*/RetArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsVolatile=*/false,
+ /*Memorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*Align=*/Align(8),
+ /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_store"),
+ Succeeded());
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, align 8
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ StoreInst *Store = cast<StoreInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicStoreAtomicVal = cast<LoadInst>(Store->getValueOperand());
+
+ // %atomic_store.atomic.val = load i32, ptr %ret_ptr, align 4
+ EXPECT_EQ(AtomicStoreAtomicVal->getName(), "atomic_store.atomic.val");
+ EXPECT_EQ(AtomicStoreAtomicVal->getParent(), EntryBB);
+ EXPECT_EQ(AtomicStoreAtomicVal->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicStoreAtomicVal->isSimple());
+ EXPECT_EQ(AtomicStoreAtomicVal->getPointerOperand(), RetArg);
+
+ // store atomic i32 %atomic_store.atomic.val, ptr %atomic_ptr seq_cst, al...
+ EXPECT_TRUE(Store->getName().empty());
+ EXPECT_EQ(Store->getParent(), EntryBB);
+ EXPECT_FALSE(Store->isVolatile());
+ EXPECT_EQ(Store->getAlign(), 8);
+ EXPECT_EQ(Store->getOrdering(), AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(Store->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(Store->getValueOperand(), AtomicStoreAtomicVal);
+ EXPECT_EQ(Store->getPointerOperand(), PtrArg);
+}
+
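+// Compare-exchange passes the expected and desired values indirectly through
+// pointers; on the instruction path the test below expects a cmpxchg whose i1
+// success bit is extracted from the {i32, i1} result pair.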
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getInt32Ty(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cm...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
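+// With instruction lowering disabled, the test below expects the
+// __atomic_compare_exchange libcall; its i8 result is compared against zero to
+// reconstruct the i1 success flag.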
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_SizedLibcall) {
+ AtomicEmitOptions EO(DL, TLI.get());
+ EO.AllowInstruction = false;
+
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getInt32Ty(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/EO,
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 4, ptr %atomic_ptr, ptr %expected_ptr, ptr %desired_ptr, i32 5, i32 5)
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchange, 0
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *AtomicCompareExchange =
+ cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 4,...
+ EXPECT_EQ(AtomicCompareExchange->getName(), "__atomic_compare_exchange");
+ EXPECT_EQ(AtomicCompareExchange->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCompareExchange->getType(), Type::getInt8Ty(Ctx));
+ EXPECT_FALSE(AtomicCompareExchange->isMustTailCall());
+ EXPECT_FALSE(AtomicCompareExchange->isTailCall());
+ EXPECT_EQ(AtomicCompareExchange->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(0))
+ ->getZExtValue(),
+ 4);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(1), PtrArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(2), ExpectedArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(3), DesiredArg);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(4))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(5))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(AtomicCompareExchange->getCalledFunction(),
+ M->getFunction("__atomic_compare_exchange"));
+
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchang...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getOperand(0),
+ AtomicCompareExchange);
+ EXPECT_EQ(cast<ConstantInt>(cast<Instruction>(AtomicSuccess)->getOperand(1))
+ ->getZExtValue(),
+ 0);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Libcall) {
+ AtomicEmitOptions EO(DL, TLI.get());
+ EO.AllowInstruction = false;
+ EO.AllowSizedLibcall = false;
+
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getInt32Ty(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/EO,
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 4, ptr %atomic_ptr, ptr %expected_ptr, ptr %desired_ptr, i32 5, i32 5)
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchange, 0
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *AtomicCompareExchange =
+ cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 4,...
+ EXPECT_EQ(AtomicCompareExchange->getName(), "__atomic_compare_exchange");
+ EXPECT_EQ(AtomicCompareExchange->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCompareExchange->getType(), Type::getInt8Ty(Ctx));
+ EXPECT_FALSE(AtomicCompareExchange->isMustTailCall());
+ EXPECT_FALSE(AtomicCompareExchange->isTailCall());
+ EXPECT_EQ(AtomicCompareExchange->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(0))
+ ->getZExtValue(),
+ 4);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(1), PtrArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(2), ExpectedArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(3), DesiredArg);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(4))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(5))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(AtomicCompareExchange->getCalledFunction(),
+ M->getFunction("__atomic_compare_exchange"));
+
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchang...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getOperand(0),
+ AtomicCompareExchange);
+ EXPECT_EQ(cast<ConstantInt>(cast<Instruction>(AtomicSuccess)->getOperand(1))
+ ->getZExtValue(),
+ 0);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Weak) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getInt32Ty(),
+          /*IsWeak=*/true,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg weak ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg weak ptr %atomic_ptr, i32 %atom...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Volatile) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getInt32Ty(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/true,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Memorder) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+                          /*IsWeak=*/false,
+ /*IsVolatile=*/true,
+ /*SuccessMemorder=*/AtomicOrdering::AcquireRelease,
+ /*FailureMemorder=*/AtomicOrdering::Monotonic,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Memorder_CABI) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+                          /*IsWeak=*/false,
+ /*IsVolatile=*/true,
+ /*SuccessMemorder=*/AtomicOrderingCABI::acq_rel,
+ /*FailureMemorder=*/AtomicOrderingCABI::relaxed,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
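+// With the weak flag (PredArg) and both memory orders (MemorderArg) only known
+// at run time, no single cmpxchg ordering can be chosen statically. The helper
+// is expected to emit nested switches -- weak/strong, then success order, then
+// failure order -- with one cmpxchg per combination and phis merging the
+// success bit back together.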
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Switch) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg,
+ /*TypeOrSize=*/Builder.getInt32Ty(),
+ /*IsWeak=*/PredArg,
+ /*IsVolatile=*/true,
+ /*SuccessMemorder=*/MemorderArg,
+ /*FailureMemorder=*/MemorderArg,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
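+ // Remember the block the builder was left in after emission; its terminator
+ // (the final ret) is retrieved below as part of the CFG discovery.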
+ BasicBlock *ExitBB = Builder.GetInsertBlock();
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // entry:
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // switch i1 %predarg, label %atomic_cmpxchg.cmpxchg.weak [
+ // i1 false, label %atomic_cmpxchg.cmpxchg.strong
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.strong: ; preds = %entry
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire
+ // i32 3, label %atomic_cmpxchg.cmpxchg.release
+ // i32 4, label %atomic_cmpxchg.cmpxchg.acqrel
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic: ; preds = %atomic_cmpxchg.cmpxchg.strong
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail: ; preds = %atomic_cmpxchg.cmpxchg.monotonic
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired monotonic monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail: ; preds = %atomic_cmpxchg.cmpxchg.monotonic, %atomic_cmpxchg.cmpxchg.monotonic
+ // %atomic_cmpxchg.cmpxchg.pair1 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired monotonic acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success2 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair1, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail: ; preds = %atomic_cmpxchg.cmpxchg.monotonic
+ // %atomic_cmpxchg.cmpxchg.pair3 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired monotonic seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success4 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair3, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail, %atomic_cmpxchg.cmpxchg.acquire_fail, %atomic_cmpxchg.cmpxchg.monotonic_fail
+ // %atomic_cmpxchg.cmpxchg.failorder.success = phi i1 [ %atomic_cmpxchg.cmpxchg.success, %atomic_cmpxchg.cmpxchg.monotonic_fail ], [ %atomic_cmpxchg.cmpxchg.success2, %atomic_cmpxchg.cmpxchg.acquire_fail ], [ %atomic_cmpxchg.cmpxchg.success4, %atomic_cmpxchg.cmpxchg.seqcst_fail ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.acquire: ; preds = %atomic_cmpxchg.cmpxchg.strong, %atomic_cmpxchg.cmpxchg.strong
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail6 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail7
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail7
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail8
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail6: ; preds = %atomic_cmpxchg.cmpxchg.acquire
+ // %atomic_cmpxchg.cmpxchg.pair10 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acquire monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success11 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair10, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue5
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail7: ; preds = %atomic_cmpxchg.cmpxchg.acquire, %atomic_cmpxchg.cmpxchg.acquire
+ // %atomic_cmpxchg.cmpxchg.pair12 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acquire acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success13 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair12, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue5
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail8: ; preds = %atomic_cmpxchg.cmpxchg.acquire
+ // %atomic_cmpxchg.cmpxchg.pair14 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acquire seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success15 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair14, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue5
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue5: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail8, %atomic_cmpxchg.cmpxchg.acquire_fail7, %atomic_cmpxchg.cmpxchg.monotonic_fail6
+ // %atomic_cmpxchg.cmpxchg.failorder.success9 = phi i1 [ %atomic_cmpxchg.cmpxchg.success11, %atomic_cmpxchg.cmpxchg.monotonic_fail6 ], [ %atomic_cmpxchg.cmpxchg.success13, %atomic_cmpxchg.cmpxchg.acquire_fail7 ], [ %atomic_cmpxchg.cmpxchg.success15, %atomic_cmpxchg.cmpxchg.seqcst_fail8 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.release: ; preds = %atomic_cmpxchg.cmpxchg.strong
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail17 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail18
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail18
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail19
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail17: ; preds = %atomic_cmpxchg.cmpxchg.release
+ // %atomic_cmpxchg.cmpxchg.pair21 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired release monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success22 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair21, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue16
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail18: ; preds = %atomic_cmpxchg.cmpxchg.release, %atomic_cmpxchg.cmpxchg.release
+ // %atomic_cmpxchg.cmpxchg.pair23 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired release acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success24 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair23, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue16
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail19: ; preds = %atomic_cmpxchg.cmpxchg.release
+ // %atomic_cmpxchg.cmpxchg.pair25 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired release seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success26 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair25, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue16
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue16: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail19, %atomic_cmpxchg.cmpxchg.acquire_fail18, %atomic_cmpxchg.cmpxchg.monotonic_fail17
+ // %atomic_cmpxchg.cmpxchg.failorder.success20 = phi i1 [ %atomic_cmpxchg.cmpxchg.success22, %atomic_cmpxchg.cmpxchg.monotonic_fail17 ], [ %atomic_cmpxchg.cmpxchg.success24, %atomic_cmpxchg.cmpxchg.acquire_fail18 ], [ %atomic_cmpxchg.cmpxchg.success26, %atomic_cmpxchg.cmpxchg.seqcst_fail19 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.acqrel: ; preds = %atomic_cmpxchg.cmpxchg.strong
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail28 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail29
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail29
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail30
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail28: ; preds = %atomic_cmpxchg.cmpxchg.acqrel
+ // %atomic_cmpxchg.cmpxchg.pair32 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success33 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair32, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue27
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail29: ; preds = %atomic_cmpxchg.cmpxchg.acqrel, %atomic_cmpxchg.cmpxchg.acqrel
+ // %atomic_cmpxchg.cmpxchg.pair34 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success35 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair34, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue27
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail30: ; preds = %atomic_cmpxchg.cmpxchg.acqrel
+ // %atomic_cmpxchg.cmpxchg.pair36 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success37 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair36, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue27
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue27: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail30, %atomic_cmpxchg.cmpxchg.acquire_fail29, %atomic_cmpxchg.cmpxchg.monotonic_fail28
+ // %atomic_cmpxchg.cmpxchg.failorder.success31 = phi i1 [ %atomic_cmpxchg.cmpxchg.success33, %atomic_cmpxchg.cmpxchg.monotonic_fail28 ], [ %atomic_cmpxchg.cmpxchg.success35, %atomic_cmpxchg.cmpxchg.acquire_fail29 ], [ %atomic_cmpxchg.cmpxchg.success37, %atomic_cmpxchg.cmpxchg.seqcst_fail30 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst: ; preds = %atomic_cmpxchg.cmpxchg.strong
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail39 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail40
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail40
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail41
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail39: ; preds = %atomic_cmpxchg.cmpxchg.seqcst
+ // %atomic_cmpxchg.cmpxchg.pair43 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success44 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair43, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue38
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail40: ; preds = %atomic_cmpxchg.cmpxchg.seqcst, %atomic_cmpxchg.cmpxchg.seqcst
+ // %atomic_cmpxchg.cmpxchg.pair45 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success46 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair45, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue38
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail41: ; preds = %atomic_cmpxchg.cmpxchg.seqcst
+ // %atomic_cmpxchg.cmpxchg.pair47 = cmpxchg volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success48 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair47, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue38
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue38: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail41, %atomic_cmpxchg.cmpxchg.acquire_fail40, %atomic_cmpxchg.cmpxchg.monotonic_fail39
+ // %atomic_cmpxchg.cmpxchg.failorder.success42 = phi i1 [ %atomic_cmpxchg.cmpxchg.success44, %atomic_cmpxchg.cmpxchg.monotonic_fail39 ], [ %atomic_cmpxchg.cmpxchg.success46, %atomic_cmpxchg.cmpxchg.acquire_fail40 ], [ %atomic_cmpxchg.cmpxchg.success48, %atomic_cmpxchg.cmpxchg.seqcst_fail41 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ //
+ // atomic_cmpxchg.cmpxchg.memorder.continue: ; preds = %atomic_cmpxchg.cmpxchg.failorder.continue38, %atomic_cmpxchg.cmpxchg.failorder.continue27, %atomic_cmpxchg.cmpxchg.failorder.continue16, %atomic_cmpxchg.cmpxchg.failorder.continue5, %atomic_cmpxchg.cmpxchg.failorder.continue
+ // %atomic_cmpxchg.cmpxchg.memorder.success = phi i1 [ %atomic_cmpxchg.cmpxchg.failorder.success, %atomic_cmpxchg.cmpxchg.failorder.continue ], [ %atomic_cmpxchg.cmpxchg.failorder.success9, %atomic_cmpxchg.cmpxchg.failorder.continue5 ], [ %atomic_cmpxchg.cmpxchg.failorder.success20, %atomic_cmpxchg.cmpxchg.failorder.continue16 ], [ %atomic_cmpxchg.cmpxchg.failorder.success31, %atomic_cmpxchg.cmpxchg.failorder.continue27 ], [ %atomic_cmpxchg.cmpxchg.failorder.success42, %atomic_cmpxchg.cmpxchg.failorder.continue38 ]
+ // br label %atomic_cmpxchg.cmpxchg.weak.continue
+ //
+ // atomic_cmpxchg.cmpxchg.weak: ; preds = %entry
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic50 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire51
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire51
+ // i32 3, label %atomic_cmpxchg.cmpxchg.release52
+ // i32 4, label %atomic_cmpxchg.cmpxchg.acqrel53
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst54
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic50: ; preds = %atomic_cmpxchg.cmpxchg.weak
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail57 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail58
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail58
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail59
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail57: ; preds = %atomic_cmpxchg.cmpxchg.monotonic50
+ // %atomic_cmpxchg.cmpxchg.pair61 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired monotonic monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success62 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair61, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue56
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail58: ; preds = %atomic_cmpxchg.cmpxchg.monotonic50, %atomic_cmpxchg.cmpxchg.monotonic50
+ // %atomic_cmpxchg.cmpxchg.pair63 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired monotonic acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success64 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair63, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue56
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail59: ; preds = %atomic_cmpxchg.cmpxchg.monotonic50
+ // %atomic_cmpxchg.cmpxchg.pair65 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired monotonic seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success66 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair65, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue56
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue56: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail59, %atomic_cmpxchg.cmpxchg.acquire_fail58, %atomic_cmpxchg.cmpxchg.monotonic_fail57
+ // %atomic_cmpxchg.cmpxchg.failorder.success60 = phi i1 [ %atomic_cmpxchg.cmpxchg.success62, %atomic_cmpxchg.cmpxchg.monotonic_fail57 ], [ %atomic_cmpxchg.cmpxchg.success64, %atomic_cmpxchg.cmpxchg.acquire_fail58 ], [ %atomic_cmpxchg.cmpxchg.success66, %atomic_cmpxchg.cmpxchg.seqcst_fail59 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ //
+ // atomic_cmpxchg.cmpxchg.acquire51: ; preds = %atomic_cmpxchg.cmpxchg.weak, %atomic_cmpxchg.cmpxchg.weak
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail68 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail69
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail69
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail70
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail68: ; preds = %atomic_cmpxchg.cmpxchg.acquire51
+ // %atomic_cmpxchg.cmpxchg.pair72 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acquire monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success73 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair72, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue67
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail69: ; preds = %atomic_cmpxchg.cmpxchg.acquire51, %atomic_cmpxchg.cmpxchg.acquire51
+ // %atomic_cmpxchg.cmpxchg.pair74 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acquire acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success75 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair74, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue67
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail70: ; preds = %atomic_cmpxchg.cmpxchg.acquire51
+ // %atomic_cmpxchg.cmpxchg.pair76 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acquire seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success77 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair76, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue67
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue67: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail70, %atomic_cmpxchg.cmpxchg.acquire_fail69, %atomic_cmpxchg.cmpxchg.monotonic_fail68
+ // %atomic_cmpxchg.cmpxchg.failorder.success71 = phi i1 [ %atomic_cmpxchg.cmpxchg.success73, %atomic_cmpxchg.cmpxchg.monotonic_fail68 ], [ %atomic_cmpxchg.cmpxchg.success75, %atomic_cmpxchg.cmpxchg.acquire_fail69 ], [ %atomic_cmpxchg.cmpxchg.success77, %atomic_cmpxchg.cmpxchg.seqcst_fail70 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ //
+ // atomic_cmpxchg.cmpxchg.release52: ; preds = %atomic_cmpxchg.cmpxchg.weak
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail79 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail80
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail80
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail81
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail79: ; preds = %atomic_cmpxchg.cmpxchg.release52
+ // %atomic_cmpxchg.cmpxchg.pair83 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired release monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success84 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair83, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue78
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail80: ; preds = %atomic_cmpxchg.cmpxchg.release52, %atomic_cmpxchg.cmpxchg.release52
+ // %atomic_cmpxchg.cmpxchg.pair85 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired release acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success86 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair85, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue78
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail81: ; preds = %atomic_cmpxchg.cmpxchg.release52
+ // %atomic_cmpxchg.cmpxchg.pair87 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired release seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success88 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair87, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue78
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue78: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail81, %atomic_cmpxchg.cmpxchg.acquire_fail80, %atomic_cmpxchg.cmpxchg.monotonic_fail79
+ // %atomic_cmpxchg.cmpxchg.failorder.success82 = phi i1 [ %atomic_cmpxchg.cmpxchg.success84, %atomic_cmpxchg.cmpxchg.monotonic_fail79 ], [ %atomic_cmpxchg.cmpxchg.success86, %atomic_cmpxchg.cmpxchg.acquire_fail80 ], [ %atomic_cmpxchg.cmpxchg.success88, %atomic_cmpxchg.cmpxchg.seqcst_fail81 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ //
+ // atomic_cmpxchg.cmpxchg.acqrel53: ; preds = %atomic_cmpxchg.cmpxchg.weak
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail90 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail91
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail91
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail92
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail90: ; preds = %atomic_cmpxchg.cmpxchg.acqrel53
+ // %atomic_cmpxchg.cmpxchg.pair94 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success95 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair94, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue89
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail91: ; preds = %atomic_cmpxchg.cmpxchg.acqrel53, %atomic_cmpxchg.cmpxchg.acqrel53
+ // %atomic_cmpxchg.cmpxchg.pair96 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success97 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair96, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue89
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail92: ; preds = %atomic_cmpxchg.cmpxchg.acqrel53
+ // %atomic_cmpxchg.cmpxchg.pair98 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired acq_rel seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success99 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair98, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue89
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue89: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail92, %atomic_cmpxchg.cmpxchg.acquire_fail91, %atomic_cmpxchg.cmpxchg.monotonic_fail90
+ // %atomic_cmpxchg.cmpxchg.failorder.success93 = phi i1 [ %atomic_cmpxchg.cmpxchg.success95, %atomic_cmpxchg.cmpxchg.monotonic_fail90 ], [ %atomic_cmpxchg.cmpxchg.success97, %atomic_cmpxchg.cmpxchg.acquire_fail91 ], [ %atomic_cmpxchg.cmpxchg.success99, %atomic_cmpxchg.cmpxchg.seqcst_fail92 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst54: ; preds = %atomic_cmpxchg.cmpxchg.weak
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monotonic_fail101 [
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail102
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail102
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail103
+ // ]
+ //
+ // atomic_cmpxchg.cmpxchg.monotonic_fail101: ; preds = %atomic_cmpxchg.cmpxchg.seqcst54
+ // %atomic_cmpxchg.cmpxchg.pair105 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst monotonic, align 1
+ // %atomic_cmpxchg.cmpxchg.success106 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair105, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue100
+ //
+ // atomic_cmpxchg.cmpxchg.acquire_fail102: ; preds = %atomic_cmpxchg.cmpxchg.seqcst54, %atomic_cmpxchg.cmpxchg.seqcst54
+ // %atomic_cmpxchg.cmpxchg.pair107 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst acquire, align 1
+ // %atomic_cmpxchg.cmpxchg.success108 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair107, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue100
+ //
+ // atomic_cmpxchg.cmpxchg.seqcst_fail103: ; preds = %atomic_cmpxchg.cmpxchg.seqcst54
+ // %atomic_cmpxchg.cmpxchg.pair109 = cmpxchg weak volatile ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success110 = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair109, 1
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue100
+ //
+ // atomic_cmpxchg.cmpxchg.failorder.continue100: ; preds = %atomic_cmpxchg.cmpxchg.seqcst_fail103, %atomic_cmpxchg.cmpxchg.acquire_fail102, %atomic_cmpxchg.cmpxchg.monotonic_fail101
+ // %atomic_cmpxchg.cmpxchg.failorder.success104 = phi i1 [ %atomic_cmpxchg.cmpxchg.success106, %atomic_cmpxchg.cmpxchg.monotonic_fail101 ], [ %atomic_cmpxchg.cmpxchg.success108, %atomic_cmpxchg.cmpxchg.acquire_fail102 ], [ %atomic_cmpxchg.cmpxchg.success110, %atomic_cmpxchg.cmpxchg.seqcst_fail103 ]
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ //
+ // atomic_cmpxchg.cmpxchg.memorder.continue49: ; preds = %atomic_cmpxchg.cmpxchg.failorder.continue100, %atomic_cmpxchg.cmpxchg.failorder.continue89, %atomic_cmpxchg.cmpxchg.failorder.continue78, %atomic_cmpxchg.cmpxchg.failorder.continue67, %atomic_cmpxchg.cmpxchg.failorder.continue56
+ // %atomic_cmpxchg.cmpxchg.memorder.success55 = phi i1 [ %atomic_cmpxchg.cmpxchg.failorder.success60, %atomic_cmpxchg.cmpxchg.failorder.continue56 ], [ %atomic_cmpxchg.cmpxchg.failorder.success71, %atomic_cmpxchg.cmpxchg.failorder.continue67 ], [ %atomic_cmpxchg.cmpxchg.failorder.success82, %atomic_cmpxchg.cmpxchg.failorder.continue78 ], [ %atomic_cmpxchg.cmpxchg.failorder.success93, %atomic_cmpxchg.cmpxchg.failorder.continue89 ], [ %atomic_cmpxchg.cmpxchg.failorder.success104, %atomic_cmpxchg.cmpxchg.failorder.continue100 ]
+ // br label %atomic_cmpxchg.cmpxchg.weak.continue
+ //
+ // atomic_cmpxchg.cmpxchg.weak.continue: ; preds = %atomic_cmpxchg.cmpxchg.memorder.continue49, %atomic_cmpxchg.cmpxchg.memorder.continue
+ // %atomic_cmpxchg.cmpxchg.isweak.success = phi i1 [ %atomic_cmpxchg.cmpxchg.memorder.success, %atomic_cmpxchg.cmpxchg.memorder.continue ], [ %atomic_cmpxchg.cmpxchg.memorder.success55, %atomic_cmpxchg.cmpxchg.memorder.continue49 ]
+ // ret void
+ // clang-format on
+
+ // Discover control flow graph
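+ // Note: for a SwitchInst, successor 0 is the default destination and
+ // successor i (for i >= 1) is the destination of the (i-1)-th case, so e.g.
+ // getSuccessor(1) below is the block of the first case value.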
+ SwitchInst *Switch1 = cast<SwitchInst>(EntryBB->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgStrong =
+ cast<BasicBlock>(Switch1->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgWeak =
+ cast<BasicBlock>(Switch1->getDefaultDest());
+ SwitchInst *Switch2 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgStrong->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquire =
+ cast<BasicBlock>(Switch2->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgRelease =
+ cast<BasicBlock>(Switch2->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgAcqrel =
+ cast<BasicBlock>(Switch2->getSuccessor(4));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcst =
+ cast<BasicBlock>(Switch2->getSuccessor(5));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonic =
+ cast<BasicBlock>(Switch2->getDefaultDest());
+ SwitchInst *Switch3 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgMonotonic->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail =
+ cast<BasicBlock>(Switch3->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail =
+ cast<BasicBlock>(Switch3->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail =
+ cast<BasicBlock>(Switch3->getDefaultDest());
+ BranchInst *Branch1 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue =
+ cast<BasicBlock>(AtomicCmpxchgCmpxchgMonotonicFail->getUniqueSuccessor());
+ BranchInst *Branch2 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail->getTerminator());
+ BranchInst *Branch3 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail->getTerminator());
+ BranchInst *Branch4 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgFailorderContinue->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgMemorderContinue = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgFailorderContinue->getUniqueSuccessor());
+ SwitchInst *Switch4 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgAcquire->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail7 =
+ cast<BasicBlock>(Switch4->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail8 =
+ cast<BasicBlock>(Switch4->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail6 =
+ cast<BasicBlock>(Switch4->getDefaultDest());
+ BranchInst *Branch5 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail6->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue5 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail6->getUniqueSuccessor());
+ BranchInst *Branch6 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail7->getTerminator());
+ BranchInst *Branch7 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail8->getTerminator());
+ BranchInst *Branch8 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgFailorderContinue5->getTerminator());
+ SwitchInst *Switch5 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgRelease->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail18 =
+ cast<BasicBlock>(Switch5->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail19 =
+ cast<BasicBlock>(Switch5->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail17 =
+ cast<BasicBlock>(Switch5->getDefaultDest());
+ BranchInst *Branch9 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail17->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue16 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail17->getUniqueSuccessor());
+ BranchInst *Branch10 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail18->getTerminator());
+ BranchInst *Branch11 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail19->getTerminator());
+ BranchInst *Branch12 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue16->getTerminator());
+ SwitchInst *Switch6 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgAcqrel->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail29 =
+ cast<BasicBlock>(Switch6->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail30 =
+ cast<BasicBlock>(Switch6->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail28 =
+ cast<BasicBlock>(Switch6->getDefaultDest());
+ BranchInst *Branch13 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail28->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue27 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail28->getUniqueSuccessor());
+ BranchInst *Branch14 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail29->getTerminator());
+ BranchInst *Branch15 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail30->getTerminator());
+ BranchInst *Branch16 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue27->getTerminator());
+ SwitchInst *Switch7 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgSeqcst->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail40 =
+ cast<BasicBlock>(Switch7->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail41 =
+ cast<BasicBlock>(Switch7->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail39 =
+ cast<BasicBlock>(Switch7->getDefaultDest());
+ BranchInst *Branch17 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail39->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue38 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail39->getUniqueSuccessor());
+ BranchInst *Branch18 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail40->getTerminator());
+ BranchInst *Branch19 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail41->getTerminator());
+ BranchInst *Branch20 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue38->getTerminator());
+ BranchInst *Branch21 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMemorderContinue->getTerminator());
+ SwitchInst *Switch8 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgWeak->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquire51 =
+ cast<BasicBlock>(Switch8->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgRelease52 =
+ cast<BasicBlock>(Switch8->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgAcqrel53 =
+ cast<BasicBlock>(Switch8->getSuccessor(4));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcst54 =
+ cast<BasicBlock>(Switch8->getSuccessor(5));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonic50 =
+ cast<BasicBlock>(Switch8->getDefaultDest());
+ SwitchInst *Switch9 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgMonotonic50->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail58 =
+ cast<BasicBlock>(Switch9->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail59 =
+ cast<BasicBlock>(Switch9->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail57 =
+ cast<BasicBlock>(Switch9->getDefaultDest());
+ BranchInst *Branch22 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail57->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue56 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail57->getUniqueSuccessor());
+ BranchInst *Branch23 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail58->getTerminator());
+ BranchInst *Branch24 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail59->getTerminator());
+ BranchInst *Branch25 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue56->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgMemorderContinue49 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgFailorderContinue56->getUniqueSuccessor());
+ SwitchInst *Switch10 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgAcquire51->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail69 =
+ cast<BasicBlock>(Switch10->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail70 =
+ cast<BasicBlock>(Switch10->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail68 =
+ cast<BasicBlock>(Switch10->getDefaultDest());
+ BranchInst *Branch26 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail68->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue67 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail68->getUniqueSuccessor());
+ BranchInst *Branch27 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail69->getTerminator());
+ BranchInst *Branch28 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail70->getTerminator());
+ BranchInst *Branch29 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue67->getTerminator());
+ SwitchInst *Switch11 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgRelease52->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail80 =
+ cast<BasicBlock>(Switch11->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail81 =
+ cast<BasicBlock>(Switch11->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail79 =
+ cast<BasicBlock>(Switch11->getDefaultDest());
+ BranchInst *Branch30 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail79->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue78 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail79->getUniqueSuccessor());
+ BranchInst *Branch31 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail80->getTerminator());
+ BranchInst *Branch32 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail81->getTerminator());
+ BranchInst *Branch33 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue78->getTerminator());
+ SwitchInst *Switch12 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgAcqrel53->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail91 =
+ cast<BasicBlock>(Switch12->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail92 =
+ cast<BasicBlock>(Switch12->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail90 =
+ cast<BasicBlock>(Switch12->getDefaultDest());
+ BranchInst *Branch34 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail90->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue89 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail90->getUniqueSuccessor());
+ BranchInst *Branch35 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail91->getTerminator());
+ BranchInst *Branch36 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail92->getTerminator());
+ BranchInst *Branch37 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue89->getTerminator());
+ SwitchInst *Switch13 =
+ cast<SwitchInst>(AtomicCmpxchgCmpxchgSeqcst54->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgAcquireFail102 =
+ cast<BasicBlock>(Switch13->getSuccessor(1));
+ BasicBlock *AtomicCmpxchgCmpxchgSeqcstFail103 =
+ cast<BasicBlock>(Switch13->getSuccessor(3));
+ BasicBlock *AtomicCmpxchgCmpxchgMonotonicFail101 =
+ cast<BasicBlock>(Switch13->getDefaultDest());
+ BranchInst *Branch38 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMonotonicFail101->getTerminator());
+ BasicBlock *AtomicCmpxchgCmpxchgFailorderContinue100 = cast<BasicBlock>(
+ AtomicCmpxchgCmpxchgMonotonicFail101->getUniqueSuccessor());
+ BranchInst *Branch39 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgAcquireFail102->getTerminator());
+ BranchInst *Branch40 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgSeqcstFail103->getTerminator());
+ BranchInst *Branch41 = cast<BranchInst>(
+ AtomicCmpxchgCmpxchgFailorderContinue100->getTerminator());
+ BranchInst *Branch42 =
+ cast<BranchInst>(AtomicCmpxchgCmpxchgMemorderContinue49->getTerminator());
+ ReturnInst *Return = cast<ReturnInst>(ExitBB->getTerminator());
+
+ // Follow use-def and load-store chains to discover instructions
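+ // Each fail-order block contains exactly one instruction that writes through
+ // PtrArg (its cmpxchg), which getUniquePreviousStore retrieves; the expected
+ // and desired loads are then reached through the cmpxchg's operands.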
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair109 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail103));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair109->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair109->getNewValOperand());
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair105 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail101));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair87 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail81));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair1 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair10 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail6));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair12 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail7));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair94 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail90));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair72 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail68));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair23 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail18));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair3 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair32 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail28));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair34 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail29));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair36 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail30));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair43 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail39));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair45 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail40));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair98 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail92));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair85 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail80));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair14 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail8));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair61 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail57));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair74 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail69));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair47 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail41));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair83 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail79));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair25 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail19));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair96 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail91));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair65 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail59));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair107 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail102));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair63 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgAcquireFail58));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair76 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgSeqcstFail70));
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair21 = cast<AtomicCmpXchgInst>(
+ getUniquePreviousStore(PtrArg, AtomicCmpxchgCmpxchgMonotonicFail17));
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // switch i1 %predarg, label %atomic_cmpxchg.cmpxchg.weak [
+ // i1 false, label %atomic_cmpxchg.cmpxchg.strong
+ // ]
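+ // SwitchInst operands are laid out as (condition, default dest, case value,
+ // case dest, ...), so operand 2 is the first case value and operand 3 its
+ // destination.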
+ EXPECT_TRUE(Switch1->getName().empty());
+ EXPECT_EQ(Switch1->getParent(), EntryBB);
+ EXPECT_EQ(Switch1->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch1->getCondition(), PredArg);
+ EXPECT_EQ(Switch1->getDefaultDest(), AtomicCmpxchgCmpxchgWeak);
+ EXPECT_EQ(cast<ConstantInt>(Switch1->getOperand(2))->getZExtValue(), 0);
+ EXPECT_EQ(Switch1->getOperand(3), AtomicCmpxchgCmpxchgStrong);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire
+ // i32 3, label %atomic_cmpxchg.cmpxchg.release
+ // i32 4, label %atomic_cmpxchg.cmpxchg.acqrel
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst
+ // ]
+ EXPECT_TRUE(Switch2->getName().empty());
+ EXPECT_EQ(Switch2->getParent(), AtomicCmpxchgCmpxchgStrong);
+ EXPECT_EQ(Switch2->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch2->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch2->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonic);
+ EXPECT_EQ(cast<ConstantInt>(Switch2->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch2->getOperand(3), AtomicCmpxchgCmpxchgAcquire);
+ EXPECT_EQ(cast<ConstantInt>(Switch2->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch2->getOperand(5), AtomicCmpxchgCmpxchgAcquire);
+ EXPECT_EQ(cast<ConstantInt>(Switch2->getOperand(6))->getZExtValue(), 3);
+ EXPECT_EQ(Switch2->getOperand(7), AtomicCmpxchgCmpxchgRelease);
+ EXPECT_EQ(cast<ConstantInt>(Switch2->getOperand(8))->getZExtValue(), 4);
+ EXPECT_EQ(Switch2->getOperand(9), AtomicCmpxchgCmpxchgAcqrel);
+ EXPECT_EQ(cast<ConstantInt>(Switch2->getOperand(10))->getZExtValue(), 5);
+ EXPECT_EQ(Switch2->getOperand(11), AtomicCmpxchgCmpxchgSeqcst);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail
+ // ]
+ EXPECT_TRUE(Switch3->getName().empty());
+ EXPECT_EQ(Switch3->getParent(), AtomicCmpxchgCmpxchgMonotonic);
+ EXPECT_EQ(Switch3->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch3->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch3->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail);
+ EXPECT_EQ(cast<ConstantInt>(Switch3->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch3->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail);
+ EXPECT_EQ(cast<ConstantInt>(Switch3->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch3->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail);
+ EXPECT_EQ(cast<ConstantInt>(Switch3->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch3->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg volatile ptr %atomic_ptr, i32 %...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue
+ EXPECT_TRUE(Branch1->getName().empty());
+ EXPECT_EQ(Branch1->getParent(), AtomicCmpxchgCmpxchgMonotonicFail);
+ EXPECT_EQ(Branch1->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch1->isUnconditional());
+ EXPECT_EQ(Branch1->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue);
+
+ // %atomic_cmpxchg.cmpxchg.pair1 = cmpxchg volatile ptr %atomic_ptr, i32 ...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getName(),
+ "atomic_cmpxchg.cmpxchg.pair1");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair1->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair1->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getSuccessOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair1->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue
+ EXPECT_TRUE(Branch2->getName().empty());
+ EXPECT_EQ(Branch2->getParent(), AtomicCmpxchgCmpxchgAcquireFail);
+ EXPECT_EQ(Branch2->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch2->isUnconditional());
+ EXPECT_EQ(Branch2->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue);
+
+ // %atomic_cmpxchg.cmpxchg.pair3 = cmpxchg volatile ptr %atomic_ptr, i32 ...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getName(),
+ "atomic_cmpxchg.cmpxchg.pair3");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair3->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair3->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getSuccessOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair3->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue
+ EXPECT_TRUE(Branch3->getName().empty());
+ EXPECT_EQ(Branch3->getParent(), AtomicCmpxchgCmpxchgSeqcstFail);
+ EXPECT_EQ(Branch3->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch3->isUnconditional());
+ EXPECT_EQ(Branch3->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ EXPECT_TRUE(Branch4->getName().empty());
+ EXPECT_EQ(Branch4->getParent(), AtomicCmpxchgCmpxchgFailorderContinue);
+ EXPECT_EQ(Branch4->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch4->isUnconditional());
+ EXPECT_EQ(Branch4->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail7
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail7
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail8
+ // ]
+ EXPECT_TRUE(Switch4->getName().empty());
+ EXPECT_EQ(Switch4->getParent(), AtomicCmpxchgCmpxchgAcquire);
+ EXPECT_EQ(Switch4->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch4->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch4->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail6);
+ EXPECT_EQ(cast<ConstantInt>(Switch4->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch4->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail7);
+ EXPECT_EQ(cast<ConstantInt>(Switch4->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch4->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail7);
+ EXPECT_EQ(cast<ConstantInt>(Switch4->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch4->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail8);
+
+ // %atomic_cmpxchg.cmpxchg.pair10 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getName(),
+ "atomic_cmpxchg.cmpxchg.pair10");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail6);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair10->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair10->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getSuccessOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair10->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue5
+ EXPECT_TRUE(Branch5->getName().empty());
+ EXPECT_EQ(Branch5->getParent(), AtomicCmpxchgCmpxchgMonotonicFail6);
+ EXPECT_EQ(Branch5->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch5->isUnconditional());
+ EXPECT_EQ(Branch5->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue5);
+
+ // %atomic_cmpxchg.cmpxchg.pair12 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getName(),
+ "atomic_cmpxchg.cmpxchg.pair12");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail7);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair12->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair12->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getSuccessOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair12->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue5
+ EXPECT_TRUE(Branch6->getName().empty());
+ EXPECT_EQ(Branch6->getParent(), AtomicCmpxchgCmpxchgAcquireFail7);
+ EXPECT_EQ(Branch6->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch6->isUnconditional());
+ EXPECT_EQ(Branch6->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue5);
+
+ // %atomic_cmpxchg.cmpxchg.pair14 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getName(),
+ "atomic_cmpxchg.cmpxchg.pair14");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail8);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair14->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair14->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getSuccessOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair14->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue5
+ EXPECT_TRUE(Branch7->getName().empty());
+ EXPECT_EQ(Branch7->getParent(), AtomicCmpxchgCmpxchgSeqcstFail8);
+ EXPECT_EQ(Branch7->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch7->isUnconditional());
+ EXPECT_EQ(Branch7->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue5);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ EXPECT_TRUE(Branch8->getName().empty());
+ EXPECT_EQ(Branch8->getParent(), AtomicCmpxchgCmpxchgFailorderContinue5);
+ EXPECT_EQ(Branch8->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch8->isUnconditional());
+ EXPECT_EQ(Branch8->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue);
+
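+  // The release/acqrel/seq_cst success-ordering cases below repeat the same
+  // pattern: a nested switch on the runtime failure ordering whose arms each
+  // contain a cmpxchg with the corresponding success/failure ordering pair.
+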
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail18
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail18
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail19
+ // ]
+ EXPECT_TRUE(Switch5->getName().empty());
+ EXPECT_EQ(Switch5->getParent(), AtomicCmpxchgCmpxchgRelease);
+ EXPECT_EQ(Switch5->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch5->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch5->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail17);
+ EXPECT_EQ(cast<ConstantInt>(Switch5->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch5->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail18);
+ EXPECT_EQ(cast<ConstantInt>(Switch5->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch5->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail18);
+ EXPECT_EQ(cast<ConstantInt>(Switch5->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch5->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail19);
+
+ // %atomic_cmpxchg.cmpxchg.pair21 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getName(),
+ "atomic_cmpxchg.cmpxchg.pair21");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail17);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair21->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair21->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getSuccessOrdering(),
+ AtomicOrdering::Release);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair21->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue16
+ EXPECT_TRUE(Branch9->getName().empty());
+ EXPECT_EQ(Branch9->getParent(), AtomicCmpxchgCmpxchgMonotonicFail17);
+ EXPECT_EQ(Branch9->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch9->isUnconditional());
+ EXPECT_EQ(Branch9->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue16);
+
+ // %atomic_cmpxchg.cmpxchg.pair23 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getName(),
+ "atomic_cmpxchg.cmpxchg.pair23");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail18);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair23->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair23->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getSuccessOrdering(),
+ AtomicOrdering::Release);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair23->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue16
+ EXPECT_TRUE(Branch10->getName().empty());
+ EXPECT_EQ(Branch10->getParent(), AtomicCmpxchgCmpxchgAcquireFail18);
+ EXPECT_EQ(Branch10->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch10->isUnconditional());
+ EXPECT_EQ(Branch10->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue16);
+
+ // %atomic_cmpxchg.cmpxchg.pair25 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getName(),
+ "atomic_cmpxchg.cmpxchg.pair25");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail19);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair25->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair25->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getSuccessOrdering(),
+ AtomicOrdering::Release);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair25->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue16
+ EXPECT_TRUE(Branch11->getName().empty());
+ EXPECT_EQ(Branch11->getParent(), AtomicCmpxchgCmpxchgSeqcstFail19);
+ EXPECT_EQ(Branch11->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch11->isUnconditional());
+ EXPECT_EQ(Branch11->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue16);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ EXPECT_TRUE(Branch12->getName().empty());
+ EXPECT_EQ(Branch12->getParent(), AtomicCmpxchgCmpxchgFailorderContinue16);
+ EXPECT_EQ(Branch12->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch12->isUnconditional());
+ EXPECT_EQ(Branch12->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail29
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail29
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail30
+ // ]
+ EXPECT_TRUE(Switch6->getName().empty());
+ EXPECT_EQ(Switch6->getParent(), AtomicCmpxchgCmpxchgAcqrel);
+ EXPECT_EQ(Switch6->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch6->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch6->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail28);
+ EXPECT_EQ(cast<ConstantInt>(Switch6->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch6->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail29);
+ EXPECT_EQ(cast<ConstantInt>(Switch6->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch6->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail29);
+ EXPECT_EQ(cast<ConstantInt>(Switch6->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch6->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail30);
+
+ // %atomic_cmpxchg.cmpxchg.pair32 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getName(),
+ "atomic_cmpxchg.cmpxchg.pair32");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail28);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair32->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair32->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair32->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue27
+ EXPECT_TRUE(Branch13->getName().empty());
+ EXPECT_EQ(Branch13->getParent(), AtomicCmpxchgCmpxchgMonotonicFail28);
+ EXPECT_EQ(Branch13->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch13->isUnconditional());
+ EXPECT_EQ(Branch13->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue27);
+
+ // %atomic_cmpxchg.cmpxchg.pair34 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getName(),
+ "atomic_cmpxchg.cmpxchg.pair34");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail29);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair34->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair34->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair34->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue27
+ EXPECT_TRUE(Branch14->getName().empty());
+ EXPECT_EQ(Branch14->getParent(), AtomicCmpxchgCmpxchgAcquireFail29);
+ EXPECT_EQ(Branch14->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch14->isUnconditional());
+ EXPECT_EQ(Branch14->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue27);
+
+ // %atomic_cmpxchg.cmpxchg.pair36 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getName(),
+ "atomic_cmpxchg.cmpxchg.pair36");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail30);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair36->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair36->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair36->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue27
+ EXPECT_TRUE(Branch15->getName().empty());
+ EXPECT_EQ(Branch15->getParent(), AtomicCmpxchgCmpxchgSeqcstFail30);
+ EXPECT_EQ(Branch15->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch15->isUnconditional());
+ EXPECT_EQ(Branch15->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue27);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ EXPECT_TRUE(Branch16->getName().empty());
+ EXPECT_EQ(Branch16->getParent(), AtomicCmpxchgCmpxchgFailorderContinue27);
+ EXPECT_EQ(Branch16->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch16->isUnconditional());
+ EXPECT_EQ(Branch16->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail40
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail40
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail41
+ // ]
+ EXPECT_TRUE(Switch7->getName().empty());
+ EXPECT_EQ(Switch7->getParent(), AtomicCmpxchgCmpxchgSeqcst);
+ EXPECT_EQ(Switch7->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch7->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch7->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail39);
+ EXPECT_EQ(cast<ConstantInt>(Switch7->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch7->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail40);
+ EXPECT_EQ(cast<ConstantInt>(Switch7->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch7->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail40);
+ EXPECT_EQ(cast<ConstantInt>(Switch7->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch7->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail41);
+
+ // %atomic_cmpxchg.cmpxchg.pair43 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getName(),
+ "atomic_cmpxchg.cmpxchg.pair43");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail39);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair43->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair43->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair43->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue38
+ EXPECT_TRUE(Branch17->getName().empty());
+ EXPECT_EQ(Branch17->getParent(), AtomicCmpxchgCmpxchgMonotonicFail39);
+ EXPECT_EQ(Branch17->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch17->isUnconditional());
+ EXPECT_EQ(Branch17->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue38);
+
+ // %atomic_cmpxchg.cmpxchg.pair45 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getName(),
+ "atomic_cmpxchg.cmpxchg.pair45");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail40);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair45->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair45->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair45->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue38
+ EXPECT_TRUE(Branch18->getName().empty());
+ EXPECT_EQ(Branch18->getParent(), AtomicCmpxchgCmpxchgAcquireFail40);
+ EXPECT_EQ(Branch18->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch18->isUnconditional());
+ EXPECT_EQ(Branch18->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue38);
+
+ // %atomic_cmpxchg.cmpxchg.pair47 = cmpxchg volatile ptr %atomic_ptr, i32...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getName(),
+ "atomic_cmpxchg.cmpxchg.pair47");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail41);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair47->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair47->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair47->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue38
+ EXPECT_TRUE(Branch19->getName().empty());
+ EXPECT_EQ(Branch19->getParent(), AtomicCmpxchgCmpxchgSeqcstFail41);
+ EXPECT_EQ(Branch19->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch19->isUnconditional());
+ EXPECT_EQ(Branch19->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue38);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue
+ EXPECT_TRUE(Branch20->getName().empty());
+ EXPECT_EQ(Branch20->getParent(), AtomicCmpxchgCmpxchgFailorderContinue38);
+ EXPECT_EQ(Branch20->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch20->isUnconditional());
+ EXPECT_EQ(Branch20->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue);
+
+ // br label %atomic_cmpxchg.cmpxchg.weak.continue
+ EXPECT_TRUE(Branch21->getName().empty());
+ EXPECT_EQ(Branch21->getParent(), AtomicCmpxchgCmpxchgMemorderContinue);
+ EXPECT_EQ(Branch21->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch21->isUnconditional());
+ EXPECT_EQ(Branch21->getOperand(0), ExitBB);
+
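+  // The weak-cmpxchg arm is expanded the same way: an outer switch on the
+  // success ordering (below), with each case containing a nested switch on
+  // the failure ordering and `cmpxchg weak volatile` instructions.
+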
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire51
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire51
+ // i32 3, label %atomic_cmpxchg.cmpxchg.release52
+ // i32 4, label %atomic_cmpxchg.cmpxchg.acqrel53
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst54
+ // ]
+ EXPECT_TRUE(Switch8->getName().empty());
+ EXPECT_EQ(Switch8->getParent(), AtomicCmpxchgCmpxchgWeak);
+ EXPECT_EQ(Switch8->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch8->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch8->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonic50);
+ EXPECT_EQ(cast<ConstantInt>(Switch8->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch8->getOperand(3), AtomicCmpxchgCmpxchgAcquire51);
+ EXPECT_EQ(cast<ConstantInt>(Switch8->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch8->getOperand(5), AtomicCmpxchgCmpxchgAcquire51);
+ EXPECT_EQ(cast<ConstantInt>(Switch8->getOperand(6))->getZExtValue(), 3);
+ EXPECT_EQ(Switch8->getOperand(7), AtomicCmpxchgCmpxchgRelease52);
+ EXPECT_EQ(cast<ConstantInt>(Switch8->getOperand(8))->getZExtValue(), 4);
+ EXPECT_EQ(Switch8->getOperand(9), AtomicCmpxchgCmpxchgAcqrel53);
+ EXPECT_EQ(cast<ConstantInt>(Switch8->getOperand(10))->getZExtValue(), 5);
+ EXPECT_EQ(Switch8->getOperand(11), AtomicCmpxchgCmpxchgSeqcst54);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail58
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail58
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail59
+ // ]
+ EXPECT_TRUE(Switch9->getName().empty());
+ EXPECT_EQ(Switch9->getParent(), AtomicCmpxchgCmpxchgMonotonic50);
+ EXPECT_EQ(Switch9->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch9->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch9->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail57);
+ EXPECT_EQ(cast<ConstantInt>(Switch9->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch9->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail58);
+ EXPECT_EQ(cast<ConstantInt>(Switch9->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch9->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail58);
+ EXPECT_EQ(cast<ConstantInt>(Switch9->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch9->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail59);
+
+ // %atomic_cmpxchg.cmpxchg.pair61 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getName(),
+ "atomic_cmpxchg.cmpxchg.pair61");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail57);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair61->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair61->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getSuccessOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair61->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue56
+ EXPECT_TRUE(Branch22->getName().empty());
+ EXPECT_EQ(Branch22->getParent(), AtomicCmpxchgCmpxchgMonotonicFail57);
+ EXPECT_EQ(Branch22->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch22->isUnconditional());
+ EXPECT_EQ(Branch22->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue56);
+
+ // %atomic_cmpxchg.cmpxchg.pair63 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getName(),
+ "atomic_cmpxchg.cmpxchg.pair63");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail58);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair63->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair63->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getSuccessOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair63->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue56
+ EXPECT_TRUE(Branch23->getName().empty());
+ EXPECT_EQ(Branch23->getParent(), AtomicCmpxchgCmpxchgAcquireFail58);
+ EXPECT_EQ(Branch23->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch23->isUnconditional());
+ EXPECT_EQ(Branch23->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue56);
+
+ // %atomic_cmpxchg.cmpxchg.pair65 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getName(),
+ "atomic_cmpxchg.cmpxchg.pair65");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail59);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair65->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair65->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getSuccessOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair65->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue56
+ EXPECT_TRUE(Branch24->getName().empty());
+ EXPECT_EQ(Branch24->getParent(), AtomicCmpxchgCmpxchgSeqcstFail59);
+ EXPECT_EQ(Branch24->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch24->isUnconditional());
+ EXPECT_EQ(Branch24->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue56);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ EXPECT_TRUE(Branch25->getName().empty());
+ EXPECT_EQ(Branch25->getParent(), AtomicCmpxchgCmpxchgFailorderContinue56);
+ EXPECT_EQ(Branch25->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch25->isUnconditional());
+ EXPECT_EQ(Branch25->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue49);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail69
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail69
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail70
+ // ]
+ EXPECT_TRUE(Switch10->getName().empty());
+ EXPECT_EQ(Switch10->getParent(), AtomicCmpxchgCmpxchgAcquire51);
+ EXPECT_EQ(Switch10->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch10->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch10->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail68);
+ EXPECT_EQ(cast<ConstantInt>(Switch10->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch10->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail69);
+ EXPECT_EQ(cast<ConstantInt>(Switch10->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch10->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail69);
+ EXPECT_EQ(cast<ConstantInt>(Switch10->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch10->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail70);
+
+ // %atomic_cmpxchg.cmpxchg.pair72 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getName(),
+ "atomic_cmpxchg.cmpxchg.pair72");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail68);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair72->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair72->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getSuccessOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair72->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue67
+ EXPECT_TRUE(Branch26->getName().empty());
+ EXPECT_EQ(Branch26->getParent(), AtomicCmpxchgCmpxchgMonotonicFail68);
+ EXPECT_EQ(Branch26->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch26->isUnconditional());
+ EXPECT_EQ(Branch26->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue67);
+
+ // %atomic_cmpxchg.cmpxchg.pair74 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getName(),
+ "atomic_cmpxchg.cmpxchg.pair74");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail69);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair74->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair74->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getSuccessOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair74->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue67
+ EXPECT_TRUE(Branch27->getName().empty());
+ EXPECT_EQ(Branch27->getParent(), AtomicCmpxchgCmpxchgAcquireFail69);
+ EXPECT_EQ(Branch27->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch27->isUnconditional());
+ EXPECT_EQ(Branch27->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue67);
+
+ // %atomic_cmpxchg.cmpxchg.pair76 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getName(),
+ "atomic_cmpxchg.cmpxchg.pair76");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail70);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair76->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair76->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getSuccessOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair76->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue67
+ EXPECT_TRUE(Branch28->getName().empty());
+ EXPECT_EQ(Branch28->getParent(), AtomicCmpxchgCmpxchgSeqcstFail70);
+ EXPECT_EQ(Branch28->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch28->isUnconditional());
+ EXPECT_EQ(Branch28->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue67);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ EXPECT_TRUE(Branch29->getName().empty());
+ EXPECT_EQ(Branch29->getParent(), AtomicCmpxchgCmpxchgFailorderContinue67);
+ EXPECT_EQ(Branch29->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch29->isUnconditional());
+ EXPECT_EQ(Branch29->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue49);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail80
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail80
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail81
+ // ]
+ EXPECT_TRUE(Switch11->getName().empty());
+ EXPECT_EQ(Switch11->getParent(), AtomicCmpxchgCmpxchgRelease52);
+ EXPECT_EQ(Switch11->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch11->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch11->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail79);
+ EXPECT_EQ(cast<ConstantInt>(Switch11->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch11->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail80);
+ EXPECT_EQ(cast<ConstantInt>(Switch11->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch11->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail80);
+ EXPECT_EQ(cast<ConstantInt>(Switch11->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch11->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail81);
+
+ // %atomic_cmpxchg.cmpxchg.pair83 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getName(),
+ "atomic_cmpxchg.cmpxchg.pair83");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail79);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair83->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair83->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getSuccessOrdering(),
+ AtomicOrdering::Release);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair83->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue78
+ EXPECT_TRUE(Branch30->getName().empty());
+ EXPECT_EQ(Branch30->getParent(), AtomicCmpxchgCmpxchgMonotonicFail79);
+ EXPECT_EQ(Branch30->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch30->isUnconditional());
+ EXPECT_EQ(Branch30->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue78);
+
+ // %atomic_cmpxchg.cmpxchg.pair85 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getName(),
+ "atomic_cmpxchg.cmpxchg.pair85");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail80);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair85->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair85->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getSuccessOrdering(),
+ AtomicOrdering::Release);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair85->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue78
+ EXPECT_TRUE(Branch31->getName().empty());
+ EXPECT_EQ(Branch31->getParent(), AtomicCmpxchgCmpxchgAcquireFail80);
+ EXPECT_EQ(Branch31->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch31->isUnconditional());
+ EXPECT_EQ(Branch31->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue78);
+
+ // %atomic_cmpxchg.cmpxchg.pair87 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getName(),
+ "atomic_cmpxchg.cmpxchg.pair87");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail81);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair87->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair87->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getSuccessOrdering(),
+ AtomicOrdering::Release);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair87->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue78
+ EXPECT_TRUE(Branch32->getName().empty());
+ EXPECT_EQ(Branch32->getParent(), AtomicCmpxchgCmpxchgSeqcstFail81);
+ EXPECT_EQ(Branch32->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch32->isUnconditional());
+ EXPECT_EQ(Branch32->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue78);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ EXPECT_TRUE(Branch33->getName().empty());
+ EXPECT_EQ(Branch33->getParent(), AtomicCmpxchgCmpxchgFailorderContinue78);
+ EXPECT_EQ(Branch33->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch33->isUnconditional());
+ EXPECT_EQ(Branch33->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue49);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail91
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail91
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail92
+ // ]
+ EXPECT_TRUE(Switch12->getName().empty());
+ EXPECT_EQ(Switch12->getParent(), AtomicCmpxchgCmpxchgAcqrel53);
+ EXPECT_EQ(Switch12->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch12->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch12->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail90);
+ EXPECT_EQ(cast<ConstantInt>(Switch12->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch12->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail91);
+ EXPECT_EQ(cast<ConstantInt>(Switch12->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch12->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail91);
+ EXPECT_EQ(cast<ConstantInt>(Switch12->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch12->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail92);
+
+ // %atomic_cmpxchg.cmpxchg.pair94 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getName(),
+ "atomic_cmpxchg.cmpxchg.pair94");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail90);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair94->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair94->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair94->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue89
+ EXPECT_TRUE(Branch34->getName().empty());
+ EXPECT_EQ(Branch34->getParent(), AtomicCmpxchgCmpxchgMonotonicFail90);
+ EXPECT_EQ(Branch34->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch34->isUnconditional());
+ EXPECT_EQ(Branch34->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue89);
+
+ // %atomic_cmpxchg.cmpxchg.pair96 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getName(),
+ "atomic_cmpxchg.cmpxchg.pair96");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail91);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair96->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair96->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair96->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue89
+ EXPECT_TRUE(Branch35->getName().empty());
+ EXPECT_EQ(Branch35->getParent(), AtomicCmpxchgCmpxchgAcquireFail91);
+ EXPECT_EQ(Branch35->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch35->isUnconditional());
+ EXPECT_EQ(Branch35->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue89);
+
+ // %atomic_cmpxchg.cmpxchg.pair98 = cmpxchg weak volatile ptr %atomic_ptr...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getName(),
+ "atomic_cmpxchg.cmpxchg.pair98");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail92);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair98->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair98->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getSuccessOrdering(),
+ AtomicOrdering::AcquireRelease);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair98->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue89
+ EXPECT_TRUE(Branch36->getName().empty());
+ EXPECT_EQ(Branch36->getParent(), AtomicCmpxchgCmpxchgSeqcstFail92);
+ EXPECT_EQ(Branch36->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch36->isUnconditional());
+ EXPECT_EQ(Branch36->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue89);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ EXPECT_TRUE(Branch37->getName().empty());
+ EXPECT_EQ(Branch37->getParent(), AtomicCmpxchgCmpxchgFailorderContinue89);
+ EXPECT_EQ(Branch37->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch37->isUnconditional());
+ EXPECT_EQ(Branch37->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue49);
+
+ // switch i32 %memorderarg_success, label %atomic_cmpxchg.cmpxchg.monoton...
+ // i32 1, label %atomic_cmpxchg.cmpxchg.acquire_fail102
+ // i32 2, label %atomic_cmpxchg.cmpxchg.acquire_fail102
+ // i32 5, label %atomic_cmpxchg.cmpxchg.seqcst_fail103
+ // ]
+ EXPECT_TRUE(Switch13->getName().empty());
+ EXPECT_EQ(Switch13->getParent(), AtomicCmpxchgCmpxchgSeqcst54);
+ EXPECT_EQ(Switch13->getType(), Type::getVoidTy(Ctx));
+ EXPECT_EQ(Switch13->getCondition(), MemorderArg);
+ EXPECT_EQ(Switch13->getDefaultDest(), AtomicCmpxchgCmpxchgMonotonicFail101);
+ EXPECT_EQ(cast<ConstantInt>(Switch13->getOperand(2))->getZExtValue(), 1);
+ EXPECT_EQ(Switch13->getOperand(3), AtomicCmpxchgCmpxchgAcquireFail102);
+ EXPECT_EQ(cast<ConstantInt>(Switch13->getOperand(4))->getZExtValue(), 2);
+ EXPECT_EQ(Switch13->getOperand(5), AtomicCmpxchgCmpxchgAcquireFail102);
+ EXPECT_EQ(cast<ConstantInt>(Switch13->getOperand(6))->getZExtValue(), 5);
+ EXPECT_EQ(Switch13->getOperand(7), AtomicCmpxchgCmpxchgSeqcstFail103);
+
+ // %atomic_cmpxchg.cmpxchg.pair105 = cmpxchg weak volatile ptr %atomic_pt...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getName(),
+ "atomic_cmpxchg.cmpxchg.pair105");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getParent(),
+ AtomicCmpxchgCmpxchgMonotonicFail101);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair105->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair105->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getFailureOrdering(),
+ AtomicOrdering::Monotonic);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair105->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue100
+ EXPECT_TRUE(Branch38->getName().empty());
+ EXPECT_EQ(Branch38->getParent(), AtomicCmpxchgCmpxchgMonotonicFail101);
+ EXPECT_EQ(Branch38->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch38->isUnconditional());
+ EXPECT_EQ(Branch38->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue100);
+
+ // %atomic_cmpxchg.cmpxchg.pair107 = cmpxchg weak volatile ptr %atomic_pt...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getName(),
+ "atomic_cmpxchg.cmpxchg.pair107");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getParent(),
+ AtomicCmpxchgCmpxchgAcquireFail102);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair107->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair107->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getFailureOrdering(),
+ AtomicOrdering::Acquire);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair107->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue100
+ EXPECT_TRUE(Branch39->getName().empty());
+ EXPECT_EQ(Branch39->getParent(), AtomicCmpxchgCmpxchgAcquireFail102);
+ EXPECT_EQ(Branch39->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch39->isUnconditional());
+ EXPECT_EQ(Branch39->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue100);
+
+ // %atomic_cmpxchg.cmpxchg.pair109 = cmpxchg weak volatile ptr %atomic_pt...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getName(),
+ "atomic_cmpxchg.cmpxchg.pair109");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getParent(),
+ AtomicCmpxchgCmpxchgSeqcstFail103);
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair109->isVolatile());
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgPair109->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair109->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // br label %atomic_cmpxchg.cmpxchg.failorder.continue100
+ EXPECT_TRUE(Branch40->getName().empty());
+ EXPECT_EQ(Branch40->getParent(), AtomicCmpxchgCmpxchgSeqcstFail103);
+ EXPECT_EQ(Branch40->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch40->isUnconditional());
+ EXPECT_EQ(Branch40->getOperand(0), AtomicCmpxchgCmpxchgFailorderContinue100);
+
+ // br label %atomic_cmpxchg.cmpxchg.memorder.continue49
+ EXPECT_TRUE(Branch41->getName().empty());
+ EXPECT_EQ(Branch41->getParent(), AtomicCmpxchgCmpxchgFailorderContinue100);
+ EXPECT_EQ(Branch41->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch41->isUnconditional());
+ EXPECT_EQ(Branch41->getOperand(0), AtomicCmpxchgCmpxchgMemorderContinue49);
+
+ // br label %atomic_cmpxchg.cmpxchg.weak.continue
+ EXPECT_TRUE(Branch42->getName().empty());
+ EXPECT_EQ(Branch42->getParent(), AtomicCmpxchgCmpxchgMemorderContinue49);
+ EXPECT_EQ(Branch42->getType(), Type::getVoidTy(Ctx));
+ EXPECT_TRUE(Branch42->isUnconditional());
+ EXPECT_EQ(Branch42->getOperand(0), ExitBB);
+
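+  // The final success flag is a phi in the exit block; the test only checks
+  // that both incoming values (one from the strong arm, one from the weak
+  // arm) are themselves phi nodes.
+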
+ // %atomic_cmpxchg.cmpxchg.isweak.success = phi i1 [ %atomic_cmpxchg.cmpx...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.isweak.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), ExitBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_TRUE(isa<PHINode>(cast<Instruction>(AtomicSuccess)->getOperand(0)));
+ EXPECT_TRUE(isa<PHINode>(cast<Instruction>(AtomicSuccess)->getOperand(1)));
+
+ // ret void
+ EXPECT_TRUE(Return->getName().empty());
+ EXPECT_EQ(Return->getParent(), ExitBB);
+ EXPECT_EQ(Return->getType(), Type::getVoidTy(Ctx));
+}
+
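+// Check that a non-default synchronization scope (SyncScope::SingleThread)
+// is propagated to the generated cmpxchg instruction.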
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_SyncScope) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getInt32Ty(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::SingleThread,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired syncscope("singlethread") seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cm...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(),
+ SyncScope::SingleThread);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
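+// With a float operand the compare-exchange is performed on an i32 of the
+// same bit width (see the expected IR below), presumably because cmpxchg
+// operates on integer or pointer values.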
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Float) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getFloatTy(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cm...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
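+// x86_fp80 does not map to a supported cmpxchg width (the call below passes a
+// size of 10 bytes), so the helper is expected to fall back to the
+// __atomic_compare_exchange libcall with seq_cst (5) memory orders.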
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_FP80) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Type::getX86_FP80Ty(Ctx),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 10, ptr %atomic_ptr, ptr %expected_ptr, ptr %desired_ptr, i32 5, i32 5)
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchange, 0
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *AtomicCompareExchange =
+ cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 10...
+ EXPECT_EQ(AtomicCompareExchange->getName(), "__atomic_compare_exchange");
+ EXPECT_EQ(AtomicCompareExchange->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCompareExchange->getType(), Type::getInt8Ty(Ctx));
+ EXPECT_FALSE(AtomicCompareExchange->isMustTailCall());
+ EXPECT_FALSE(AtomicCompareExchange->isTailCall());
+ EXPECT_EQ(AtomicCompareExchange->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(0))
+ ->getZExtValue(),
+ 10);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(1), PtrArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(2), ExpectedArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(3), DesiredArg);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(4))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(5))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(AtomicCompareExchange->getCalledFunction(),
+ M->getFunction("__atomic_compare_exchange"));
+
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchang...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getOperand(0),
+ AtomicCompareExchange);
+ EXPECT_EQ(cast<ConstantInt>(cast<Instruction>(AtomicSuccess)->getOperand(1))
+ ->getZExtValue(),
+ 0);
+}
+
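+// Pointer-typed operands are used directly by the cmpxchg instruction; the
+// loaded expected/desired values keep their ptr type.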
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Ptr) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getPtrTy(),
+          /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load ptr, ptr %expected_ptr, align 8
+ // %atomic_cmpxchg.cmpxchg.desired = load ptr, ptr %desired_ptr, align 8
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, ptr %atomic_cmpxchg.cmpxchg.expected, ptr %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { ptr, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load ptr, ptr %expected_ptr, align 8
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), PointerType::get(Ctx, 0));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load ptr, ptr %desired_ptr, align 8
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), PointerType::get(Ctx, 0));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, ptr %atomic_cm...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { ptr, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Struct) {
+ // A struct that is small enough to be covered with a single instruction
+ StructType *STy =
+ StructType::get(Ctx, {Builder.getFloatTy(), Builder.getFloatTy()});
+
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/STy,
+ /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i64, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i64, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i64 %atomic_cmpxchg.cmpxchg.expected, i64 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 1
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i64, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i64, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt64Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i64, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt64Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i64 %atomic_cm...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 1);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i64, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Array) {
+ // A type that is too large for atomic instructions
+ ArrayType *ATy = ArrayType::get(Builder.getFloatTy(), 19);
+
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/ATy,
+ /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 76, ptr %atomic_ptr, ptr %expected_ptr, ptr %desired_ptr, i32 5, i32 5)
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchange, 0
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *AtomicCompareExchange =
+ cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 76...
+ EXPECT_EQ(AtomicCompareExchange->getName(), "__atomic_compare_exchange");
+ EXPECT_EQ(AtomicCompareExchange->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCompareExchange->getType(), Type::getInt8Ty(Ctx));
+ EXPECT_FALSE(AtomicCompareExchange->isMustTailCall());
+ EXPECT_FALSE(AtomicCompareExchange->isTailCall());
+ EXPECT_EQ(AtomicCompareExchange->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(0))
+ ->getZExtValue(),
+ 76);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(1), PtrArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(2), ExpectedArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(3), DesiredArg);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(4))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(5))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(AtomicCompareExchange->getCalledFunction(),
+ M->getFunction("__atomic_compare_exchange"));
+
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchang...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getOperand(0),
+ AtomicCompareExchange);
+ EXPECT_EQ(cast<ConstantInt>(cast<Instruction>(AtomicSuccess)->getOperand(1))
+ ->getZExtValue(),
+ 0);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Array_NoLibatomic) {
+ // Use a triple that does not support libatomic (according to
+ // initializeLibCalls in TargetLibraryInfo.cpp)
+ Triple T("x86_64-scei-ps4");
+ TLII.reset(new TargetLibraryInfoImpl(T));
+ TLI.reset(new TargetLibraryInfo(*TLII));
+
+ // A type that is too large for atomic instructions
+ ArrayType *ATy = ArrayType::get(Builder.getFloatTy(), 19);
+
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/ATy,
+ /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::SingleThread,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ FailedWithMessage("__atomic_compare_exchange builtin not supported by "
+ "any available means"));
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_DataSize) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/static_cast<uint64_t>(6),
+ /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/{}, /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 6, ptr %atomic_ptr, ptr %expected_ptr, ptr %desired_ptr, i32 5, i32 5)
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchange, 0
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ CallInst *AtomicCompareExchange =
+ cast<CallInst>(getUniquePreviousStore(PtrArg, EntryBB));
+
+ // %__atomic_compare_exchange = call i8 @__atomic_compare_exchange(i64 6,...
+ EXPECT_EQ(AtomicCompareExchange->getName(), "__atomic_compare_exchange");
+ EXPECT_EQ(AtomicCompareExchange->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCompareExchange->getType(), Type::getInt8Ty(Ctx));
+ EXPECT_FALSE(AtomicCompareExchange->isMustTailCall());
+ EXPECT_FALSE(AtomicCompareExchange->isTailCall());
+ EXPECT_EQ(AtomicCompareExchange->getCallingConv(), CallingConv::C);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(0))
+ ->getZExtValue(),
+ 6);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(1), PtrArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(2), ExpectedArg);
+ EXPECT_EQ(AtomicCompareExchange->getArgOperand(3), DesiredArg);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(4))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(cast<ConstantInt>(AtomicCompareExchange->getArgOperand(5))
+ ->getZExtValue(),
+ 5);
+ EXPECT_EQ(AtomicCompareExchange->getCalledFunction(),
+ M->getFunction("__atomic_compare_exchange"));
+
+ // %atomic_cmpxchg.cmpxchg.success = icmp eq i8 %__atomic_compare_exchang...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getOperand(0),
+ AtomicCompareExchange);
+ EXPECT_EQ(cast<ConstantInt>(cast<Instruction>(AtomicSuccess)->getOperand(1))
+ ->getZExtValue(),
+ 0);
+}
+
+TEST_F(BuildBuiltinsTests, AtomicCmpxchg_Align) {
+ Value *AtomicSuccess = nullptr;
+ ASSERT_THAT_EXPECTED(
+ emitAtomicCompareExchangeBuiltin(
+ /*AtomicPtr=*/PtrArg,
+ /*ExpectedPtr=*/ExpectedArg,
+ /*DesiredPtr=*/DesiredArg, /*TypeOrSize=*/Builder.getFloatTy(),
+ /*IsWeak=*/false,
+ /*IsVolatile=*/false,
+ /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
+ /*Scope=*/SyncScope::System,
+ /*PrevPtr=*/nullptr,
+ /*Align=*/Align(8), /*Builder=*/Builder,
+ /*EmitOptions=*/AtomicEmitOptions(DL, TLI.get()),
+ /*Name=*/"atomic_cmpxchg"),
+ StoreResult(AtomicSuccess));
+ EXPECT_FALSE(verifyModule(*M, &errs()));
+
+ // clang-format off
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cmpxchg.cmpxchg.expected, i32 %atomic_cmpxchg.cmpxchg.desired seq_cst seq_cst, align 8
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmpxchg.cmpxchg.pair, 1
+ // clang-format on
+
+ // Follow use-def and load-store chains to discover instructions
+ AtomicCmpXchgInst *AtomicCmpxchgCmpxchgPair =
+ cast<AtomicCmpXchgInst>(getUniquePreviousStore(PtrArg, EntryBB));
+ LoadInst *AtomicCmpxchgCmpxchgExpected =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getCompareOperand());
+ LoadInst *AtomicCmpxchgCmpxchgDesired =
+ cast<LoadInst>(AtomicCmpxchgCmpxchgPair->getNewValOperand());
+
+ // %atomic_cmpxchg.cmpxchg.expected = load i32, ptr %expected_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getName(),
+ "atomic_cmpxchg.cmpxchg.expected");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgExpected->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgExpected->getPointerOperand(), ExpectedArg);
+
+ // %atomic_cmpxchg.cmpxchg.desired = load i32, ptr %desired_ptr, align 4
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getName(),
+ "atomic_cmpxchg.cmpxchg.desired");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getParent(), EntryBB);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getType(), Type::getInt32Ty(Ctx));
+ EXPECT_TRUE(AtomicCmpxchgCmpxchgDesired->isSimple());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgDesired->getPointerOperand(), DesiredArg);
+
+ // %atomic_cmpxchg.cmpxchg.pair = cmpxchg ptr %atomic_ptr, i32 %atomic_cm...
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getName(), "atomic_cmpxchg.cmpxchg.pair");
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getParent(), EntryBB);
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isVolatile());
+ EXPECT_FALSE(AtomicCmpxchgCmpxchgPair->isWeak());
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSuccessOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getFailureOrdering(),
+ AtomicOrdering::SequentiallyConsistent);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getSyncScopeID(), SyncScope::System);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getAlign(), 8);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getPointerOperand(), PtrArg);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getCompareOperand(),
+ AtomicCmpxchgCmpxchgExpected);
+ EXPECT_EQ(AtomicCmpxchgCmpxchgPair->getNewValOperand(),
+ AtomicCmpxchgCmpxchgDesired);
+
+ // %atomic_cmpxchg.cmpxchg.success = extractvalue { i32, i1 } %atomic_cmp...
+ EXPECT_EQ(AtomicSuccess->getName(), "atomic_cmpxchg.cmpxchg.success");
+ EXPECT_EQ(cast<Instruction>(AtomicSuccess)->getParent(), EntryBB);
+ EXPECT_EQ(AtomicSuccess->getType(), Type::getInt1Ty(Ctx));
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getNumIndices(), 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getIndices()[0], 1);
+ EXPECT_EQ(cast<ExtractValueInst>(AtomicSuccess)->getAggregateOperand(),
+ AtomicCmpxchgCmpxchgPair);
+}
+
+} // namespace
diff --git a/llvm/unittests/Transforms/Utils/CMakeLists.txt b/llvm/unittests/Transforms/Utils/CMakeLists.txt
index 5c7ec28709c16..e422bea80c68e 100644
--- a/llvm/unittests/Transforms/Utils/CMakeLists.txt
+++ b/llvm/unittests/Transforms/Utils/CMakeLists.txt
@@ -5,6 +5,7 @@ set(LLVM_LINK_COMPONENTS
Core
ProfileData
Support
+ TestingSupport
TransformUtils
Passes
Vectorize
@@ -13,6 +14,7 @@ set(LLVM_LINK_COMPONENTS
add_llvm_unittest(UtilsTests
ASanStackFrameLayoutTest.cpp
BasicBlockUtilsTest.cpp
+ BuildBuiltinsTest.cpp
CallPromotionUtilsTest.cpp
CloningTest.cpp
CodeExtractorTest.cpp
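For context while reading the tests above, the following is a minimal, illustrative sketch of how a caller might invoke the new emitAtomicCompareExchangeBuiltin helper. The argument list mirrors the test invocations in BuildBuiltinsTest.cpp; the wrapper function name, its parameter types, and the includes are assumptions for illustration and are not part of the patch.

// Sketch only: emit a seq_cst compare-exchange on a float and return the
// i1 success flag, forwarding any error in the Expected to the caller.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Error.h"
#include "llvm/Transforms/Utils/BuildBuiltins.h"

using namespace llvm;

static Expected<Value *> emitFloatCmpXchg(Value *AtomicPtr, Value *ExpectedPtr,
                                          Value *DesiredPtr,
                                          IRBuilder<> &Builder,
                                          const DataLayout &DL,
                                          const TargetLibraryInfo *TLI) {
  return emitAtomicCompareExchangeBuiltin(
      AtomicPtr, ExpectedPtr, DesiredPtr,
      /*TypeOrSize=*/Builder.getFloatTy(),
      /*IsWeak=*/false, /*IsVolatile=*/false,
      /*SuccessMemorder=*/AtomicOrdering::SequentiallyConsistent,
      /*FailureMemorder=*/AtomicOrdering::SequentiallyConsistent,
      /*Scope=*/SyncScope::System, /*PrevPtr=*/nullptr,
      /*Align=*/{}, Builder, AtomicEmitOptions(DL, TLI), "atomic_cmpxchg");
}

If no lowering strategy is available (as in the AtomicCmpxchg_Array_NoLibatomic test above), the returned Expected carries an Error that the caller has to consume, for example with handleErrors or by reporting toString(takeError()).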
>From b072cabc62d380729206bdd92938c13061a85b20 Mon Sep 17 00:00:00 2001
From: Michael Kruse <llvm-project at meinersbur.de>
Date: Mon, 7 Apr 2025 12:07:31 +0200
Subject: [PATCH 2/2] Undo moving splitBB
---
.../llvm/Frontend/OpenMP/OMPIRBuilder.h | 48 ++++++++++++
.../llvm/Transforms/Utils/BasicBlockUtils.h | 49 ------------
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 75 +++++++++++++++++++
llvm/lib/Transforms/Utils/BasicBlockUtils.cpp | 75 -------------------
llvm/lib/Transforms/Utils/BuildBuiltins.cpp | 63 ++++++++++++++++
5 files changed, 186 insertions(+), 124 deletions(-)
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 2c2c1a8c6166b..28909cef4748d 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -33,6 +33,54 @@ struct TargetRegionEntryInfo;
class OffloadEntriesInfoManager;
class OpenMPIRBuilder;
+/// Move the instruction after an InsertPoint to the beginning of another
+/// BasicBlock.
+///
+/// The instructions after \p IP are moved to the beginning of \p New which must
+/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
+/// \p New will be added such that there is no semantic change. Otherwise, the
+/// \p IP insert block remains degenerate and it is up to the caller to insert a
+/// terminator. \p DL is used as the debug location for the branch instruction
+/// if one is created.
+void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch,
+ DebugLoc DL);
+
+/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
+/// insert location will stick to after the instruction before the insertion
+/// point (instead of moving with the instruction the InsertPoint stores
+/// internally).
+void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
+
+/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
+/// (missing the terminator).
+///
+/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
+/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
+/// is true, a branch to the new successor will be created such that
+/// semantically there is no change; otherwise the block of the insertion point
+/// remains degenerate and it is the caller's responsibility to insert a
+/// terminator. \p DL is used as the debug location for the branch instruction
+/// if one is created. Returns the new successor block.
+BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
+ DebugLoc DL, llvm::Twine Name = {});
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Name = {});
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
+
+/// Like splitBB, but reuses the current block's name for the new name.
+BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Suffix = ".split");
+
/// Captures attributes that affect generating LLVM-IR using the
/// OpenMPIRBuilder and related classes. Note that not all attributes are
/// required for all classes or functions. In some use cases the configuration
diff --git a/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 7746313c82209..6faff3d1fd8e3 100644
--- a/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -20,7 +20,6 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
-#include "llvm/IR/IRBuilder.h"
#include <cassert>
namespace llvm {
@@ -385,54 +384,6 @@ void SplitLandingPadPredecessors(
DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
MemorySSAUpdater *MSSAU = nullptr, bool PreserveLCSSA = false);
-/// Move the instruction after an InsertPoint to the beginning of another
-/// BasicBlock.
-///
-/// The instructions after \p IP are moved to the beginning of \p New which must
-/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
-/// \p New will be added such that there is no semantic change. Otherwise, the
-/// \p IP insert block remains degenerate and it is up to the caller to insert a
-/// terminator. \p DL is used as the debug location for the branch instruction
-/// if one is created.
-void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch,
- DebugLoc DL);
-
-/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
-/// insert location will stick to after the instruction before the insertion
-/// point (instead of moving with the instruction the InsertPoint stores
-/// internally).
-void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
-
-/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
-/// (missing the terminator).
-///
-/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
-/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
-/// is true, a branch to the new successor will new created such that
-/// semantically there is no change; otherwise the block of the insertion point
-/// remains degenerate and it is the caller's responsibility to insert a
-/// terminator. \p DL is used as the debug location for the branch instruction
-/// if one is created. Returns the new successor block.
-BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
- DebugLoc DL, llvm::Twine Name = {});
-
-/// Split a BasicBlock at \p Builder's insertion point, even if the block is
-/// degenerate (missing the terminator). Its new insert location will stick to
-/// after the instruction before the insertion point (instead of moving with the
-/// instruction the InsertPoint stores internally).
-BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Name = {});
-
-/// Split a BasicBlock at \p Builder's insertion point, even if the block is
-/// degenerate (missing the terminator). Its new insert location will stick to
-/// after the instruction before the insertion point (instead of moving with the
-/// instruction the InsertPoint stores internally).
-BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
-
-/// Like splitBB, but reuses the current block's name for the new name.
-BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Suffix = ".split");
-
/// This method duplicates the specified return instruction into a predecessor
/// which ends in an unconditional branch. If the return instruction returns a
/// value defined by a PHI, propagate the right value into the return. It
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 1096ccab52c77..2e5ce5308eea5 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -316,6 +316,81 @@ static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
NewBr->setDebugLoc(DL);
}
+void llvm::spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
+ bool CreateBranch, DebugLoc DL) {
+ assert(New->getFirstInsertionPt() == New->begin() &&
+ "Target BB must not have PHI nodes");
+
+ // Move instructions to new block.
+ BasicBlock *Old = IP.getBlock();
+ New->splice(New->begin(), Old, IP.getPoint(), Old->end());
+
+ if (CreateBranch) {
+ auto *NewBr = BranchInst::Create(New, Old);
+ NewBr->setDebugLoc(DL);
+ }
+}
+
+void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *Old = Builder.GetInsertBlock();
+
+ spliceBB(Builder.saveIP(), New, CreateBranch, DebugLoc);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Old->getTerminator());
+ else
+ Builder.SetInsertPoint(Old);
+
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+}
+
+BasicBlock *llvm::splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
+ DebugLoc DL, llvm::Twine Name) {
+ BasicBlock *Old = IP.getBlock();
+ BasicBlock *New = BasicBlock::Create(
+ Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
+ Old->getParent(), Old->getNextNode());
+ spliceBB(IP, New, CreateBranch, DL);
+ New->replaceSuccessorsPhiUsesWith(Old, New);
+ return New;
+}
+
+BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Name) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ else
+ Builder.SetInsertPoint(Builder.GetInsertBlock());
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+ return New;
+}
+
+BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch,
+ llvm::Twine Name) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ else
+ Builder.SetInsertPoint(Builder.GetInsertBlock());
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+ return New;
+}
+
+BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Suffix) {
+ BasicBlock *Old = Builder.GetInsertBlock();
+ return splitBB(Builder, CreateBranch, Old->getName() + Suffix);
+}
+
// This function creates a fake integer value and a fake use for the integer
// value. It returns the fake value created. This is useful in modeling the
// extra arguments to the outlined functions.
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index edf59054c9599..ce5bf0c7207c7 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -1548,81 +1548,6 @@ void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
PreserveLCSSA);
}
-void llvm::spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
- bool CreateBranch, DebugLoc DL) {
- assert(New->getFirstInsertionPt() == New->begin() &&
- "Target BB must not have PHI nodes");
-
- // Move instructions to new block.
- BasicBlock *Old = IP.getBlock();
- New->splice(New->begin(), Old, IP.getPoint(), Old->end());
-
- if (CreateBranch) {
- auto *NewBr = BranchInst::Create(New, Old);
- NewBr->setDebugLoc(DL);
- }
-}
-
-void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
- DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
- BasicBlock *Old = Builder.GetInsertBlock();
-
- spliceBB(Builder.saveIP(), New, CreateBranch, DebugLoc);
- if (CreateBranch)
- Builder.SetInsertPoint(Old->getTerminator());
- else
- Builder.SetInsertPoint(Old);
-
- // SetInsertPoint also updates the Builder's debug location, but we want to
- // keep the one the Builder was configured to use.
- Builder.SetCurrentDebugLocation(DebugLoc);
-}
-
-BasicBlock *llvm::splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
- DebugLoc DL, llvm::Twine Name) {
- BasicBlock *Old = IP.getBlock();
- BasicBlock *New = BasicBlock::Create(
- Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
- Old->getParent(), Old->getNextNode());
- spliceBB(IP, New, CreateBranch, DL);
- New->replaceSuccessorsPhiUsesWith(Old, New);
- return New;
-}
-
-BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Name) {
- DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
- BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
- if (CreateBranch)
- Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
- else
- Builder.SetInsertPoint(Builder.GetInsertBlock());
- // SetInsertPoint also updates the Builder's debug location, but we want to
- // keep the one the Builder was configured to use.
- Builder.SetCurrentDebugLocation(DebugLoc);
- return New;
-}
-
-BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch,
- llvm::Twine Name) {
- DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
- BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
- if (CreateBranch)
- Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
- else
- Builder.SetInsertPoint(Builder.GetInsertBlock());
- // SetInsertPoint also updates the Builder's debug location, but we want to
- // keep the one the Builder was configured to use.
- Builder.SetCurrentDebugLocation(DebugLoc);
- return New;
-}
-
-BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
- llvm::Twine Suffix) {
- BasicBlock *Old = Builder.GetInsertBlock();
- return splitBB(Builder, CreateBranch, Old->getName() + Suffix);
-}
-
ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
BasicBlock *Pred,
DomTreeUpdater *DTU) {
diff --git a/llvm/lib/Transforms/Utils/BuildBuiltins.cpp b/llvm/lib/Transforms/Utils/BuildBuiltins.cpp
index f290583b1d14b..41282186e9ab8 100644
--- a/llvm/lib/Transforms/Utils/BuildBuiltins.cpp
+++ b/llvm/lib/Transforms/Utils/BuildBuiltins.cpp
@@ -46,6 +46,69 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
Size <= LargestSize;
}
+/// Move the instruction after an InsertPoint to the beginning of another
+/// BasicBlock.
+///
+/// The instructions after \p IP are moved to the beginning of \p New which must
+/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
+/// \p New will be added such that there is no semantic change. Otherwise, the
+/// \p IP insert block remains degenerate and it is up to the caller to insert a
+/// terminator. \p DL is used as the debug location for the branch instruction
+/// if one is created.
+static void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
+ bool CreateBranch, DebugLoc DL) {
+ assert(New->getFirstInsertionPt() == New->begin() &&
+ "Target BB must not have PHI nodes");
+
+ // Move instructions to new block.
+ BasicBlock *Old = IP.getBlock();
+ New->splice(New->begin(), Old, IP.getPoint(), Old->end());
+
+ if (CreateBranch) {
+ auto *NewBr = BranchInst::Create(New, Old);
+ NewBr->setDebugLoc(DL);
+ }
+}
+
+/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
+/// (missing the terminator).
+///
+/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
+/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
+/// is true, a branch to the new successor will be created such that
+/// semantically there is no change; otherwise the block of the insertion point
+/// remains degenerate and it is the caller's responsibility to insert a
+/// terminator. \p DL is used as the debug location for the branch instruction
+/// if one is created. Returns the new successor block.
+static BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
+ DebugLoc DL, llvm::Twine Name) {
+ BasicBlock *Old = IP.getBlock();
+ BasicBlock *New = BasicBlock::Create(
+ Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
+ Old->getParent(), Old->getNextNode());
+ spliceBB(IP, New, CreateBranch, DL);
+ New->replaceSuccessorsPhiUsesWith(Old, New);
+ return New;
+}
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+static BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Name) {
+ DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
+ BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, DebugLoc, Name);
+ if (CreateBranch)
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ else
+ Builder.SetInsertPoint(Builder.GetInsertBlock());
+ // SetInsertPoint also updates the Builder's debug location, but we want to
+ // keep the one the Builder was configured to use.
+ Builder.SetCurrentDebugLocation(DebugLoc);
+ return New;
+}
+
// Helper to check if a type is in a variant
template <typename T, typename Variant> struct is_in_variant;
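The doc comments above describe block-splitting helpers that also work on degenerate blocks. As a rough, illustrative sketch of how such a helper can be used when stitching together the control flow of an atomic lowering, the snippet below builds a bare retry-loop skeleton with the file-local splitBB defined above; the function name, the block names, and the placeholder branch are invented for illustration and do not appear in the patch.

// Sketch only; assumes the static splitBB defined earlier in this file.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"

static void emitRetryLoopSkeleton(llvm::IRBuilderBase &Builder) {
  using namespace llvm;
  // Carve off everything after the insertion point into an exit block.
  // With CreateBranch=false the current block is left without a terminator,
  // so it must be terminated explicitly below.
  BasicBlock *ExitBB = splitBB(Builder, /*CreateBranch=*/false, "cmpxchg.exit");

  Function *F = Builder.GetInsertBlock()->getParent();
  BasicBlock *RetryBB =
      BasicBlock::Create(Builder.getContext(), "cmpxchg.retry", F, ExitBB);

  // Terminate the now-degenerate block by entering the loop.
  Builder.CreateBr(RetryBB);

  // A real lowering would emit the cmpxchg here and branch back to RetryBB
  // on failure; this placeholder simply falls through to the exit block.
  Builder.SetInsertPoint(RetryBB);
  Builder.CreateBr(ExitBB);
}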