[llvm] [Attributor] Change allocation size and load/store offsets using AAPointerInfo for Alloca instructions (PR #72029)
Vidush Singhal via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 3 15:47:43 PDT 2024
https://github.com/vidsinghal updated https://github.com/llvm/llvm-project/pull/72029
From 281d2cb025783e76a17228fc4087c12790f6dba3 Mon Sep 17 00:00:00 2001
From: vidsinghal <vidush.sl at gmail.com>
Date: Sun, 5 Nov 2023 22:30:42 -0500
Subject: [PATCH] [Attributor] Fix Load/Store Offsets if multiple bins are
present for a pointer allocation.
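
The approach, briefly: AAAllocationInfo no longer gives up when
AAPointerInfo reports more than one access bin for an allocation.
Instead, the bins are repacked contiguously starting at offset 0, the
alloca is shrunk to the packed size, and every load, store, and call
argument that touched a moved bin is rewritten through a GEP that
applies the delta (new offset - old offset). As a worked example (not
taken from the patch): bins {[4,8), [100,104)} repack to {[0,4),
[4,8)}, a 104-byte alloca shrinks to 8 bytes, and an access at old
offset 100 is shifted by 4 - 100 = -96 bytes.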
---
llvm/include/llvm/Transforms/IPO/Attributor.h | 67 ++-
.../Transforms/IPO/AttributorAttributes.cpp | 402 ++++++++++++++----
.../Attributor/ArgumentPromotion/crash.ll | 6 +-
.../live_called_from_dead.ll | 4 +-
.../live_called_from_dead_2.ll | 4 +-
.../nonzero-address-spaces.ll | 3 +-
.../Attributor/IPConstantProp/pthreads.ll | 4 +-
llvm/test/Transforms/Attributor/allocator.ll | 153 +++++--
.../Attributor/call-simplify-pointer-info.ll | 56 ++-
.../Transforms/Attributor/heap_to_stack.ll | 3 +-
.../Attributor/heap_to_stack_gpu.ll | 3 +-
llvm/test/Transforms/Attributor/liveness.ll | 8 +-
.../multiple-offsets-pointer-info.ll | 344 ++++++++++-----
llvm/test/Transforms/Attributor/nodelete.ll | 4 +-
.../Transforms/Attributor/pointer-info.ll | 6 +-
.../value-simplify-pointer-info-vec.ll | 14 +-
.../Attributor/value-simplify-pointer-info.ll | 49 ++-
.../OpenMP/custom_state_machines.ll | 172 ++++++--
.../OpenMP/custom_state_machines_pre_lto.ll | 258 ++++++++---
.../Transforms/OpenMP/spmdization_guarding.ll | 2 +
20 files changed, 1157 insertions(+), 405 deletions(-)
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index 30c51250af61c..b9296488450b7 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -5143,9 +5143,7 @@ struct DenormalFPMathState : public AbstractState {
return Mode != Other.Mode || ModeF32 != Other.ModeF32;
}
- bool isValid() const {
- return Mode.isValid() && ModeF32.isValid();
- }
+ bool isValid() const { return Mode.isValid() && ModeF32.isValid(); }
static DenormalMode::DenormalModeKind
unionDenormalKind(DenormalMode::DenormalModeKind Callee,
@@ -5185,9 +5183,7 @@ struct DenormalFPMathState : public AbstractState {
// state.
DenormalState getAssumed() const { return Known; }
- bool isValidState() const override {
- return Known.isValid();
- }
+ bool isValidState() const override { return Known.isValid(); }
/// Return true if there are no dynamic components to the denormal mode worth
/// specializing.
@@ -5198,9 +5194,7 @@ struct DenormalFPMathState : public AbstractState {
Known.ModeF32.Output != DenormalMode::Dynamic;
}
- bool isAtFixpoint() const override {
- return IsAtFixedpoint;
- }
+ bool isAtFixpoint() const override { return IsAtFixedpoint; }
ChangeStatus indicateFixpoint() {
bool Changed = !IsAtFixedpoint;
@@ -6108,6 +6102,52 @@ struct AAPointerInfo : public AbstractAttribute {
Type *Ty;
};
+ /// A helper containing a list of offsets computed for a Use. Ideally this
+ /// list should be strictly ascending, but we ensure that only when we
+ /// actually translate the list of offsets to a RangeList.
+ struct OffsetInfo {
+ using VecTy = SmallVector<int64_t>;
+ using const_iterator = VecTy::const_iterator;
+ VecTy Offsets;
+
+ const_iterator begin() const { return Offsets.begin(); }
+ const_iterator end() const { return Offsets.end(); }
+
+ bool operator==(const OffsetInfo &RHS) const {
+ return Offsets == RHS.Offsets;
+ }
+
+ bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
+
+ void insert(int64_t Offset) { Offsets.push_back(Offset); }
+ bool isUnassigned() const { return Offsets.size() == 0; }
+
+ bool isUnknown() const {
+ if (isUnassigned())
+ return false;
+ if (Offsets.size() == 1)
+ return Offsets.front() == AA::RangeTy::Unknown;
+ return false;
+ }
+
+ void setUnknown() {
+ Offsets.clear();
+ Offsets.push_back(AA::RangeTy::Unknown);
+ }
+
+ void addToAll(int64_t Inc) {
+ for (auto &Offset : Offsets) {
+ Offset += Inc;
+ }
+ }
+
+ /// Copy offsets from \p R into the current list.
+ ///
+ /// Ideally all lists should be strictly ascending, but we defer that to the
+ /// actual use of the list. So we just blindly append here.
+ void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
+ };
+
/// Create an abstract attribute view for the position \p IRP.
static AAPointerInfo &createForPosition(const IRPosition &IRP, Attributor &A);
@@ -6122,6 +6162,9 @@ struct AAPointerInfo : public AbstractAttribute {
virtual const_bin_iterator begin() const = 0;
virtual const_bin_iterator end() const = 0;
virtual int64_t numOffsetBins() const = 0;
+ virtual void dumpState(raw_ostream &O) const = 0;
+ virtual const Access &getBinAccess(unsigned Index) const = 0;
+ virtual const DenseMap<Value *, OffsetInfo> &getOffsetInfoMap() const = 0;
/// Call \p CB on all accesses that might interfere with \p Range and return
/// true if all such accesses were known and the callback returned true for
@@ -6151,6 +6194,9 @@ struct AAPointerInfo : public AbstractAttribute {
return (AA->getIdAddr() == &ID);
}
+  /// Map from seen values to their offsets relative to the associated
+  /// pointer.
+ DenseMap<Value *, OffsetInfo> OffsetInfoMap;
+
/// Unique ID (due to the unique address)
static const char ID;
};
@@ -6293,6 +6339,9 @@ struct AAAllocationInfo : public StateWrapper<BooleanState, AbstractAttribute> {
virtual std::optional<TypeSize> getAllocatedSize() const = 0;
+ using NewOffsetsTy = DenseMap<AA::RangeTy, AA::RangeTy>;
+ virtual const NewOffsetsTy &getNewOffsets() const = 0;
+
/// See AbstractAttribute::getName()
const std::string getName() const override { return "AAAllocationInfo"; }
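
To illustrate how the OffsetInfo helper above behaves, here is a
standalone sketch with simplified types (std::vector instead of
SmallVector, no RangeTy); it approximates the semantics and is not the
Attributor code itself:

  #include <cstdint>
  #include <vector>

  struct OffsetInfoSketch {
    std::vector<int64_t> Offsets;
    void insert(int64_t O) { Offsets.push_back(O); }
    void addToAll(int64_t Inc) { for (auto &O : Offsets) O += Inc; }
    // Blindly append; sorting/deduplication is deferred, as in the patch.
    void merge(const OffsetInfoSketch &R) {
      Offsets.insert(Offsets.end(), R.Offsets.begin(), R.Offsets.end());
    }
  };

  int main() {
    OffsetInfoSketch Base;   // %base                         -> {0}
    Base.insert(0);
    OffsetInfoSketch Gep = Base;
    Gep.addToAll(8);         // %gep = gep i8, %base, i64 8   -> {8}
    OffsetInfoSketch Gep2 = Base;
    Gep2.addToAll(24);       // %gep2 = gep i8, %base, i64 24 -> {24}
    Gep.merge(Gep2);         // select of %gep / %gep2        -> {8, 24}
  }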
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 1b3bf3c732ed0..745e8a3026884 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/ADT/APInt.h"
@@ -72,9 +73,12 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
+#include <climits>
+#include <cstdint>
#include <numeric>
#include <optional>
#include <string>
+#include <utility>
using namespace llvm;
@@ -419,7 +423,8 @@ struct AAReturnedFromReturnedValues : public BaseType {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
StateType S(StateType::getBestState(this->getState()));
- clampReturnedValueStates<AAType, StateType, IRAttributeKind, RecurseForSelectAndPHI>(
+ clampReturnedValueStates<AAType, StateType, IRAttributeKind,
+ RecurseForSelectAndPHI>(
A, *this, S,
PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
// TODO: If we know we visited all returned values, thus no are assumed
@@ -1001,54 +1006,9 @@ ChangeStatus AA::PointerInfo::State::addAccess(
namespace {
-/// A helper containing a list of offsets computed for a Use. Ideally this
-/// list should be strictly ascending, but we ensure that only when we
-/// actually translate the list of offsets to a RangeList.
-struct OffsetInfo {
- using VecTy = SmallVector<int64_t>;
- using const_iterator = VecTy::const_iterator;
- VecTy Offsets;
-
- const_iterator begin() const { return Offsets.begin(); }
- const_iterator end() const { return Offsets.end(); }
-
- bool operator==(const OffsetInfo &RHS) const {
- return Offsets == RHS.Offsets;
- }
-
- bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
-
- void insert(int64_t Offset) { Offsets.push_back(Offset); }
- bool isUnassigned() const { return Offsets.size() == 0; }
-
- bool isUnknown() const {
- if (isUnassigned())
- return false;
- if (Offsets.size() == 1)
- return Offsets.front() == AA::RangeTy::Unknown;
- return false;
- }
-
- void setUnknown() {
- Offsets.clear();
- Offsets.push_back(AA::RangeTy::Unknown);
- }
-
- void addToAll(int64_t Inc) {
- for (auto &Offset : Offsets) {
- Offset += Inc;
- }
- }
-
- /// Copy offsets from \p R into the current list.
- ///
- /// Ideally all lists should be strictly ascending, but we defer that to the
- /// actual use of the list. So we just blindly append here.
- void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
-};
-
#ifndef NDEBUG
-static raw_ostream &operator<<(raw_ostream &OS, const OffsetInfo &OI) {
+static raw_ostream &operator<<(raw_ostream &OS,
+ const AAPointerInfo::OffsetInfo &OI) {
ListSeparator LS;
OS << "[";
for (auto Offset : OI) {
@@ -1083,6 +1043,15 @@ struct AAPointerInfoImpl
return State::numOffsetBins();
}
+ virtual const Access &getBinAccess(unsigned Index) const override {
+ return getAccess(Index);
+ }
+
+ virtual const DenseMap<Value *, OffsetInfo> &
+ getOffsetInfoMap() const override {
+ return OffsetInfoMap;
+ }
+
bool forallInterferingAccesses(
AA::RangeTy Range,
function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
@@ -1429,7 +1398,7 @@ struct AAPointerInfoImpl
void trackPointerInfoStatistics(const IRPosition &IRP) const {}
/// Dump the state into \p O.
- void dumpState(raw_ostream &O) {
+ virtual void dumpState(raw_ostream &O) const override {
for (auto &It : OffsetBins) {
O << "[" << It.first.Offset << "-" << It.first.Offset + It.first.Size
<< "] : " << It.getSecond().size() << "\n";
@@ -1595,7 +1564,6 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
const DataLayout &DL = A.getDataLayout();
Value &AssociatedValue = getAssociatedValue();
- DenseMap<Value *, OffsetInfo> OffsetInfoMap;
OffsetInfoMap[&AssociatedValue].insert(0);
auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
@@ -6975,10 +6943,9 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared) {
Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
if (!Explorer || !Explorer->findInContextOf(UniqueFree, CtxI)) {
- LLVM_DEBUG(
- dbgs()
- << "[H2S] unique free call might not be executed with the allocation "
- << *UniqueFree << "\n");
+ LLVM_DEBUG(dbgs() << "[H2S] unique free call might not be executed "
+ "with the allocation "
+ << *UniqueFree << "\n");
return false;
}
}
@@ -10431,11 +10398,12 @@ struct AANoFPClassFloating : public AANoFPClassImpl {
struct AANoFPClassReturned final
: AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
- AANoFPClassImpl::StateType, false, Attribute::None, false> {
+ AANoFPClassImpl::StateType, false,
+ Attribute::None, false> {
AANoFPClassReturned(const IRPosition &IRP, Attributor &A)
: AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
- AANoFPClassImpl::StateType, false, Attribute::None, false>(
- IRP, A) {}
+ AANoFPClassImpl::StateType, false,
+ Attribute::None, false>(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
@@ -12692,6 +12660,11 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
return AssumedAllocatedSize;
}
+ const NewOffsetsTy &getNewOffsets() const override {
+ assert(isValidState() && "the AA is invalid");
+ return NewComputedOffsets;
+ }
+
std::optional<TypeSize> findInitialAllocationSize(Instruction *I,
const DataLayout &DL) {
@@ -12732,46 +12705,59 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
const DataLayout &DL = A.getDataLayout();
const auto AllocationSize = findInitialAllocationSize(I, DL);
- // If allocation size is nullopt, we give up.
+ // If allocation size is nullopt, we give up
if (!AllocationSize)
return indicatePessimisticFixpoint();
- // For zero sized allocations, we give up.
+ // For zero sized allocations, we give up
// Since we can't reduce further
if (*AllocationSize == 0)
return indicatePessimisticFixpoint();
- int64_t BinSize = PI->numOffsetBins();
-
- // TODO: implement for multiple bins
- if (BinSize > 1)
- return indicatePessimisticFixpoint();
+ int64_t NumBins = PI->numOffsetBins();
- if (BinSize == 0) {
+ if (NumBins == 0) {
auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
if (!changeAllocationSize(NewAllocationSize))
return ChangeStatus::UNCHANGED;
return ChangeStatus::CHANGED;
}
- // TODO: refactor this to be part of multiple bin case
- const auto &It = PI->begin();
+    // For each access bin, compute its new start offset and record the
+    // result in a new map (NewComputedOffsets).
+ unsigned long PrevBinEndOffset = 0;
+ bool ChangedOffsets = false;
- // TODO: handle if Offset is not zero
- if (It->first.Offset != 0)
- return indicatePessimisticFixpoint();
+ for (AAPointerInfo::OffsetBinsTy::const_iterator It = PI->begin();
+ It != PI->end(); It++) {
+ const AA::RangeTy &OldRange = It->getFirst();
- uint64_t SizeOfBin = It->first.Offset + It->first.Size;
+ // If any range has an unknown offset or size, we should leave the
+ // allocation unmodified
+ if (OldRange.offsetOrSizeAreUnknown()) {
+ return indicatePessimisticFixpoint();
+ }
- if (SizeOfBin >= *AllocationSize)
- return indicatePessimisticFixpoint();
+ unsigned long NewStartOffset = PrevBinEndOffset;
+ unsigned long NewEndOffset = NewStartOffset + OldRange.Size;
+ PrevBinEndOffset = NewEndOffset;
+ ChangedOffsets |= setNewOffsets(OldRange, OldRange.Offset, NewStartOffset,
+ OldRange.Size);
+ }
+
+    // Set the new size of the allocation to PrevBinEndOffset * 8, i.e. the
+    // packed size in bits.
auto NewAllocationSize =
- std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
+ std::optional<TypeSize>(TypeSize(PrevBinEndOffset * 8, false));
if (!changeAllocationSize(NewAllocationSize))
return ChangeStatus::UNCHANGED;
+ if (!ChangedOffsets)
+ return ChangeStatus::UNCHANGED;
+
return ChangeStatus::CHANGED;
}
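
For reviewers, here is the repacking arithmetic of the loop above as a
compilable sketch, under the simplifying assumption that bins are
visited in ascending start-offset order (std::map keeps them sorted
here; the names are mine, not the Attributor's):

  #include <cstdint>
  #include <map>

  struct Range { int64_t Offset, Size; };

  // Lay the bins out back-to-back from offset 0 and record the mapping
  // from old start offset to new start offset. Returns the packed
  // allocation size in bytes.
  int64_t repackBins(const std::map<int64_t, Range> &Bins,
                     std::map<int64_t, int64_t> &OldToNew) {
    int64_t PrevBinEndOffset = 0;
    for (const auto &Entry : Bins) {
      const Range &R = Entry.second;
      OldToNew[R.Offset] = PrevBinEndOffset;
      PrevBinEndOffset += R.Size;
    }
    return PrevBinEndOffset;
  }

  // E.g. bins {[4,8), [100,104)} -> {4->0, 100->4}, packed size 8.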
@@ -12781,31 +12767,60 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
assert(isValidState() &&
"Manifest should only be called if the state is valid.");
- Instruction *I = getIRPosition().getCtxI();
+ bool Changed = false;
+ const IRPosition &IRP = getIRPosition();
+ Instruction *I = IRP.getCtxI();
- auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
+    // Check if simplified values exist.
+ SmallVector<AA::ValueAndContext> Values;
+ bool UsedAssumedInformation = false;
+ if (A.getAssumedSimplifiedValues(IRP, *this, Values, AA::AnyScope,
+ UsedAssumedInformation)) {
+
+ bool HasSimplifiedValue = false;
+ for (auto &ValAndContext : Values) {
+      // If any simplified values exist other than the original instruction,
+      // we should not modify this alloca.
+ if (ValAndContext.getValue() && ValAndContext.getValue() != I) {
+ HasSimplifiedValue = true;
+ break;
+ }
+ }
- unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
+ if (HasSimplifiedValue)
+ return ChangeStatus::UNCHANGED;
+ }
+
+ if (getAllocatedSize() == HasNoAllocationSize)
+ return llvm::ChangeStatus::UNCHANGED;
+
+ unsigned long FixedAllocatedSizeInBits =
+ getAllocatedSize()->getFixedValue();
+ unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
switch (I->getOpcode()) {
// TODO: add case for malloc like calls
case Instruction::Alloca: {
AllocaInst *AI = cast<AllocaInst>(I);
+ const DataLayout &DL = A.getDataLayout();
+ auto OriginalAllocationSize = AI->getAllocationSizeInBits(DL);
+
+ if (*OriginalAllocationSize <= FixedAllocatedSizeInBits) {
+ return ChangeStatus::UNCHANGED;
+ }
Type *CharType = Type::getInt8Ty(I->getContext());
+ Type *CharArrayType = ArrayType::get(CharType, NumBytesToAllocate);
- auto *NumBytesToValue =
- ConstantInt::get(I->getContext(), APInt(32, NumBytesToAllocate));
+ BasicBlock::iterator InsertPt = AI->getIterator();
+ InsertPt = std::next(InsertPt);
+ AllocaInst *NewAllocaInst = new AllocaInst(
+ CharArrayType, AI->getAddressSpace(), AI->getName(), InsertPt);
- BasicBlock::iterator insertPt = AI->getIterator();
- insertPt = std::next(insertPt);
- AllocaInst *NewAllocaInst =
- new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
- AI->getAlign(), AI->getName(), insertPt);
+ Changed |= A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst);
- if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
- return ChangeStatus::CHANGED;
+ A.deleteAfterManifest(*AI);
break;
}
@@ -12813,7 +12828,185 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
break;
}
- return ChangeStatus::UNCHANGED;
+ const AAPointerInfo *PI =
+ A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
+
+ if (!PI)
+ return ChangeStatus::UNCHANGED;
+
+ if (!PI->getState().isValidState())
+ return ChangeStatus::UNCHANGED;
+
+ const auto &NewOffsetsMap = getNewOffsets();
+
+  // Build a map from each instruction to the set of bins accessed by that
+  // instruction.
+ DenseMap<Instruction *, DenseMap<AA::RangeTy, AA::RangeTy>>
+ AccessedInstructionsToBinsMap;
+ for (AAPointerInfo::OffsetBinsTy::const_iterator It = PI->begin();
+ It != PI->end(); It++) {
+
+ const auto &OldOffsetRange = It->getFirst();
+
+      // If the OldOffsetRange is not in the map, the offsets for that bin
+      // did not change, so skip it and leave its accesses untouched.
+ if (!NewOffsetsMap.contains(OldOffsetRange))
+ continue;
+
+ const auto &NewOffsetRange = NewOffsetsMap.lookup(OldOffsetRange);
+
+ for (const auto AccIndex : It->getSecond()) {
+ const auto &AccessInstruction = PI->getBinAccess(AccIndex);
+ Instruction *LocalInst = AccessInstruction.getLocalInst();
+
+ DenseMap<AA::RangeTy, AA::RangeTy> &NewBinsForInstruction =
+ AccessedInstructionsToBinsMap.getOrInsertDefault(LocalInst);
+ NewBinsForInstruction.insert(
+ std::make_pair(OldOffsetRange, NewOffsetRange));
+ }
+ }
+
+ for (auto &It : AccessedInstructionsToBinsMap) {
+
+ Instruction *LocalInst = It.first;
+
+    // If there are potential values that replace the accessed instruction,
+    // we should use those instead.
+ SmallVector<AA::ValueAndContext> Vals;
+ if (A.getAssumedSimplifiedValues(IRPosition::inst(*LocalInst), *this,
+ Vals, AA::AnyScope,
+ UsedAssumedInformation)) {
+
+ bool HasSimplifiedValue = false;
+ for (auto &ValAndContext : Vals) {
+        // Don't modify the load/store if any simplified value exists.
+ if (ValAndContext.getValue() &&
+ ValAndContext.getValue() != LocalInst) {
+ HasSimplifiedValue = true;
+ break;
+ }
+ }
+
+ if (HasSimplifiedValue)
+ continue;
+ }
+
+    // Get hold of the map from old bins to new bins.
+ DenseMap<AA::RangeTy, AA::RangeTy> &OldToNewBins = It.second;
+
+ int64_t MinOffset = INT_MAX;
+ int64_t MinOffsetOld = INT_MAX;
+
+ // Find the lowest starting address amongst the bins accessed by the
+ // current instruction
+ for (auto &It2 : OldToNewBins) {
+
+ auto &OldRange = It2.getFirst();
+ auto &NewRange = It2.getSecond();
+
+ if (NewRange.Offset < MinOffset) {
+ MinOffset = NewRange.Offset;
+ MinOffsetOld = OldRange.Offset;
+ }
+ }
+
+ switch (LocalInst->getOpcode()) {
+ case Instruction::Load: {
+ // The number of bytes to shift the load/store by
+ int64_t ShiftValue = MinOffset - MinOffsetOld;
+ LoadInst *OldLoadInst = cast<LoadInst>(LocalInst);
+ Value *PointerOperand = OldLoadInst->getPointerOperand();
+ Type *PointeeTy = OldLoadInst->getPointerOperandType();
+
+ IntegerType *Int64TyInteger =
+ IntegerType::get(LocalInst->getContext(), 64);
+
+ Value *IndexList[1] = {ConstantInt::get(Int64TyInteger, ShiftValue)};
+ Value *GepToNewAddress = GetElementPtrInst::Create(
+ PointeeTy, PointerOperand, IndexList, "NewGep", OldLoadInst);
+
+ LoadInst *NewLoadInst = new LoadInst(
+ OldLoadInst->getType(), GepToNewAddress, OldLoadInst->getName(),
+ false, OldLoadInst->getAlign(), OldLoadInst);
+
+ Changed |=
+ A.changeAfterManifest(IRPosition::inst(*OldLoadInst), *NewLoadInst);
+
+ A.deleteAfterManifest(*OldLoadInst);
+ break;
+ }
+ case Instruction::Store: {
+ // The number of bytes to shift the load/store by
+ int64_t ShiftValue = MinOffset - MinOffsetOld;
+ StoreInst *OldStoreInst = cast<StoreInst>(LocalInst);
+ Value *PointerOperand = OldStoreInst->getPointerOperand();
+ Type *PointeeTy = OldStoreInst->getPointerOperandType();
+
+ IntegerType *Int64TyInteger =
+ IntegerType::get(LocalInst->getContext(), 64);
+
+ Value *IndexList[1] = {ConstantInt::get(Int64TyInteger, ShiftValue)};
+ Value *GepToNewAddress = GetElementPtrInst::Create(
+ PointeeTy, PointerOperand, IndexList, "NewGep", OldStoreInst);
+
+ StoreInst *NewStoreInst =
+ new StoreInst(OldStoreInst->getValueOperand(), GepToNewAddress,
+ false, OldStoreInst->getAlign(), OldStoreInst);
+
+ Changed |= A.changeAfterManifest(IRPosition::inst(*OldStoreInst),
+ *NewStoreInst);
+
+ A.deleteAfterManifest(*OldStoreInst);
+ break;
+ }
+ case Instruction::Call: {
+ auto &OffsetInfoMap = PI->getOffsetInfoMap();
+
+ CallInst *Call = cast<CallInst>(LocalInst);
+ int ArgPosition = 0;
+ for (const auto &CallArg : Call->args()) {
+ for (const auto &User : I->users()) {
+ if (User == CallArg) {
+ IntegerType *Int64TyInteger =
+ IntegerType::get(LocalInst->getContext(), 64);
+
+ auto OffsetsVecArg = OffsetInfoMap.lookup(User).Offsets;
+ int MinOffsetArg = INT_MAX;
+ for (auto &Range : OffsetsVecArg) {
+ if (Range < MinOffsetArg)
+ MinOffsetArg = Range;
+ }
+
+ // find new offset of old offset
+ int NewRange = INT_MAX;
+ for (auto OldToNewRange : NewOffsetsMap) {
+ auto Old = OldToNewRange.getFirst();
+ if (Old.Offset == MinOffsetArg)
+ NewRange = OldToNewRange.getSecond().Offset;
+ }
+
+ auto ShiftValue = NewRange - MinOffsetArg;
+ Value *IndexList[1] = {
+ ConstantInt::get(Int64TyInteger, ShiftValue)};
+ Type *UserTy = User->getType();
+ Instruction *UserInstruction = cast<Instruction>(User);
+ Value *GepToNewAddress = GetElementPtrInst::Create(
+ UserTy, UserInstruction, IndexList, "NewGep", Call);
+ Call->setArgOperand(ArgPosition, GepToNewAddress);
+ break;
+ }
+ }
+ ArgPosition++;
+ }
+ break;
+ }
+ }
+ }
+
+ if (!Changed)
+ return ChangeStatus::UNCHANGED;
+ return ChangeStatus::CHANGED;
}
/// See AbstractAttribute::getAsStr().
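
The load/store rewrite in manifest above, reduced to a byte-wise
IRBuilder sketch (assumptions on my part: an i8 source type for the
shift GEP and a direct RAUW instead of the Attributor's
changeAfterManifest/deleteAfterManifest bookkeeping; the patch itself
uses the load's pointer-operand type as the GEP source type):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instructions.h"

  // Shift a load by Delta = MinNewOffset - MinOldOffset bytes.
  static void shiftLoad(llvm::LoadInst *OldLoad, int64_t Delta) {
    llvm::IRBuilder<> B(OldLoad); // insert before the old load
    llvm::Value *Idx = B.getInt64(Delta);
    llvm::Value *NewPtr = B.CreateGEP(
        B.getInt8Ty(), OldLoad->getPointerOperand(), Idx, "NewGep");
    llvm::LoadInst *NewLoad = B.CreateAlignedLoad(
        OldLoad->getType(), NewPtr, OldLoad->getAlign(), OldLoad->getName());
    OldLoad->replaceAllUsesWith(NewLoad);
    OldLoad->eraseFromParent();
  }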
@@ -12827,8 +13020,28 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
")";
}
+ void dumpNewOffsetBins(raw_ostream &O) {
+
+ O << "Printing Map from [OldOffsetsRange] : [NewOffsetsRange] if the "
+ "offsets changed."
+ << "\n";
+ const auto &NewOffsetsMap = getNewOffsets();
+ for (auto It = NewOffsetsMap.begin(); It != NewOffsetsMap.end(); It++) {
+
+ const auto &OldRange = It->getFirst();
+ const auto &NewRange = It->getSecond();
+
+ O << "[" << OldRange.Offset << "," << OldRange.Offset + OldRange.Size
+ << "] : ";
+ O << "[" << NewRange.Offset << "," << NewRange.Offset + NewRange.Size
+ << "]";
+ O << "\n";
+ }
+ }
+
private:
std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
+ NewOffsetsTy NewComputedOffsets;
// Maintain the computed allocation size of the object.
  // Returns (bool) whether the size of the allocation was modified or not.
@@ -12840,6 +13053,21 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
}
return false;
}
+
+  // Maps an old byte range to its new offset range in the new allocation.
+  // Returns (bool) whether the old byte range's offsets changed or not.
+ bool setNewOffsets(const AA::RangeTy &OldRange, int64_t OldOffset,
+ int64_t NewComputedOffset, int64_t Size) {
+
+ if (OldOffset == NewComputedOffset)
+ return false;
+
+ AA::RangeTy &NewRange = NewComputedOffsets.getOrInsertDefault(OldRange);
+ NewRange.Offset = NewComputedOffset;
+ NewRange.Size = Size;
+
+ return true;
+ }
};
struct AAAllocationInfoFloating : AAAllocationInfoImpl {
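
For reference, with the two-bin example from the commit message the new
dumpNewOffsetBins output would look roughly like this (modulo DenseMap
iteration order):

  Printing Map from [OldOffsetsRange] : [NewOffsetsRange] if the offsets changed.
  [4,8] : [0,4]
  [100,104] : [4,8]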
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll
index 595cb37c6c93e..f0efa2a0ae3c1 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll
@@ -106,10 +106,8 @@ define i32 @test_inf_promote_caller(i32 %arg) {
; CGSCC-LABEL: define {{[^@]+}}@test_inf_promote_caller
; CGSCC-SAME: (i32 [[ARG:%.*]]) #[[ATTR3:[0-9]+]] {
; CGSCC-NEXT: bb:
-; CGSCC-NEXT: [[TMP:%.*]] = alloca [[S:%.*]], align 8
-; CGSCC-NEXT: [[TMP3:%.*]] = alloca i8, i32 0, align 8
-; CGSCC-NEXT: [[TMP1:%.*]] = alloca [[S]], align 8
-; CGSCC-NEXT: [[TMP14:%.*]] = alloca i8, i32 0, align 8
+; CGSCC-NEXT: [[TMP3:%.*]] = alloca [0 x i8], align 1
+; CGSCC-NEXT: [[TMP14:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT: ret i32 0
;
bb:
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll
index 2df81d6cb1832..63dbc4da7da37 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll
@@ -36,9 +36,7 @@ define internal i32 @caller(ptr %B) {
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@caller
; CGSCC-SAME: () #[[ATTR0]] {
-; CGSCC-NEXT: [[A:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: [[A2:%.*]] = alloca i8, i32 0, align 4
-; CGSCC-NEXT: [[A1:%.*]] = alloca i8, i32 0, align 4
+; CGSCC-NEXT: [[A1:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT: ret i32 0
;
%A = alloca i32
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll
index 7c28de24beea2..956fa0e88b028 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll
@@ -53,9 +53,7 @@ define internal i32 @caller(ptr %B) {
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@caller
; CGSCC-SAME: (ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] {
-; CGSCC-NEXT: [[A:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: [[A2:%.*]] = alloca i8, i32 0, align 4
-; CGSCC-NEXT: [[A1:%.*]] = alloca i8, i32 0, align 4
+; CGSCC-NEXT: [[A1:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT: [[C:%.*]] = call i32 @test(ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[B]]) #[[ATTR2:[0-9]+]]
; CGSCC-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll
index b588a399e5bd9..7b5e1276ac212 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll
@@ -29,8 +29,7 @@ define internal i32 @foo(ptr) {
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: () addrspace(1) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[RETVAL1:%.*]] = alloca i8, i32 0, align 4
+; CHECK-NEXT: [[RETVAL1:%.*]] = alloca [0 x i8], align 1
; CHECK-NEXT: call addrspace(0) void asm sideeffect "ldr r0, [r0] \0Abx lr \0A", ""()
; CHECK-NEXT: unreachable
;
diff --git a/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll b/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll
index 490894d129023..af2d1ef1eabba 100644
--- a/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll
+++ b/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll
@@ -34,8 +34,8 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define dso_local i32 @main() {
; TUNIT-LABEL: define {{[^@]+}}@main() {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[ALLOC11:%.*]] = alloca i8, i32 0, align 8
-; TUNIT-NEXT: [[ALLOC22:%.*]] = alloca i8, i32 0, align 8
+; TUNIT-NEXT: [[ALLOC11:%.*]] = alloca [0 x i8], align 1
+; TUNIT-NEXT: [[ALLOC22:%.*]] = alloca [0 x i8], align 1
; TUNIT-NEXT: [[THREAD:%.*]] = alloca i64, align 8
; TUNIT-NEXT: [[CALL:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @foo, ptr nofree readnone align 4294967296 undef)
; TUNIT-NEXT: [[CALL1:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @bar, ptr noalias nocapture nofree nonnull readnone align 8 dereferenceable(8) undef)
diff --git a/llvm/test/Transforms/Attributor/allocator.ll b/llvm/test/Transforms/Attributor/allocator.ll
index f2d9ecd1d8fa4..1c6cd32a539c5 100644
--- a/llvm/test/Transforms/Attributor/allocator.ll
+++ b/llvm/test/Transforms/Attributor/allocator.ll
@@ -13,8 +13,8 @@ define dso_local void @positive_alloca_1(i32 noundef %val) #0 {
; CHECK-LABEL: define dso_local void @positive_alloca_1
; CHECK-SAME: (i32 noundef [[VAL:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[VAL_ADDR1:%.*]] = alloca i8, i32 4, align 4
-; CHECK-NEXT: [[F2:%.*]] = alloca i8, i32 4, align 4
+; CHECK-NEXT: [[VAL_ADDR1:%.*]] = alloca [4 x i8], align 1
+; CHECK-NEXT: [[F2:%.*]] = alloca [4 x i8], align 1
; CHECK-NEXT: store i32 [[VAL]], ptr [[VAL_ADDR1]], align 4
; CHECK-NEXT: store i32 10, ptr [[F2]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[F2]], align 4
@@ -164,37 +164,54 @@ entry:
;TODO: The allocation can be reduced here.
;However, the offsets (load/store etc.) need to be changed.
; Function Attrs: noinline nounwind uwtable
-define dso_local { i64, ptr } @positive_test_not_a_single_start_offset(i32 noundef %val) #0 {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
-; CHECK-LABEL: define dso_local { i64, ptr } @positive_test_not_a_single_start_offset
-; CHECK-SAME: (i32 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+define dso_local void @positive_test_not_a_single_start_offset(i32 noundef %val) #0 {
+; CHECK-LABEL: define dso_local void @positive_test_not_a_single_start_offset
+; CHECK-SAME: (i32 noundef [[VAL:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
; CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[F1:%.*]] = alloca [5 x i8], align 1
; CHECK-NEXT: store i32 [[VAL]], ptr [[VAL_ADDR]], align 4
-; CHECK-NEXT: store i32 2, ptr [[RETVAL]], align 8
-; CHECK-NEXT: [[FIELD3:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[RETVAL]], i32 0, i32 2
-; CHECK-NEXT: store ptr [[VAL_ADDR]], ptr [[FIELD3]], align 8
-; CHECK-NEXT: [[TMP0:%.*]] = load { i64, ptr }, ptr [[RETVAL]], align 8
-; CHECK-NEXT: ret { i64, ptr } [[TMP0]]
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 2, [[VAL]]
+; CHECK-NEXT: store i32 [[MUL]], ptr [[F1]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[F1]], align 4
+; CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP0]])
+; CHECK-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_FOO:%.*]], ptr [[F1]], i32 0, i32 2
+; CHECK-NEXT: [[CONV1:%.*]] = trunc i32 [[TMP0]] to i8
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[C]], i64 -4
+; CHECK-NEXT: store i8 [[CONV1]], ptr [[NEWGEP]], align 4
+; CHECK-NEXT: [[C2:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[F1]], i32 0, i32 2
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[C2]], i64 -4
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[NEWGEP2]], align 4
+; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+; CHECK-NEXT: [[CALL3:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[CONV]])
+; CHECK-NEXT: ret void
;
entry:
- %retval = alloca %struct.Foo, align 8
%val.addr = alloca i32, align 4
+ %f = alloca %struct.Foo, align 4
store i32 %val, ptr %val.addr, align 4
- %field1 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 0
- store i32 2, ptr %field1, align 8
- %field3 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 2
- store ptr %val.addr, ptr %field3, align 8
- %0 = load { i64, ptr }, ptr %retval, align 8
- ret { i64, ptr } %0
+ %0 = load i32, ptr %val.addr, align 4
+ %mul = mul nsw i32 2, %0
+ %a = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+ store i32 %mul, ptr %a, align 4
+ %a1 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+ %1 = load i32, ptr %a1, align 4
+ %call = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %1)
+ %c = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 2
+ %conv1 = trunc i32 %1 to i8
+ store i8 %conv1, ptr %c, align 4
+ %c2 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 2
+ %2 = load i8, ptr %c2, align 4
+ %conv = sext i8 %2 to i32
+ %call3 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %conv)
+ ret void
}
; Function Attrs: noinline nounwind uwtable
define dso_local void @positive_test_reduce_array_allocation_1() {
; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_1() {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAY1:%.*]] = alloca i8, i32 4, align 8
+; CHECK-NEXT: [[ARRAY1:%.*]] = alloca [4 x i8], align 1
; CHECK-NEXT: store i32 0, ptr [[ARRAY1]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAY1]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 2
@@ -275,37 +292,37 @@ entry:
define dso_local void @positive_test_reduce_array_allocation_2() #0 {
; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_2() {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAY:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[ARRAY1:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[I2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 40000)
-; CHECK-NEXT: store ptr [[CALL]], ptr [[ARRAY]], align 8
-; CHECK-NEXT: store i32 0, ptr [[I]], align 4
+; CHECK-NEXT: store ptr [[CALL]], ptr [[ARRAY1]], align 8
+; CHECK-NEXT: store i32 0, ptr [[I2]], align 4
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10000
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I2]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM]]
; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.inc:
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 2
-; CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[I2]], align 4
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
-; CHECK-NEXT: store i32 0, ptr [[I]], align 4
+; CHECK-NEXT: store i32 0, ptr [[I2]], align 4
; CHECK-NEXT: br label [[FOR_COND1:%.*]]
; CHECK: for.cond1:
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP4]], 10000
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END9:%.*]]
; CHECK: for.body3:
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP5]] to i64
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM4]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
@@ -313,28 +330,28 @@ define dso_local void @positive_test_reduce_array_allocation_2() #0 {
; CHECK-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: br label [[FOR_INC7:%.*]]
; CHECK: for.inc7:
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP7]], 2
-; CHECK-NEXT: store i32 [[ADD8]], ptr [[I]], align 4
+; CHECK-NEXT: store i32 [[ADD8]], ptr [[I2]], align 4
; CHECK-NEXT: br label [[FOR_COND1]]
; CHECK: for.end9:
-; CHECK-NEXT: store i32 0, ptr [[I]], align 4
+; CHECK-NEXT: store i32 0, ptr [[I2]], align 4
; CHECK-NEXT: br label [[FOR_COND10:%.*]]
; CHECK: for.cond10:
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP8]], 10000
; CHECK-NEXT: br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END18:%.*]]
; CHECK: for.body12:
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[IDXPROM13:%.*]] = sext i32 [[TMP9]] to i64
; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM13]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4
; CHECK-NEXT: [[CALL15:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP10]])
; CHECK-NEXT: br label [[FOR_INC16:%.*]]
; CHECK: for.inc16:
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 2
-; CHECK-NEXT: store i32 [[ADD17]], ptr [[I]], align 4
+; CHECK-NEXT: store i32 [[ADD17]], ptr [[I2]], align 4
; CHECK-NEXT: br label [[FOR_COND10]]
; CHECK: for.end18:
; CHECK-NEXT: ret void
@@ -426,7 +443,7 @@ define dso_local void @pthread_test(){
; TUNIT-NEXT: [[ARG1:%.*]] = alloca i8, align 8
; TUNIT-NEXT: [[THREAD:%.*]] = alloca i64, align 8
; TUNIT-NEXT: [[CALL1:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @pthread_allocation_should_remain_same, ptr noundef nonnull align 8 dereferenceable(1) [[ARG1]])
-; TUNIT-NEXT: [[F1:%.*]] = alloca i8, i32 4, align 4
+; TUNIT-NEXT: [[F1:%.*]] = alloca [4 x i8], align 1
; TUNIT-NEXT: [[CALL2:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @pthread_allocation_should_be_reduced, ptr noalias nocapture nofree nonnull readnone align 4 dereferenceable(12) undef)
; TUNIT-NEXT: [[F2:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; TUNIT-NEXT: [[CALL3:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @pthread_check_captured_pointer, ptr noundef nonnull align 4 dereferenceable(12) [[F2]])
@@ -499,6 +516,58 @@ entry:
ret void
}
+define dso_local void @alloca_array_multi_offset(){
+; CHECK: Function Attrs: nofree norecurse nosync nounwind memory(none)
+; CHECK-LABEL: define dso_local void @alloca_array_multi_offset
+; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 0, ptr [[I]], align 4
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 2
+; CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+; CHECK-NEXT: br label [[FOR_COND]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %arr = alloca i8, i32 10, align 4
+ %i = alloca i32, align 4
+ store i32 0, ptr %i, align 4
+ br label %for.cond
+
+for.cond:
+ %0 = load i32, ptr %i, align 4
+ %cmp = icmp slt i32 %0, 10
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+ %1 = load i32, ptr %i, align 4
+ %2 = load ptr, ptr %arr, align 8
+ %3 = load i32, ptr %i, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %2, i32 %3
+ store i32 %1, ptr %arrayidx, align 4
+ br label %for.inc
+
+for.inc:
+ %4 = load i32, ptr %i, align 4
+ %add = add nsw i32 %4, 2
+ store i32 %add, ptr %i, align 4
+ br label %for.cond
+
+for.end:
+ ret void
+
+}
+
declare external void @external_call(ptr)
@@ -511,9 +580,9 @@ declare i32 @printf(ptr noundef, ...) #1
; Function Attrs: nounwind allocsize(0)
declare noalias ptr @malloc(i64 noundef) #1
;.
-; TUNIT: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
+; TUNIT: attributes #[[ATTR0]] = { nofree norecurse nosync nounwind memory(none) }
;.
-; CGSCC: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
+; CGSCC: attributes #[[ATTR0]] = { nofree norecurse nosync nounwind memory(none) }
;.
; TUNIT: [[META0:![0-9]+]] = !{[[META1:![0-9]+]]}
; TUNIT: [[META1]] = !{i64 2, i64 3, i1 false}
diff --git a/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll
index 5bb795911ce40..f2d660f12fbaa 100644
--- a/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll
@@ -36,8 +36,10 @@ define i8 @call_simplifiable_1() {
; TUNIT-LABEL: define {{[^@]+}}@call_simplifiable_1
; TUNIT-SAME: () #[[ATTR0:[0-9]+]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: [[I0:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [1 x i8], align 1
+; TUNIT-NEXT: [[I0:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 2
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[I0]], i64 -2
+; TUNIT-NEXT: store i8 2, ptr [[NEWGEP]], align 2
; TUNIT-NEXT: ret i8 2
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
@@ -93,9 +95,13 @@ define i8 @call_simplifiable_2() {
; TUNIT-LABEL: define {{[^@]+}}@call_simplifiable_2
; TUNIT-SAME: () #[[ATTR0]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: [[I0:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2
-; TUNIT-NEXT: [[I1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 3
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [2 x i8], align 1
+; TUNIT-NEXT: [[I0:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 2
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[I0]], i64 -2
+; TUNIT-NEXT: store i8 2, ptr [[NEWGEP]], align 2
+; TUNIT-NEXT: [[I1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 3
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[I1]], i64 -2
+; TUNIT-NEXT: store i8 3, ptr [[NEWGEP2]], align 1
; TUNIT-NEXT: ret i8 4
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
@@ -125,8 +131,10 @@ define i8 @call_simplifiable_3() {
; TUNIT-LABEL: define {{[^@]+}}@call_simplifiable_3
; TUNIT-SAME: () #[[ATTR0]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: [[I2:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [1 x i8], align 1
+; TUNIT-NEXT: [[I2:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 2
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[I2]], i64 -2
+; TUNIT-NEXT: store i8 2, ptr [[NEWGEP]], align 2
; TUNIT-NEXT: ret i8 2
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
@@ -198,12 +206,16 @@ define i8 @call_partially_simplifiable_1() {
; TUNIT-LABEL: define {{[^@]+}}@call_partially_simplifiable_1
; TUNIT-SAME: () #[[ATTR0]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: [[I2:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2
-; TUNIT-NEXT: store i8 2, ptr [[I2]], align 2
-; TUNIT-NEXT: [[I3:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 3
-; TUNIT-NEXT: store i8 3, ptr [[I3]], align 1
-; TUNIT-NEXT: [[I4:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 4
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; TUNIT-NEXT: [[I2:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 2
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[I2]], i64 -2
+; TUNIT-NEXT: store i8 2, ptr [[NEWGEP]], align 2
+; TUNIT-NEXT: [[I3:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 3
+; TUNIT-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[I3]], i64 -1
+; TUNIT-NEXT: store i8 3, ptr [[NEWGEP3]], align 1
+; TUNIT-NEXT: [[I4:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 4
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[I4]], i64 -3
+; TUNIT-NEXT: store i8 4, ptr [[NEWGEP2]], align 4
; TUNIT-NEXT: [[R:%.*]] = call i8 @sum_two_different_loads(ptr nocapture nofree noundef nonnull readonly align 2 dereferenceable(1022) [[I2]], ptr nocapture nofree noundef nonnull readonly dereferenceable(1021) [[I3]]) #[[ATTR3]]
; TUNIT-NEXT: ret i8 [[R]]
;
@@ -239,13 +251,17 @@ define i8 @call_partially_simplifiable_2(i1 %cond) {
; TUNIT-LABEL: define {{[^@]+}}@call_partially_simplifiable_2
; TUNIT-SAME: (i1 [[COND:%.*]]) #[[ATTR2:[0-9]+]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: [[I51:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 51
-; TUNIT-NEXT: [[I52:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 52
-; TUNIT-NEXT: store i8 2, ptr [[I52]], align 4
-; TUNIT-NEXT: [[I53:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 53
-; TUNIT-NEXT: store i8 3, ptr [[I53]], align 1
-; TUNIT-NEXT: [[I54:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 54
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [4 x i8], align 1
+; TUNIT-NEXT: [[I51:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 51
+; TUNIT-NEXT: [[I52:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 52
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[I52]], i64 -52
+; TUNIT-NEXT: store i8 2, ptr [[NEWGEP]], align 4
+; TUNIT-NEXT: [[I53:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 53
+; TUNIT-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[I53]], i64 -50
+; TUNIT-NEXT: store i8 3, ptr [[NEWGEP3]], align 1
+; TUNIT-NEXT: [[I54:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 54
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[I54]], i64 -53
+; TUNIT-NEXT: store i8 4, ptr [[NEWGEP2]], align 2
; TUNIT-NEXT: [[SEL:%.*]] = select i1 [[COND]], ptr [[I51]], ptr [[I52]]
; TUNIT-NEXT: [[R:%.*]] = call i8 @sum_two_different_loads(ptr nocapture nofree nonnull readonly dereferenceable(972) [[SEL]], ptr nocapture nofree noundef nonnull readonly dereferenceable(971) [[I53]]) #[[ATTR3]]
; TUNIT-NEXT: ret i8 [[R]]
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack.ll b/llvm/test/Transforms/Attributor/heap_to_stack.ll
index 33ac066e43d09..846373e05be1a 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack.ll
@@ -502,8 +502,7 @@ define i32 @malloc_in_loop(i32 %arg) {
; CHECK-SAME: (i32 [[ARG:%.*]]) {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[I1:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: [[I11:%.*]] = alloca i8, i32 0, align 8
+; CHECK-NEXT: [[I11:%.*]] = alloca [0 x i8], align 1
; CHECK-NEXT: store i32 [[ARG]], ptr [[I]], align 4
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
index 2a5b3e94291a2..70aace8100abd 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
@@ -452,8 +452,7 @@ define i32 @malloc_in_loop(i32 %arg) {
; CHECK-SAME: (i32 [[ARG:%.*]]) {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[I1:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: [[I11:%.*]] = alloca i8, i32 0, align 8
+; CHECK-NEXT: [[I11:%.*]] = alloca [0 x i8], align 1
; CHECK-NEXT: store i32 [[ARG]], ptr [[I]], align 4
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll
index f17bd5795a174..9eb79f8a46723 100644
--- a/llvm/test/Transforms/Attributor/liveness.ll
+++ b/llvm/test/Transforms/Attributor/liveness.ll
@@ -2587,8 +2587,8 @@ define void @bad_gep() {
; TUNIT-LABEL: define {{[^@]+}}@bad_gep
; TUNIT-SAME: () #[[ATTR13]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
-; TUNIT-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
+; TUNIT-NEXT: [[N1:%.*]] = alloca [0 x i8], align 1
+; TUNIT-NEXT: [[M2:%.*]] = alloca [0 x i8], align 1
; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nocapture nofree noundef nonnull dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
; TUNIT-NEXT: br label [[EXIT:%.*]]
; TUNIT: while.body:
@@ -2605,8 +2605,8 @@ define void @bad_gep() {
; CGSCC-LABEL: define {{[^@]+}}@bad_gep
; CGSCC-SAME: () #[[ATTR6]] {
; CGSCC-NEXT: entry:
-; CGSCC-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
-; CGSCC-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
+; CGSCC-NEXT: [[N1:%.*]] = alloca [0 x i8], align 1
+; CGSCC-NEXT: [[M2:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nocapture nofree noundef nonnull dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
; CGSCC-NEXT: br label [[EXIT:%.*]]
; CGSCC: while.body:
diff --git a/llvm/test/Transforms/Attributor/multiple-offsets-pointer-info.ll b/llvm/test/Transforms/Attributor/multiple-offsets-pointer-info.ll
index c6945d65acb29..a120524026378 100644
--- a/llvm/test/Transforms/Attributor/multiple-offsets-pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/multiple-offsets-pointer-info.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals
-; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,TUNIT
-; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,CGSCC
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,TUNIT
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,CGSCC
%struct.T = type { i32, [10 x [20 x i8]] }
@@ -50,20 +50,26 @@ define i8 @select_offsets_simplifiable_2(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@select_offsets_simplifiable_2
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
-; CHECK-NEXT: store i8 23, ptr [[GEP23]], align 4
-; CHECK-NEXT: [[GEP29:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 29
-; CHECK-NEXT: store i8 29, ptr [[GEP29]], align 4
-; CHECK-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 7
-; CHECK-NEXT: store i8 7, ptr [[GEP7]], align 4
-; CHECK-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 31
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [4 x i8], align 1
+; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -22
+; CHECK-NEXT: store i8 23, ptr [[NEWGEP]], align 4
+; CHECK-NEXT: [[GEP29:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 29
+; CHECK-NEXT: [[NEWGEP5:%.*]] = getelementptr ptr, ptr [[GEP29]], i64 -27
+; CHECK-NEXT: store i8 29, ptr [[NEWGEP5]], align 4
+; CHECK-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 7
+; CHECK-NEXT: [[NEWGEP4:%.*]] = getelementptr ptr, ptr [[GEP7]], i64 -4
+; CHECK-NEXT: store i8 7, ptr [[NEWGEP4]], align 4
+; CHECK-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 31
+; CHECK-NEXT: [[NEWGEP6:%.*]] = getelementptr ptr, ptr [[GEP31]], i64 -31
+; CHECK-NEXT: store i8 42, ptr [[NEWGEP6]], align 4
; CHECK-NEXT: [[SEL0:%.*]] = select i1 [[CND1]], i64 20, i64 26
; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CND2]], i64 [[SEL0]], i64 4
-; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 [[SEL1]]
+; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 [[SEL1]]
; CHECK-NEXT: [[GEP_PLUS:%.*]] = getelementptr inbounds i8, ptr [[GEP_SEL]], i64 3
-; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[GEP_PLUS]], align 4
-; CHECK-NEXT: ret i8 [[I]]
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP_PLUS]], i64 -22
+; CHECK-NEXT: [[I3:%.*]] = load i8, ptr [[NEWGEP2]], align 4
+; CHECK-NEXT: ret i8 [[I3]]
;
entry:
%Bytes = alloca [1024 x i8], align 16
@@ -93,10 +99,15 @@ define i8 @select_offsets_simplifiable_3(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@select_offsets_simplifiable_3
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BUNDLE:%.*]] = alloca [[STRUCT_T:%.*]], align 64
+; CHECK-NEXT: [[BUNDLE1:%.*]] = alloca [5 x i8], align 1
+; CHECK-NEXT: [[GEP_FIXED:%.*]] = getelementptr inbounds [[STRUCT_T:%.*]], ptr [[BUNDLE1]], i64 0, i32 1, i64 1, i64 1
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_FIXED]], i64 -22
+; CHECK-NEXT: store i8 100, ptr [[NEWGEP]], align 4
; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CND1]], i64 1, i64 3
; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CND2]], i64 5, i64 11
-; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [[STRUCT_T]], ptr [[BUNDLE]], i64 0, i32 1, i64 [[SEL1]], i64 [[SEL2]]
+; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [[STRUCT_T]], ptr [[BUNDLE1]], i64 0, i32 1, i64 [[SEL1]], i64 [[SEL2]]
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP_SEL]], i64 -69
+; CHECK-NEXT: store i8 42, ptr [[NEWGEP2]], align 4
; CHECK-NEXT: ret i8 100
;
entry:
@@ -117,10 +128,12 @@ define i8 @select_offsets_simplifiable_4(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@select_offsets_simplifiable_4
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: [[SEL0:%.*]] = select i1 [[CND1]], i64 23, i64 29
; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CND2]], i64 [[SEL0]], i64 7
-; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 [[SEL1]]
+; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 [[SEL1]]
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_SEL]], i64 -23
+; CHECK-NEXT: store i8 100, ptr [[NEWGEP]], align 4
; CHECK-NEXT: ret i8 100
;
entry:
@@ -135,18 +148,35 @@ entry:
}
define i8 @select_offsets_not_simplifiable_1(i1 %cnd1, i1 %cnd2) {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
-; CHECK-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_1
-; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[SEL0:%.*]] = select i1 [[CND1]], i64 23, i64 29
-; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CND2]], i64 [[SEL0]], i64 7
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
-; CHECK-NEXT: store i8 100, ptr [[GEP23]], align 4
-; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 [[SEL1]]
-; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[GEP_SEL]], align 4
-; CHECK-NEXT: ret i8 [[I]]
+; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; TUNIT-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_1
+; TUNIT-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
+; TUNIT-NEXT: entry:
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; TUNIT-NEXT: [[SEL0:%.*]] = select i1 [[CND1]], i64 23, i64 29
+; TUNIT-NEXT: [[SEL1:%.*]] = select i1 [[CND2]], i64 [[SEL0]], i64 7
+; TUNIT-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -23
+; TUNIT-NEXT: store i8 100, ptr [[NEWGEP]], align 4
+; TUNIT-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 [[SEL1]]
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP_SEL]], i64 -23
+; TUNIT-NEXT: [[I3:%.*]] = load i8, ptr [[NEWGEP2]], align 4
+; TUNIT-NEXT: ret i8 [[I3]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; CGSCC-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_1
+; CGSCC-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
+; CGSCC-NEXT: entry:
+; CGSCC-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; CGSCC-NEXT: [[SEL0:%.*]] = select i1 [[CND1]], i64 23, i64 29
+; CGSCC-NEXT: [[SEL1:%.*]] = select i1 [[CND2]], i64 [[SEL0]], i64 7
+; CGSCC-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; CGSCC-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -23
+; CGSCC-NEXT: store i8 100, ptr [[NEWGEP3]], align 4
+; CGSCC-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 [[SEL1]]
+; CGSCC-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_SEL]], i64 -23
+; CGSCC-NEXT: [[I2:%.*]] = load i8, ptr [[NEWGEP]], align 4
+; CGSCC-NEXT: ret i8 [[I2]]
;
entry:
%Bytes = alloca [1024 x i8], align 16
@@ -164,15 +194,17 @@ define i8 @select_offsets_not_simplifiable_2(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_2
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: [[SEL0:%.*]] = select i1 [[CND1]], i64 23, i64 29
; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CND2]], i64 [[SEL0]], i64 7
-; CHECK-NEXT: [[GEP32:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 32
-; CHECK-NEXT: store i8 100, ptr [[GEP32]], align 16
-; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 [[SEL1]]
+; CHECK-NEXT: [[GEP32:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 32
+; CHECK-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[GEP32]], i64 -32
+; CHECK-NEXT: store i8 100, ptr [[NEWGEP3]], align 16
+; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 [[SEL1]]
; CHECK-NEXT: [[GEP_PLUS:%.*]] = getelementptr inbounds i8, ptr [[GEP_SEL]], i64 3
-; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[GEP_PLUS]], align 4
-; CHECK-NEXT: ret i8 [[I]]
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_PLUS]], i64 -32
+; CHECK-NEXT: [[I2:%.*]] = load i8, ptr [[NEWGEP]], align 4
+; CHECK-NEXT: ret i8 [[I2]]
;
entry:
%Bytes = alloca [1024 x i8], align 16
@@ -237,19 +269,37 @@ entry:
}
define i8 @select_offsets_not_simplifiable_5(i1 %cnd1, i1 %cnd2) {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
-; CHECK-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_5
-; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BUNDLE:%.*]] = alloca [[STRUCT_T:%.*]], align 64
-; CHECK-NEXT: [[GEP_FIXED:%.*]] = getelementptr inbounds [[STRUCT_T]], ptr [[BUNDLE]], i64 0, i32 1, i64 3, i64 5
-; CHECK-NEXT: store i8 100, ptr [[GEP_FIXED]], align 4
-; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CND1]], i64 1, i64 3
-; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CND2]], i64 5, i64 11
-; CHECK-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [[STRUCT_T]], ptr [[BUNDLE]], i64 0, i32 1, i64 [[SEL1]], i64 [[SEL2]]
-; CHECK-NEXT: store i8 42, ptr [[GEP_SEL]], align 4
-; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[GEP_FIXED]], align 4
-; CHECK-NEXT: ret i8 [[I]]
+; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; TUNIT-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_5
+; TUNIT-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
+; TUNIT-NEXT: entry:
+; TUNIT-NEXT: [[BUNDLE1:%.*]] = alloca [4 x i8], align 1
+; TUNIT-NEXT: [[GEP_FIXED:%.*]] = getelementptr inbounds [[STRUCT_T:%.*]], ptr [[BUNDLE1]], i64 0, i32 1, i64 3, i64 5
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_FIXED]], i64 -69
+; TUNIT-NEXT: store i8 100, ptr [[NEWGEP]], align 4
+; TUNIT-NEXT: [[SEL1:%.*]] = select i1 [[CND1]], i64 1, i64 3
+; TUNIT-NEXT: [[SEL2:%.*]] = select i1 [[CND2]], i64 5, i64 11
+; TUNIT-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [[STRUCT_T]], ptr [[BUNDLE1]], i64 0, i32 1, i64 [[SEL1]], i64 [[SEL2]]
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP_SEL]], i64 -69
+; TUNIT-NEXT: store i8 42, ptr [[NEWGEP2]], align 4
+; TUNIT-NEXT: [[I:%.*]] = load i8, ptr [[GEP_FIXED]], align 4
+; TUNIT-NEXT: ret i8 [[I]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; CGSCC-LABEL: define {{[^@]+}}@select_offsets_not_simplifiable_5
+; CGSCC-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
+; CGSCC-NEXT: entry:
+; CGSCC-NEXT: [[BUNDLE1:%.*]] = alloca [4 x i8], align 1
+; CGSCC-NEXT: [[GEP_FIXED:%.*]] = getelementptr inbounds [[STRUCT_T:%.*]], ptr [[BUNDLE1]], i64 0, i32 1, i64 3, i64 5
+; CGSCC-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP_FIXED]], i64 -69
+; CGSCC-NEXT: store i8 100, ptr [[NEWGEP2]], align 4
+; CGSCC-NEXT: [[SEL1:%.*]] = select i1 [[CND1]], i64 1, i64 3
+; CGSCC-NEXT: [[SEL2:%.*]] = select i1 [[CND2]], i64 5, i64 11
+; CGSCC-NEXT: [[GEP_SEL:%.*]] = getelementptr inbounds [[STRUCT_T]], ptr [[BUNDLE1]], i64 0, i32 1, i64 [[SEL1]], i64 [[SEL2]]
+; CGSCC-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_SEL]], i64 -69
+; CGSCC-NEXT: store i8 42, ptr [[NEWGEP]], align 4
+; CGSCC-NEXT: [[I:%.*]] = load i8, ptr [[GEP_FIXED]], align 4
+; CGSCC-NEXT: ret i8 [[I]]
;
entry:
%bundle = alloca %struct.T, align 64
@@ -267,16 +317,35 @@ entry:
}
define i8 @select_gep_simplifiable_1(i1 %cnd1, i1 %cnd2) {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
-; CHECK-LABEL: define {{[^@]+}}@select_gep_simplifiable_1
-; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR2:[0-9]+]] {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 7
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
-; CHECK-NEXT: [[SEL_PTR:%.*]] = select i1 [[CND1]], ptr [[GEP7]], ptr [[GEP23]]
-; CHECK-NEXT: store i8 42, ptr [[SEL_PTR]], align 4
-; CHECK-NEXT: ret i8 21
+; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
+; TUNIT-LABEL: define {{[^@]+}}@select_gep_simplifiable_1
+; TUNIT-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR2:[0-9]+]] {
+; TUNIT-NEXT: entry:
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; TUNIT-NEXT: [[GEP3:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 3
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP3]], i64 -1
+; TUNIT-NEXT: store i8 21, ptr [[NEWGEP2]], align 4
+; TUNIT-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 7
+; TUNIT-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; TUNIT-NEXT: [[SEL_PTR:%.*]] = select i1 [[CND1]], ptr [[GEP7]], ptr [[GEP23]]
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[SEL_PTR]], i64 -23
+; TUNIT-NEXT: store i8 42, ptr [[NEWGEP]], align 4
+; TUNIT-NEXT: ret i8 21
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
+; CGSCC-LABEL: define {{[^@]+}}@select_gep_simplifiable_1
+; CGSCC-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR2:[0-9]+]] {
+; CGSCC-NEXT: entry:
+; CGSCC-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; CGSCC-NEXT: [[GEP3:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 3
+; CGSCC-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP3]], i64 -1
+; CGSCC-NEXT: store i8 21, ptr [[NEWGEP]], align 4
+; CGSCC-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 7
+; CGSCC-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; CGSCC-NEXT: [[SEL_PTR:%.*]] = select i1 [[CND1]], ptr [[GEP7]], ptr [[GEP23]]
+; CGSCC-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[SEL_PTR]], i64 -23
+; CGSCC-NEXT: store i8 42, ptr [[NEWGEP2]], align 4
+; CGSCC-NEXT: ret i8 21
;
entry:
%Bytes = alloca [1024 x i8], align 16
@@ -295,12 +364,14 @@ define i8 @select_gep_not_simplifiable_1(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@select_gep_not_simplifiable_1
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 7
-; CHECK-NEXT: store i8 1, ptr [[GEP7]], align 4
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [2 x i8], align 1
+; CHECK-NEXT: [[GEP7:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 7
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP7]], i64 -6
+; CHECK-NEXT: store i8 1, ptr [[NEWGEP]], align 4
+; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
; CHECK-NEXT: [[SEL_PTR:%.*]] = select i1 [[CND1]], ptr [[GEP7]], ptr [[GEP23]]
-; CHECK-NEXT: store i8 42, ptr [[SEL_PTR]], align 4
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[SEL_PTR]], i64 -23
+; CHECK-NEXT: store i8 42, ptr [[NEWGEP2]], align 4
; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[GEP7]], align 4
; CHECK-NEXT: ret i8 [[I]]
;
@@ -318,25 +389,55 @@ entry:
; FIXME: The whole function is just "ret i8 21".
define i8 @phi_gep_simplifiable_1(i1 %cnd1, i1 %cnd2) {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
-; CHECK-LABEL: define {{[^@]+}}@phi_gep_simplifiable_1
-; CHECK-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3]] {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
-; CHECK: then:
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
-; CHECK-NEXT: store i8 21, ptr [[GEP23]], align 4
-; CHECK-NEXT: br label [[JOIN:%.*]]
-; CHECK: else:
-; CHECK-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 31
-; CHECK-NEXT: store i8 21, ptr [[GEP31]], align 4
-; CHECK-NEXT: br label [[JOIN]]
-; CHECK: join:
-; CHECK-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
-; CHECK-NEXT: [[GEP29:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 29
-; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[PHI_PTR]], align 4
-; CHECK-NEXT: ret i8 [[I]]
+; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
+; TUNIT-LABEL: define {{[^@]+}}@phi_gep_simplifiable_1
+; TUNIT-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3]] {
+; TUNIT-NEXT: entry:
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; TUNIT-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; TUNIT: then:
+; TUNIT-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -22
+; TUNIT-NEXT: store i8 21, ptr [[NEWGEP2]], align 4
+; TUNIT-NEXT: br label [[JOIN:%.*]]
+; TUNIT: else:
+; TUNIT-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 31
+; TUNIT-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[GEP31]], i64 -31
+; TUNIT-NEXT: store i8 21, ptr [[NEWGEP3]], align 4
+; TUNIT-NEXT: br label [[JOIN]]
+; TUNIT: join:
+; TUNIT-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
+; TUNIT-NEXT: [[GEP29:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 29
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP29]], i64 -27
+; TUNIT-NEXT: store i8 42, ptr [[NEWGEP]], align 4
+; TUNIT-NEXT: [[NEWGEP4:%.*]] = getelementptr ptr, ptr [[PHI_PTR]], i64 -31
+; TUNIT-NEXT: [[I5:%.*]] = load i8, ptr [[NEWGEP4]], align 4
+; TUNIT-NEXT: ret i8 [[I5]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
+; CGSCC-LABEL: define {{[^@]+}}@phi_gep_simplifiable_1
+; CGSCC-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3]] {
+; CGSCC-NEXT: entry:
+; CGSCC-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; CGSCC-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CGSCC: then:
+; CGSCC-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; CGSCC-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -22
+; CGSCC-NEXT: store i8 21, ptr [[NEWGEP]], align 4
+; CGSCC-NEXT: br label [[JOIN:%.*]]
+; CGSCC: else:
+; CGSCC-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 31
+; CGSCC-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[GEP31]], i64 -31
+; CGSCC-NEXT: store i8 21, ptr [[NEWGEP3]], align 4
+; CGSCC-NEXT: br label [[JOIN]]
+; CGSCC: join:
+; CGSCC-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
+; CGSCC-NEXT: [[GEP29:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 29
+; CGSCC-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP29]], i64 -27
+; CGSCC-NEXT: store i8 42, ptr [[NEWGEP2]], align 4
+; CGSCC-NEXT: [[NEWGEP4:%.*]] = getelementptr ptr, ptr [[PHI_PTR]], i64 -31
+; CGSCC-NEXT: [[I5:%.*]] = load i8, ptr [[NEWGEP4]], align 4
+; CGSCC-NEXT: ret i8 [[I5]]
;
entry:
%Bytes = alloca [1024 x i8], align 16
@@ -366,19 +467,23 @@ join:
define i8 @phi_gep_simplifiable_2(i1 %cnd1, i1 %cnd2) {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
; CHECK-LABEL: define {{[^@]+}}@phi_gep_simplifiable_2
-; CHECK-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
+; CHECK-NEXT: [[GEP29:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 29
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[GEP29]], i64 -27
+; CHECK-NEXT: store i8 42, ptr [[NEWGEP2]], align 4
; CHECK-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
+; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 31
+; CHECK-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 31
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
; CHECK-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
-; CHECK-NEXT: store i8 21, ptr [[PHI_PTR]], align 4
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[PHI_PTR]], i64 -31
+; CHECK-NEXT: store i8 21, ptr [[NEWGEP]], align 4
; CHECK-NEXT: ret i8 42
;
entry:
@@ -405,23 +510,45 @@ join:
}
define i8 @phi_gep_not_simplifiable_1(i1 %cnd1, i1 %cnd2) {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
-; CHECK-LABEL: define {{[^@]+}}@phi_gep_not_simplifiable_1
-; CHECK-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3]] {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 23
-; CHECK-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
-; CHECK: then:
-; CHECK-NEXT: br label [[JOIN:%.*]]
-; CHECK: else:
-; CHECK-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 31
-; CHECK-NEXT: br label [[JOIN]]
-; CHECK: join:
-; CHECK-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
-; CHECK-NEXT: store i8 42, ptr [[GEP23]], align 4
-; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[PHI_PTR]], align 4
-; CHECK-NEXT: ret i8 [[I]]
+; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
+; TUNIT-LABEL: define {{[^@]+}}@phi_gep_not_simplifiable_1
+; TUNIT-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3]] {
+; TUNIT-NEXT: entry:
+; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [2 x i8], align 1
+; TUNIT-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; TUNIT-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; TUNIT: then:
+; TUNIT-NEXT: br label [[JOIN:%.*]]
+; TUNIT: else:
+; TUNIT-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 31
+; TUNIT-NEXT: br label [[JOIN]]
+; TUNIT: join:
+; TUNIT-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -22
+; TUNIT-NEXT: store i8 42, ptr [[NEWGEP]], align 4
+; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[PHI_PTR]], i64 -31
+; TUNIT-NEXT: [[I3:%.*]] = load i8, ptr [[NEWGEP2]], align 4
+; TUNIT-NEXT: ret i8 [[I3]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
+; CGSCC-LABEL: define {{[^@]+}}@phi_gep_not_simplifiable_1
+; CGSCC-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR3]] {
+; CGSCC-NEXT: entry:
+; CGSCC-NEXT: [[BYTES1:%.*]] = alloca [2 x i8], align 1
+; CGSCC-NEXT: [[GEP23:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 23
+; CGSCC-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CGSCC: then:
+; CGSCC-NEXT: br label [[JOIN:%.*]]
+; CGSCC: else:
+; CGSCC-NEXT: [[GEP31:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 31
+; CGSCC-NEXT: br label [[JOIN]]
+; CGSCC: join:
+; CGSCC-NEXT: [[PHI_PTR:%.*]] = phi ptr [ [[GEP23]], [[THEN]] ], [ [[GEP31]], [[ELSE]] ]
+; CGSCC-NEXT: [[NEWGEP3:%.*]] = getelementptr ptr, ptr [[GEP23]], i64 -22
+; CGSCC-NEXT: store i8 42, ptr [[NEWGEP3]], align 4
+; CGSCC-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[PHI_PTR]], i64 -31
+; CGSCC-NEXT: [[I2:%.*]] = load i8, ptr [[NEWGEP]], align 4
+; CGSCC-NEXT: ret i8 [[I2]]
;
entry:
%Bytes = alloca [1024 x i8], align 16
@@ -485,7 +612,7 @@ define i8 @phi_offsets(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@phi_offsets
; CHECK-SAME: (i1 noundef [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: br i1 [[CND1]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
; CHECK-NEXT: br label [[JOIN:%.*]]
@@ -493,7 +620,9 @@ define i8 @phi_offsets(i1 %cnd1, i1 %cnd2) {
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 3, [[THEN]] ], [ 11, [[ELSE]] ]
-; CHECK-NEXT: [[GEP_PHI:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 [[PHI]]
+; CHECK-NEXT: [[GEP_PHI:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 [[PHI]]
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_PHI]], i64 -10
+; CHECK-NEXT: store i8 42, ptr [[NEWGEP]], align 4
; CHECK-NEXT: ret i8 100
;
entry:
@@ -521,6 +650,3 @@ join:
; CHECK: attributes #[[ATTR2]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(write) }
; CHECK: attributes #[[ATTR3]] = { mustprogress nofree norecurse nosync nounwind willreturn }
;.
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CGSCC: {{.*}}
-; TUNIT: {{.*}}
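
The pattern behind the updated CHECK lines above: with this patch the Attributor shrinks an alloca to the bytes its access bins actually cover and rebases each load/store through a new GEP with a negative index onto the smaller allocation. A minimal sketch of the input shape, with the new expected output quoted in comments (hypothetical function name; single fixed offset, cf. @gep_index_from_memory further below):

define i8 @shrink_sketch() {
entry:
  ; 1024 bytes requested, but only byte 23 is ever written
  %Bytes = alloca [1024 x i8], align 16
  %gep23 = getelementptr inbounds [1024 x i8], ptr %Bytes, i64 0, i64 23
  store i8 100, ptr %gep23, align 4
  ret i8 100
}

; Expected output shape with this patch (same pattern as the CHECK lines above):
;   %Bytes1 = alloca [1 x i8], align 1
;   %gep23  = getelementptr inbounds [1024 x i8], ptr %Bytes1, i64 0, i64 23
;   %newgep = getelementptr ptr, ptr %gep23, i64 -23
;   store i8 100, ptr %newgep, align 4
;   ret i8 100
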
diff --git a/llvm/test/Transforms/Attributor/nodelete.ll b/llvm/test/Transforms/Attributor/nodelete.ll
index c28cb28379348..9a4a1098d010a 100644
--- a/llvm/test/Transforms/Attributor/nodelete.ll
+++ b/llvm/test/Transforms/Attributor/nodelete.ll
@@ -10,15 +10,13 @@ define hidden i64 @f1() align 2 {
; TUNIT-LABEL: define {{[^@]+}}@f1
; TUNIT-SAME: () #[[ATTR0:[0-9]+]] align 2 {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[REF_TMP1:%.*]] = alloca i8, i32 0, align 8
; TUNIT-NEXT: ret i64 undef
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@f1
; CGSCC-SAME: () #[[ATTR0:[0-9]+]] align 2 {
; CGSCC-NEXT: entry:
-; CGSCC-NEXT: [[REF_TMP:%.*]] = alloca [[A:%.*]], align 8
-; CGSCC-NEXT: [[REF_TMP1:%.*]] = alloca i8, i32 0, align 8
+; CGSCC-NEXT: [[REF_TMP1:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT: [[CALL2:%.*]] = call i64 @f2() #[[ATTR2:[0-9]+]]
; CGSCC-NEXT: ret i64 [[CALL2]]
;
diff --git a/llvm/test/Transforms/Attributor/pointer-info.ll b/llvm/test/Transforms/Attributor/pointer-info.ll
index 6afdbdaee317c..c8fec4f1de7b4 100644
--- a/llvm/test/Transforms/Attributor/pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/pointer-info.ll
@@ -10,10 +10,12 @@ define void @foo(ptr %ptr) {
; TUNIT-LABEL: define {{[^@]+}}@foo
; TUNIT-SAME: (ptr nocapture nofree readnone [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[TMP0:%.*]] = alloca [[STRUCT_TEST_A:%.*]], align 8
+; TUNIT-NEXT: [[TMP0:%.*]] = alloca [8 x i8], align 1
; TUNIT-NEXT: br label [[CALL_BR:%.*]]
; TUNIT: call.br:
-; TUNIT-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_TEST_A]], ptr [[TMP0]], i64 0, i32 2
+; TUNIT-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_TEST_A:%.*]], ptr [[TMP0]], i64 0, i32 2
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[TMP1]], i64 -16
+; TUNIT-NEXT: store ptr [[PTR]], ptr [[NEWGEP]], align 8
; TUNIT-NEXT: tail call void @bar(ptr noalias nocapture nofree noundef nonnull readonly byval([[STRUCT_TEST_A]]) align 8 dereferenceable(24) [[TMP0]]) #[[ATTR2:[0-9]+]]
; TUNIT-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info-vec.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info-vec.ll
index 70793ec5c7f83..c07b8f9a3efe8 100644
--- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info-vec.ll
+++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info-vec.ll
@@ -83,6 +83,12 @@ define i32 @vec_write_4() {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@vec_write_4
; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT: [[A1:%.*]] = alloca [12 x i8], align 1
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[A1]], i64 4
+; CHECK-NEXT: store i32 3, ptr [[NEWGEP]], align 16
+; CHECK-NEXT: [[G:%.*]] = getelementptr i32, ptr [[A1]], i64 1
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[G]], i64 -4
+; CHECK-NEXT: store <2 x i32> <i32 5, i32 5>, ptr [[NEWGEP2]], align 8
; CHECK-NEXT: ret i32 13
;
%a = alloca <4 x i32>
@@ -101,8 +107,12 @@ define i32 @vec_write_5(i32 %arg) {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@vec_write_5
; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[A:%.*]] = alloca <4 x i32>, align 16
-; CHECK-NEXT: store i32 [[ARG]], ptr [[A]], align 16
+; CHECK-NEXT: [[A1:%.*]] = alloca [12 x i8], align 1
+; CHECK-NEXT: [[NEWGEP2:%.*]] = getelementptr ptr, ptr [[A1]], i64 4
+; CHECK-NEXT: store i32 [[ARG]], ptr [[NEWGEP2]], align 16
+; CHECK-NEXT: [[G:%.*]] = getelementptr i32, ptr [[A1]], i64 1
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[G]], i64 -4
+; CHECK-NEXT: store <2 x i32> <i32 5, i32 5>, ptr [[NEWGEP]], align 8
; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[ARG]], 5
; CHECK-NEXT: [[ADD2:%.*]] = add i32 5, [[ADD1]]
; CHECK-NEXT: ret i32 [[ADD2]]
diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
index 7a35b5c856097..a8947cbdb66de 100644
--- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
@@ -628,8 +628,11 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; TUNIT: cond.false:
; TUNIT-NEXT: br label [[COND_END]]
; TUNIT: cond.end:
+; TUNIT-NEXT: [[COND:%.*]] = phi ptr [ @GI2, [[COND_TRUE]] ], [ [[L]], [[COND_FALSE]] ]
+; TUNIT-NEXT: store i32 5, ptr [[COND]], align 4, !tbaa [[TBAA3]]
+; TUNIT-NEXT: [[L:%.*]] = load i32, ptr [[COND]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nocapture nofree noundef nonnull align 4 dereferenceable(4) [[L]]) #[[ATTR17]]
-; TUNIT-NEXT: ret i32 5
+; TUNIT-NEXT: ret i32 [[L]]
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
; CGSCC-LABEL: define {{[^@]+}}@multi_obj_simplifiable_2
@@ -644,8 +647,11 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; CGSCC: cond.false:
; CGSCC-NEXT: br label [[COND_END]]
; CGSCC: cond.end:
+; CGSCC-NEXT: [[COND:%.*]] = phi ptr [ @GI2, [[COND_TRUE]] ], [ [[L]], [[COND_FALSE]] ]
+; CGSCC-NEXT: store i32 5, ptr [[COND]], align 4, !tbaa [[TBAA3]]
+; CGSCC-NEXT: [[L:%.*]] = load i32, ptr [[COND]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nocapture nofree noundef nonnull align 4 dereferenceable(4) [[L]]) #[[ATTR20]]
-; CGSCC-NEXT: ret i32 5
+; CGSCC-NEXT: ret i32 [[L]]
;
entry:
%L = alloca i32, align 4
@@ -2663,21 +2669,22 @@ if.end: ; preds = %if.then, %entry
; We mark %dst as writeonly and %src as readonly, that is (for now) all we can expect.
define dso_local void @test_nested_memory(ptr %dst, ptr %src) {
; TUNIT-LABEL: define {{[^@]+}}@test_nested_memory
-; TUNIT-SAME: (ptr nocapture nofree writeonly [[DST:%.*]], ptr nocapture nofree readonly [[SRC:%.*]]) {
+; TUNIT-SAME: (ptr nofree [[DST:%.*]], ptr nofree [[SRC:%.*]]) {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[CALL_H2S:%.*]] = alloca i8, i64 24, align 1
-; TUNIT-NEXT: [[LOCAL:%.*]] = alloca [[STRUCT_STY:%.*]], align 8
-; TUNIT-NEXT: [[INNER:%.*]] = getelementptr inbounds [[STRUCT_STY]], ptr [[LOCAL]], i64 0, i32 2
-; TUNIT-NEXT: store ptr @global, ptr [[INNER]], align 8
-; TUNIT-NEXT: store ptr [[DST]], ptr [[CALL_H2S]], align 8
-; TUNIT-NEXT: [[SRC2:%.*]] = getelementptr inbounds i8, ptr [[CALL_H2S]], i64 8
+; TUNIT-NEXT: [[LOCAL1:%.*]] = alloca [8 x i8], align 1
+; TUNIT-NEXT: [[INNER:%.*]] = getelementptr inbounds [[STRUCT_STY:%.*]], ptr [[LOCAL1]], i64 0, i32 2
+; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[INNER]], i64 -16
+; TUNIT-NEXT: store ptr @global, ptr [[NEWGEP]], align 8
+; TUNIT-NEXT: [[CALL:%.*]] = call noalias dereferenceable_or_null(24) ptr @malloc(i64 noundef 24)
+; TUNIT-NEXT: store ptr [[DST]], ptr [[CALL]], align 8
+; TUNIT-NEXT: [[SRC2:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 8
; TUNIT-NEXT: store ptr [[SRC]], ptr [[SRC2]], align 8
-; TUNIT-NEXT: store ptr [[CALL_H2S]], ptr getelementptr inbounds ([[STRUCT_STY]], ptr @global, i64 0, i32 2), align 8
-; TUNIT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LOCAL]], align 8
-; TUNIT-NEXT: [[LOCAL_B8:%.*]] = getelementptr i8, ptr [[LOCAL]], i64 8
-; TUNIT-NEXT: [[TMP1:%.*]] = load ptr, ptr [[LOCAL_B8]], align 8
-; TUNIT-NEXT: [[LOCAL_B16:%.*]] = getelementptr i8, ptr [[LOCAL]], i64 16
-; TUNIT-NEXT: [[TMP2:%.*]] = load ptr, ptr [[LOCAL_B16]], align 8
+; TUNIT-NEXT: store ptr [[CALL]], ptr getelementptr inbounds ([[STRUCT_STY]], ptr @global, i64 0, i32 2), align 8
+; TUNIT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LOCAL1]], align 8
+; TUNIT-NEXT: [[LOCAL1_B8:%.*]] = getelementptr i8, ptr [[LOCAL1]], i64 8
+; TUNIT-NEXT: [[TMP1:%.*]] = load ptr, ptr [[LOCAL1_B8]], align 8
+; TUNIT-NEXT: [[LOCAL1_B16:%.*]] = getelementptr i8, ptr [[LOCAL1]], i64 16
+; TUNIT-NEXT: [[TMP2:%.*]] = load ptr, ptr [[LOCAL1_B16]], align 8
; TUNIT-NEXT: call fastcc void @nested_memory_callee(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]]) #[[ATTR21:[0-9]+]]
; TUNIT-NEXT: ret void
;
@@ -3017,8 +3024,10 @@ define i8 @gep_index_from_binary_operator(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@gep_index_from_binary_operator
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[GEP_FIXED:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 12
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [1 x i8], align 1
+; CHECK-NEXT: [[GEP_FIXED:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 12
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_FIXED]], i64 -12
+; CHECK-NEXT: store i8 100, ptr [[NEWGEP]], align 4
; CHECK-NEXT: ret i8 100
;
entry:
@@ -3036,8 +3045,10 @@ define i8 @gep_index_from_memory(i1 %cnd1, i1 %cnd2) {
; CHECK-LABEL: define {{[^@]+}}@gep_index_from_memory
; CHECK-SAME: (i1 [[CND1:%.*]], i1 [[CND2:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CHECK-NEXT: [[GEP_LOADED:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 12
+; CHECK-NEXT: [[BYTES1:%.*]] = alloca [1 x i8], align 1
+; CHECK-NEXT: [[GEP_LOADED:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES1]], i64 0, i64 12
+; CHECK-NEXT: [[NEWGEP:%.*]] = getelementptr ptr, ptr [[GEP_LOADED]], i64 -12
+; CHECK-NEXT: store i8 100, ptr [[NEWGEP]], align 4
; CHECK-NEXT: ret i8 100
;
entry:
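
The OpenMP state-machine churn below follows the same theme: the kernels now store the thread id returned by @__kmpc_global_thread_num into the .threadid_temp_ alloca, so the outlined bodies can reload it and forward it to @__kmpc_parallel_51 instead of passing `i32 undef`. A minimal self-contained sketch of the new shape of an outlined body (@glob, @body, and @outlined are placeholder names, not symbols from the tests; the @__kmpc_parallel_51 signature is taken from the CHECK lines below):

declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64)
declare void @body(ptr, ptr)

@glob = external global i8

define internal void @outlined(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
entry:
  %tid.addr = alloca ptr, align 8
  %captured = alloca [0 x ptr], align 8
  ; spill the incoming tid pointer, then reload it and the i32 it points at
  store ptr %.global_tid., ptr %tid.addr, align 8
  %0 = load ptr, ptr %tid.addr, align 8
  %1 = load i32, ptr %0, align 4
  ; the second argument used to be `i32 undef`
  call void @__kmpc_parallel_51(ptr @glob, i32 %1, i32 1, i32 -1, i32 -1,
                                ptr @body, ptr @body, ptr %captured, i64 0)
  ret void
}
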
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines.ll b/llvm/test/Transforms/OpenMP/custom_state_machines.ll
index e6ddf16f06763..3086d697f6c73 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines.ll
@@ -1028,6 +1028,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-NEXT: ret void
@@ -1039,13 +1040,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
+; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-NEXT: ret void
;
;
@@ -1152,6 +1157,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-NEXT: ret void
@@ -1163,12 +1169,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__4
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
+; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; AMDGPU-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; AMDGPU-NEXT: ret void
;
@@ -1289,6 +1299,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-NEXT: ret void
@@ -1300,12 +1311,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__6
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
+; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-NEXT: ret void
;
;
@@ -1406,6 +1421,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-NEXT: ret void
@@ -1417,12 +1433,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__9
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
+; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-NEXT: ret void
;
;
@@ -1523,6 +1543,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-NEXT: ret void
@@ -1534,12 +1555,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__12
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
+; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-NEXT: ret void
;
;
@@ -1932,6 +1957,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit()
; NVPTX-NEXT: ret void
@@ -1943,13 +1969,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
+; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-NEXT: ret void
;
;
@@ -2055,6 +2085,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit()
; NVPTX-NEXT: ret void
@@ -2066,12 +2097,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
+; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; NVPTX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; NVPTX-NEXT: ret void
;
@@ -2191,6 +2226,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit()
; NVPTX-NEXT: ret void
@@ -2202,12 +2238,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
+; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-NEXT: ret void
;
;
@@ -2307,6 +2347,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit()
; NVPTX-NEXT: ret void
@@ -2318,12 +2359,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
+; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-NEXT: ret void
;
;
@@ -2423,6 +2468,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit()
; NVPTX-NEXT: ret void
@@ -2434,12 +2480,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
+; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-NEXT: ret void
;
;
@@ -2792,6 +2842,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU-DISABLED: user_code.entry:
; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-DISABLED-NEXT: ret void
@@ -2803,13 +2854,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-DISABLED-NEXT: entry:
+; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-DISABLED-NEXT: ret void
;
;
@@ -2870,6 +2925,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU-DISABLED: user_code.entry:
; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-DISABLED-NEXT: ret void
@@ -2881,12 +2937,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__4
; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-DISABLED-NEXT: entry:
+; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; AMDGPU-DISABLED-NEXT: ret void
;
@@ -2965,6 +3025,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU-DISABLED: user_code.entry:
; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-DISABLED-NEXT: ret void
@@ -2976,12 +3037,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__6
; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-DISABLED-NEXT: entry:
+; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-DISABLED-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-DISABLED-NEXT: ret void
;
;
@@ -3042,6 +3107,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU-DISABLED: user_code.entry:
; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-DISABLED-NEXT: ret void
@@ -3053,12 +3119,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__9
; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-DISABLED-NEXT: entry:
+; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-DISABLED-NEXT: ret void
;
;
@@ -3119,6 +3189,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU-DISABLED: user_code.entry:
; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
; AMDGPU-DISABLED-NEXT: ret void
@@ -3130,12 +3201,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__12
; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-DISABLED-NEXT: entry:
+; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU-DISABLED-NEXT: ret void
;
;
@@ -3459,6 +3534,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
; NVPTX-DISABLED-NEXT: ret void
@@ -3470,13 +3546,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
+; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
@@ -3537,6 +3617,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
; NVPTX-DISABLED-NEXT: ret void
@@ -3548,12 +3629,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
+; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; NVPTX-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; NVPTX-DISABLED-NEXT: ret void
;
@@ -3632,6 +3717,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
; NVPTX-DISABLED-NEXT: ret void
@@ -3643,12 +3729,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
+; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-DISABLED-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
@@ -3709,6 +3799,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
; NVPTX-DISABLED-NEXT: ret void
@@ -3720,12 +3811,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
+; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
@@ -3786,6 +3881,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
; NVPTX-DISABLED-NEXT: ret void
@@ -3797,12 +3893,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
+; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX-DISABLED-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
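All of the hunks above, and the pre-LTO variants that follow, update the same machine-generated FileCheck pattern: the thread id returned by @__kmpc_global_thread_num is now stored into the %.threadid_temp_ alloca at the kernel entry, the outlined function spills its %.global_tid. argument into a fresh pointer alloca and reloads the i32 through it, and that value replaces the `i32 undef` previously passed to @__kmpc_parallel_51. Stripped of the CHECK annotations and reduced to a single call site, the new IR shape is roughly the following; this is a simplified sketch distilled from the check lines (names and the @__kmpc_parallel_51 signature are taken from the tests, the surrounding declarations are abridged), not an additional test:

  define internal void @__omp_outlined__(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
  entry:
    %.global_tid..addr = alloca ptr, align 8
    %captured_vars_addrs = alloca [0 x ptr], align 8
    store ptr %.global_tid., ptr %.global_tid..addr, align 8
    ; With the improved AAPointerInfo offset tracking, the store/load pair
    ; through the alloca is kept and the load is no longer folded to undef.
    %0 = load ptr, ptr %.global_tid..addr, align 8
    %1 = load i32, ptr %0, align 4
    ; The real thread id %1 now reaches the runtime call instead of undef.
    call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1,
                                  ptr @__omp_outlined__2,
                                  ptr @__omp_outlined__2_wrapper,
                                  ptr %captured_vars_addrs, i64 0)
    ret void
  }

The same shape repeats for every AMDGPU*/NVPTX* prefix and every outlined region in these files, which is why the diff touches so many near-identical check blocks.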
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll b/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll
index d20821d450365..8324654a6ebeb 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll
@@ -992,6 +992,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU1: user_code.entry:
; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU1-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
; AMDGPU1-NEXT: ret void
@@ -1003,13 +1004,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU1-NEXT: entry:
+; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU1-NEXT: ret void
;
;
@@ -1070,6 +1075,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU1: user_code.entry:
; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU1-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
; AMDGPU1-NEXT: ret void
@@ -1081,12 +1087,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__4
; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU1-NEXT: entry:
+; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; AMDGPU1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; AMDGPU1-NEXT: ret void
;
@@ -1165,6 +1175,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU1: user_code.entry:
; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU1-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
; AMDGPU1-NEXT: ret void
@@ -1176,12 +1187,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__6
; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU1-NEXT: entry:
+; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU1-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU1-NEXT: ret void
;
;
@@ -1242,6 +1257,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU1: user_code.entry:
; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU1-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
; AMDGPU1-NEXT: ret void
@@ -1253,12 +1269,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__9
; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU1-NEXT: entry:
+; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU1-NEXT: ret void
;
;
@@ -1319,6 +1339,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU1: user_code.entry:
; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU1-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
; AMDGPU1-NEXT: ret void
@@ -1330,12 +1351,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__12
; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU1-NEXT: entry:
+; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU1-NEXT: ret void
;
;
@@ -1654,6 +1679,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX1: user_code.entry:
; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX1-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX1-NEXT: call void @__kmpc_target_deinit()
; NVPTX1-NEXT: ret void
@@ -1665,13 +1691,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX1-NEXT: entry:
+; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX1-NEXT: ret void
;
;
@@ -1732,6 +1762,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX1: user_code.entry:
; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX1-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX1-NEXT: call void @__kmpc_target_deinit()
; NVPTX1-NEXT: ret void
@@ -1743,12 +1774,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX1-NEXT: entry:
+; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; NVPTX1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; NVPTX1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; NVPTX1-NEXT: ret void
;
@@ -1827,6 +1862,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX1: user_code.entry:
; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX1-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX1-NEXT: call void @__kmpc_target_deinit()
; NVPTX1-NEXT: ret void
@@ -1838,12 +1874,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX1-NEXT: entry:
+; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX1-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX1-NEXT: ret void
;
;
@@ -1904,6 +1944,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX1: user_code.entry:
; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX1-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX1-NEXT: call void @__kmpc_target_deinit()
; NVPTX1-NEXT: ret void
@@ -1915,12 +1956,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX1-NEXT: entry:
+; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX1-NEXT: ret void
;
;
@@ -1981,6 +2026,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX1: user_code.entry:
; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX1-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX1-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX1-NEXT: call void @__kmpc_target_deinit()
; NVPTX1-NEXT: ret void
@@ -1992,12 +2038,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX1-NEXT: entry:
+; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX1-NEXT: ret void
;
;
@@ -2316,6 +2366,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU2: user_code.entry:
; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU2-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
; AMDGPU2-NEXT: ret void
@@ -2327,13 +2378,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU2-NEXT: entry:
+; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU2-NEXT: ret void
;
;
@@ -2394,6 +2449,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU2: user_code.entry:
; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU2-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
; AMDGPU2-NEXT: ret void
@@ -2405,12 +2461,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__4
; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU2-NEXT: entry:
+; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; AMDGPU2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; AMDGPU2-NEXT: ret void
;
@@ -2489,6 +2549,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU2: user_code.entry:
; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU2-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
; AMDGPU2-NEXT: ret void
@@ -2500,12 +2561,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__6
; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU2-NEXT: entry:
+; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU2-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU2-NEXT: ret void
;
;
@@ -2566,6 +2631,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU2: user_code.entry:
; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU2-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
; AMDGPU2-NEXT: ret void
@@ -2577,12 +2643,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__9
; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU2-NEXT: entry:
+; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU2-NEXT: ret void
;
;
@@ -2643,6 +2713,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU2: user_code.entry:
; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU2-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
; AMDGPU2-NEXT: ret void
@@ -2654,12 +2725,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__12
; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU2-NEXT: entry:
+; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU2-NEXT: ret void
;
;
@@ -2978,6 +3053,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU3: user_code.entry:
; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU3-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
; AMDGPU3-NEXT: ret void
@@ -2989,13 +3065,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU3-NEXT: entry:
+; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU3-NEXT: ret void
;
;
@@ -3056,6 +3136,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU3: user_code.entry:
; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU3-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
; AMDGPU3-NEXT: ret void
@@ -3067,12 +3148,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__4
; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU3-NEXT: entry:
+; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; AMDGPU3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; AMDGPU3-NEXT: ret void
;
@@ -3151,6 +3236,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU3: user_code.entry:
; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU3-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
; AMDGPU3-NEXT: ret void
@@ -3162,12 +3248,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__6
; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU3-NEXT: entry:
+; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU3-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU3-NEXT: ret void
;
;
@@ -3228,6 +3318,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU3: user_code.entry:
; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU3-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
; AMDGPU3-NEXT: ret void
@@ -3239,12 +3330,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__9
; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU3-NEXT: entry:
+; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU3-NEXT: ret void
;
;
@@ -3305,6 +3400,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU3: user_code.entry:
; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; AMDGPU3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; AMDGPU3-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
; AMDGPU3-NEXT: ret void
@@ -3316,12 +3412,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__12
; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU3-NEXT: entry:
+; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; AMDGPU3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; AMDGPU3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; AMDGPU3-NEXT: ret void
;
;
@@ -3640,6 +3740,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX2: user_code.entry:
; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX2-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX2-NEXT: call void @__kmpc_target_deinit()
; NVPTX2-NEXT: ret void
@@ -3651,13 +3752,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX2-NEXT: entry:
+; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX2-NEXT: ret void
;
;
@@ -3718,6 +3823,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX2: user_code.entry:
; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX2-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX2-NEXT: call void @__kmpc_target_deinit()
; NVPTX2-NEXT: ret void
@@ -3729,12 +3835,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX2-NEXT: entry:
+; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; NVPTX2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; NVPTX2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; NVPTX2-NEXT: ret void
;
@@ -3813,6 +3923,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX2: user_code.entry:
; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX2-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX2-NEXT: call void @__kmpc_target_deinit()
; NVPTX2-NEXT: ret void
@@ -3824,12 +3935,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX2-NEXT: entry:
+; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX2-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX2-NEXT: ret void
;
;
@@ -3890,6 +4005,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX2: user_code.entry:
; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX2-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX2-NEXT: call void @__kmpc_target_deinit()
; NVPTX2-NEXT: ret void
@@ -3901,12 +4017,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX2-NEXT: entry:
+; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX2-NEXT: ret void
;
;
@@ -3967,6 +4087,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX2: user_code.entry:
; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX2-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX2-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX2-NEXT: call void @__kmpc_target_deinit()
; NVPTX2-NEXT: ret void
@@ -3978,12 +4099,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX2-NEXT: entry:
+; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX2-NEXT: ret void
;
;
@@ -4302,6 +4427,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX3: user_code.entry:
; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX3-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX3-NEXT: call void @__kmpc_target_deinit()
; NVPTX3-NEXT: ret void
@@ -4313,13 +4439,17 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX3-NEXT: entry:
+; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX3-NEXT: ret void
;
;
@@ -4380,6 +4510,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX3: user_code.entry:
; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX3-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX3-NEXT: call void @__kmpc_target_deinit()
; NVPTX3-NEXT: ret void
@@ -4391,12 +4522,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX3-NEXT: entry:
+; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
+; NVPTX3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
; NVPTX3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
; NVPTX3-NEXT: ret void
;
@@ -4475,6 +4610,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX3: user_code.entry:
; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX3-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX3-NEXT: call void @__kmpc_target_deinit()
; NVPTX3-NEXT: ret void
@@ -4486,12 +4622,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX3-NEXT: entry:
+; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX3-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX3-NEXT: ret void
;
;
@@ -4552,6 +4692,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX3: user_code.entry:
; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX3-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX3-NEXT: call void @__kmpc_target_deinit()
; NVPTX3-NEXT: ret void
@@ -4563,12 +4704,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX3-NEXT: entry:
+; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX3-NEXT: ret void
;
;
@@ -4629,6 +4774,7 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX3: user_code.entry:
; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
+; NVPTX3-NEXT: store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4
; NVPTX3-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX3-NEXT: call void @__kmpc_target_deinit()
; NVPTX3-NEXT: ret void
@@ -4640,12 +4786,16 @@ attributes #9 = { convergent nounwind readonly willreturn }
; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX3-NEXT: entry:
+; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
+; NVPTX3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
-; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
+; NVPTX3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
+; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
; NVPTX3-NEXT: ret void
;
;
diff --git a/llvm/test/Transforms/OpenMP/spmdization_guarding.ll b/llvm/test/Transforms/OpenMP/spmdization_guarding.ll
index bd128b7f74d79..6c7b2fc6d5dd8 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_guarding.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_guarding.ll
@@ -119,6 +119,7 @@ define weak void @__omp_offloading_2a_fbfa7a_sequential_loop_l6(ptr %dyn, ptr %x
; CHECK-NEXT: call void @__kmpc_barrier_simple_spmd(ptr @[[GLOB2]], i32 [[TMP4]])
; CHECK-NEXT: br label [[REGION_EXIT3]]
; CHECK: region.exit3:
+; CHECK-NEXT: store i32 4711, ptr [[SELECT]], align 4
; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I_0_I]], 1
; CHECK-NEXT: br label [[FOR_COND_I]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: __omp_outlined__.exit:
@@ -249,6 +250,7 @@ define weak void @__omp_offloading_2a_fbfa7a_sequential_loop_l6(ptr %dyn, ptr %x
; CHECK-DISABLED-NEXT: [[IDXPROM4_I:%.*]] = zext i32 [[I_0_I]] to i64
; CHECK-DISABLED-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM4_I]]
; CHECK-DISABLED-NEXT: store i32 [[SUB3_I]], ptr [[ARRAYIDX5_I]], align 4, !noalias [[META8]]
+; CHECK-DISABLED-NEXT: store i32 4711, ptr [[SELECT]], align 4
; CHECK-DISABLED-NEXT: [[INC_I]] = add nuw nsw i32 [[I_0_I]], 1
; CHECK-DISABLED-NEXT: br label [[FOR_COND_I]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-DISABLED: __omp_outlined__.exit:
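The check-line churn above all encodes one shape: the old Attributor output dropped the store of the global thread id into its spill alloca and folded the reloaded value to `i32 undef` at each `__kmpc_parallel_51` call site, while the new output keeps the store and forwards the reloaded id. A minimal standalone sketch of that shape, with illustrative names (`@use`, `%tmp0`, `%tmp1`) rather than anything taken from the tests:

declare void @use(i32)

define void @outlined(ptr noalias %global_tid) {
entry:
  ; The pointer argument is spilled to a local alloca, as in the
  ; outlined OpenMP bodies above.
  %global_tid.addr = alloca ptr, align 8
  store ptr %global_tid, ptr %global_tid.addr, align 8
  ; With the offset tracking fixed, the reload is provably the stored
  ; pointer, so the thread id is passed through explicitly instead of
  ; being simplified to undef.
  %tmp0 = load ptr, ptr %global_tid.addr, align 8
  %tmp1 = load i32, ptr %tmp0, align 4
  call void @use(i32 %tmp1)
  ret void
}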