[libc-commits] [libc] [compiler-rt] [llvm] [clang] [flang] [libcxx] [clang-tools-extra] [SLP]Improve minbitwidth analysis. (PR #78976)
Alexey Bataev via libc-commits
libc-commits at lists.llvm.org
Thu Feb 1 13:53:05 PST 2024
https://github.com/alexey-bataev updated https://github.com/llvm/llvm-project/pull/78976
>From 391145a2d04873ef84c4f1083fe9b9fcdbb3b940 Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Mon, 22 Jan 2024 14:13:51 +0000
Subject: [PATCH] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Created using spr 1.3.5
---
.../Transforms/Vectorize/SLPVectorizer.cpp | 444 ++++++++++++------
.../SLPVectorizer/AArch64/ext-trunc.ll | 9 +-
.../SLPVectorizer/AArch64/getelementptr.ll | 116 +++--
.../SLPVectorizer/AArch64/reduce-add-i64.ll | 20 +-
.../SLPVectorizer/RISCV/reductions.ll | 7 +-
.../Transforms/SLPVectorizer/X86/PR35777.ll | 23 +-
...minbitwidth-multiuse-with-insertelement.ll | 17 +-
.../SLPVectorizer/X86/minimum-sizes.ll | 17 +-
.../SLPVectorizer/X86/phi-undef-input.ll | 24 +-
.../Transforms/SLPVectorizer/X86/resched.ll | 32 +-
.../X86/reused-reductions-with-minbitwidth.ll | 10 +-
.../X86/store-insertelement-minbitwidth.ll | 22 +-
.../SLPVectorizer/alt-cmp-vectorize.ll | 4 +-
13 files changed, 466 insertions(+), 279 deletions(-)
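Note (illustration, not part of the patch): the reworked analysis computes, for each tree root, the narrowest bit width that still preserves the value, derived from the number of known sign bits and the demanded bits, then clamps it to at least 8 bits and rounds it up to a power of two. The standalone C++20 sketch below mirrors that per-root formula under the simplifying assumption that the values are already known constants rather than IR values inspected through ValueTracking; numSignBits and requiredBitWidth are illustrative names, not LLVM APIs.

// Standalone sketch (not LLVM code): compute the narrowest power-of-two width
// that preserves a set of known 32-bit constants, mirroring the per-root
// "NumTypeBits - NumSignBits, plus one bit if the value may be negative" rule,
// the clamp to a minimum of 8 bits, and the final bit_ceil.
#include <algorithm>
#include <bit>
#include <cstdint>
#include <iostream>
#include <vector>

// Copies of the sign bit at the top of a 32-bit value, i.e. what
// ComputeNumSignBits would report for a known constant.
static unsigned numSignBits(int32_t V) {
  uint32_t U = static_cast<uint32_t>(V);
  if (V < 0)
    U = ~U;
  return static_cast<unsigned>(std::countl_zero(U));
}

static unsigned requiredBitWidth(const std::vector<int32_t> &Roots) {
  unsigned MaxBitWidth = 1;
  for (int32_t R : Roots) {
    unsigned BitWidth1 = 32 - numSignBits(R);
    if (R < 0) // sign bit may be set: keep one bit so sign-extension is lossless
      ++BitWidth1;
    MaxBitWidth = std::max(MaxBitWidth, BitWidth1);
  }
  if (MaxBitWidth < 8 && MaxBitWidth > 1)
    MaxBitWidth = 8; // avoid sub-byte element types
  return std::bit_ceil(MaxBitWidth);
}

int main() {
  std::cout << requiredBitWidth({100, -3, 7}) << '\n'; // 8
  std::cout << requiredBitWidth({70000, 12}) << '\n';  // 32
}

The ComputeMaxBitWidth lambda in the patch applies the same shape of computation per tree node, additionally consulting DemandedBits and giving up when narrowing would not help.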
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 482970bbf3061..5a493aed0fed7 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2273,9 +2273,11 @@ class BoUpSLP {
/// constant and to be demoted. Required to correctly identify constant nodes
/// to be demoted.
bool collectValuesToDemote(
- Value *V, SmallVectorImpl<Value *> &ToDemote,
+ Value *V, bool IsProfitableToDemoteRoot, unsigned &BitWidth,
+ SmallVectorImpl<Value *> &ToDemote,
DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
- SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const;
+ DenseSet<Value *> &Visited, unsigned &MaxDepthLevel,
+ bool &IsProfitableToDemote) const;
/// Check if the operands on the edges \p Edges of the \p UserTE allows
/// reordering (i.e. the operands can be reordered because they have only one
@@ -7862,7 +7864,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy);
unsigned VecOpcode;
- auto *SrcVecTy =
+ auto *UserVecTy =
FixedVectorType::get(UserScalarTy, E->getVectorFactor());
if (BWSz > SrcBWSz)
VecOpcode = Instruction::Trunc;
@@ -7870,11 +7872,10 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
VecOpcode =
It->second.second ? Instruction::SExt : Instruction::ZExt;
TTI::CastContextHint CCH = GetCastContextHint(VL0);
- VecCost += TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH,
+ VecCost += TTI->getCastInstrCost(VecOpcode, UserVecTy, VecTy, CCH,
CostKind);
- ScalarCost +=
- Sz * TTI->getCastInstrCost(VecOpcode, ScalarTy, UserScalarTy,
- CCH, CostKind);
+ ScalarCost += Sz * TTI->getCastInstrCost(VecOpcode, UserScalarTy,
+ ScalarTy, CCH, CostKind);
}
}
}
@@ -8955,7 +8956,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers;
SmallVector<APInt> DemandedElts;
SmallDenseSet<Value *, 4> UsedInserts;
- DenseSet<Value *> VectorCasts;
+ DenseSet<std::pair<const TreeEntry *, Type *>> VectorCasts;
for (ExternalUser &EU : ExternalUses) {
// We only add extract cost once for the same scalar.
if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
@@ -9025,7 +9026,10 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
DemandedElts.push_back(APInt::getZero(FTy->getNumElements()));
VecId = FirstUsers.size() - 1;
auto It = MinBWs.find(ScalarTE);
- if (It != MinBWs.end() && VectorCasts.insert(EU.Scalar).second) {
+ if (It != MinBWs.end() &&
+ VectorCasts
+ .insert(std::make_pair(ScalarTE, FTy->getElementType()))
+ .second) {
unsigned BWSz = It->second.second;
unsigned SrcBWSz = DL->getTypeSizeInBits(FTy->getElementType());
unsigned VecOpcode;
@@ -9082,17 +9086,20 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
}
// Add reduced value cost, if resized.
if (!VectorizedVals.empty()) {
- auto BWIt = MinBWs.find(VectorizableTree.front().get());
+ const TreeEntry &Root = *VectorizableTree.front().get();
+ auto BWIt = MinBWs.find(&Root);
if (BWIt != MinBWs.end()) {
- Type *DstTy = VectorizableTree.front()->Scalars.front()->getType();
+ Type *DstTy = Root.Scalars.front()->getType();
unsigned OriginalSz = DL->getTypeSizeInBits(DstTy);
- unsigned Opcode = Instruction::Trunc;
- if (OriginalSz < BWIt->second.first)
- Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt;
- Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first);
- Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy,
- TTI::CastContextHint::None,
- TTI::TCK_RecipThroughput);
+ if (OriginalSz != BWIt->second.first) {
+ unsigned Opcode = Instruction::Trunc;
+ if (OriginalSz < BWIt->second.first)
+ Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt;
+ Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first);
+ Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy,
+ TTI::CastContextHint::None,
+ TTI::TCK_RecipThroughput);
+ }
}
}
@@ -11383,9 +11390,10 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
VecOpcode = Instruction::BitCast;
} else if (BWSz < SrcBWSz) {
VecOpcode = Instruction::Trunc;
- } else if (It != MinBWs.end()) {
+ } else if (SrcIt != MinBWs.end()) {
assert(BWSz > SrcBWSz && "Invalid cast!");
- VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
+ VecOpcode =
+ SrcIt->second.second ? Instruction::SExt : Instruction::ZExt;
}
}
Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast)
@@ -11893,7 +11901,7 @@ Value *BoUpSLP::vectorizeTree(
// basic block. Only one extractelement per block should be emitted.
DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs;
SmallDenseSet<Value *, 4> UsedInserts;
- DenseMap<Value *, Value *> VectorCasts;
+ DenseMap<std::pair<Value *, Type *>, Value *> VectorCasts;
SmallDenseSet<Value *, 4> ScalarsWithNullptrUser;
// Extract all of the elements with the external uses.
for (const auto &ExternalUse : ExternalUses) {
@@ -12014,7 +12022,9 @@ Value *BoUpSLP::vectorizeTree(
// Need to use original vector, if the root is truncated.
auto BWIt = MinBWs.find(E);
if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) {
- auto VecIt = VectorCasts.find(Scalar);
+ auto *ScalarTy = FTy->getElementType();
+ auto Key = std::make_pair(Vec, ScalarTy);
+ auto VecIt = VectorCasts.find(Key);
if (VecIt == VectorCasts.end()) {
IRBuilder<>::InsertPointGuard Guard(Builder);
if (auto *IVec = dyn_cast<Instruction>(Vec))
@@ -12022,10 +12032,10 @@ Value *BoUpSLP::vectorizeTree(
Vec = Builder.CreateIntCast(
Vec,
FixedVectorType::get(
- cast<VectorType>(VU->getType())->getElementType(),
+ ScalarTy,
cast<FixedVectorType>(Vec->getType())->getNumElements()),
BWIt->second.second);
- VectorCasts.try_emplace(Scalar, Vec);
+ VectorCasts.try_emplace(Key, Vec);
} else {
Vec = VecIt->second;
}
@@ -13095,16 +13105,21 @@ unsigned BoUpSLP::getVectorElementSize(Value *V) {
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.
bool BoUpSLP::collectValuesToDemote(
- Value *V, SmallVectorImpl<Value *> &ToDemote,
+ Value *V, bool IsProfitableToDemoteRoot, unsigned &BitWidth,
+ SmallVectorImpl<Value *> &ToDemote,
DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
- SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const {
+ DenseSet<Value *> &Visited, unsigned &MaxDepthLevel,
+ bool &IsProfitableToDemote) const {
// We can always demote constants.
- if (isa<Constant>(V))
+ if (isa<Constant>(V)) {
+ MaxDepthLevel = 1;
return true;
+ }
// If the value is not a vectorized instruction in the expression and not used
// by the insertelement instruction and not used in multiple vector nodes, it
// cannot be demoted.
+ // TODO: improve handling of gathered values and others.
auto *I = dyn_cast<Instruction>(V);
if (!I || !getTreeEntry(I) || MultiNodeScalars.contains(I) ||
!Visited.insert(I).second || all_of(I->users(), [&](User *U) {
@@ -13112,6 +13127,21 @@ bool BoUpSLP::collectValuesToDemote(
}))
return false;
+ auto IsPotentiallyTruncated = [&](Value *V, unsigned &BitWidth) -> bool {
+ if (MultiNodeScalars.contains(V))
+ return false;
+ uint32_t OrigBitWidth = DL->getTypeSizeInBits(V->getType());
+ APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
+ if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
+ return true;
+ auto NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
+ unsigned BitWidth1 = OrigBitWidth - NumSignBits;
+ KnownBits Known = computeKnownBits(V, *DL);
+ if (!Known.isNonNegative())
+ ++BitWidth1;
+ BitWidth = std::max(BitWidth, BitWidth1);
+ return BitWidth > 0 && OrigBitWidth / BitWidth > 1;
+ };
unsigned Start = 0;
unsigned End = I->getNumOperands();
switch (I->getOpcode()) {
@@ -13119,12 +13149,16 @@ bool BoUpSLP::collectValuesToDemote(
// We can always demote truncations and extensions. Since truncations can
// seed additional demotion, we save the truncated value.
case Instruction::Trunc:
- Roots.push_back(I->getOperand(0));
+ MaxDepthLevel = 1;
+ if (IsProfitableToDemoteRoot)
+ IsProfitableToDemote = true;
break;
case Instruction::ZExt:
case Instruction::SExt:
- if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0)))
- return false;
+ MaxDepthLevel = 1;
+ if (isa<InsertElementInst>(I->getOperand(0)))
+ return true;
+ IsProfitableToDemote = true;
break;
// We can demote certain binary operations if we can demote both of their
@@ -13134,23 +13168,32 @@ bool BoUpSLP::collectValuesToDemote(
case Instruction::Mul:
case Instruction::And:
case Instruction::Or:
- case Instruction::Xor:
- if (!collectValuesToDemote(I->getOperand(0), ToDemote, DemotedConsts, Roots,
- Visited) ||
- !collectValuesToDemote(I->getOperand(1), ToDemote, DemotedConsts, Roots,
- Visited))
+ case Instruction::Xor: {
+ unsigned Level1, Level2;
+ if (!collectValuesToDemote(I->getOperand(0), IsProfitableToDemoteRoot,
+ BitWidth, ToDemote, DemotedConsts, Visited,
+ Level1, IsProfitableToDemote) ||
+ !collectValuesToDemote(I->getOperand(1), IsProfitableToDemoteRoot,
+ BitWidth, ToDemote, DemotedConsts, Visited,
+ Level2, IsProfitableToDemote))
return false;
+ MaxDepthLevel = std::max(Level1, Level2);
break;
+ }
// We can demote selects if we can demote their true and false values.
case Instruction::Select: {
Start = 1;
+ unsigned Level1, Level2;
SelectInst *SI = cast<SelectInst>(I);
- if (!collectValuesToDemote(SI->getTrueValue(), ToDemote, DemotedConsts,
- Roots, Visited) ||
- !collectValuesToDemote(SI->getFalseValue(), ToDemote, DemotedConsts,
- Roots, Visited))
+ if (!collectValuesToDemote(SI->getTrueValue(), IsProfitableToDemoteRoot,
+ BitWidth, ToDemote, DemotedConsts, Visited,
+ Level1, IsProfitableToDemote) ||
+ !collectValuesToDemote(SI->getFalseValue(), IsProfitableToDemoteRoot,
+ BitWidth, ToDemote, DemotedConsts, Visited,
+ Level2, IsProfitableToDemote))
return false;
+ MaxDepthLevel = std::max(Level1, Level2);
break;
}
@@ -13159,157 +13202,252 @@ bool BoUpSLP::collectValuesToDemote(
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(I);
for (Value *IncValue : PN->incoming_values())
- if (!collectValuesToDemote(IncValue, ToDemote, DemotedConsts, Roots,
- Visited))
+ if (!collectValuesToDemote(IncValue, IsProfitableToDemoteRoot, BitWidth,
+ ToDemote, DemotedConsts, Visited,
+ MaxDepthLevel, IsProfitableToDemote))
return false;
break;
}
// Otherwise, conservatively give up.
default:
- return false;
+ if (!IsPotentiallyTruncated(I, BitWidth))
+ return false;
+ MaxDepthLevel = 0;
+ Start = End = 0;
+ break;
}
+ ++MaxDepthLevel;
// Gather demoted constant operands.
for (unsigned Idx : seq<unsigned>(Start, End))
if (isa<Constant>(I->getOperand(Idx)))
DemotedConsts.try_emplace(I).first->getSecond().push_back(Idx);
// Record the value that we can demote.
ToDemote.push_back(V);
- return true;
+ return IsProfitableToDemote;
}
void BoUpSLP::computeMinimumValueSizes() {
// We only attempt to truncate integer expressions.
- auto &TreeRoot = VectorizableTree[0]->Scalars;
- auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
- if (!TreeRootIT)
- return;
+ bool IsStoreOrInsertElt =
+ VectorizableTree.front()->getOpcode() == Instruction::Store ||
+ VectorizableTree.front()->getOpcode() == Instruction::InsertElement;
+ unsigned NodeIdx = 0;
+ if (IsStoreOrInsertElt &&
+ VectorizableTree.front()->State != TreeEntry::NeedToGather)
+ NodeIdx = 1;
// Ensure the roots of the vectorizable tree don't form a cycle.
- if (!VectorizableTree.front()->UserTreeIndices.empty())
+ if ((NodeIdx == 0 && !VectorizableTree[NodeIdx]->UserTreeIndices.empty()) ||
+ (NodeIdx != 0 && any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
+ [&](const EdgeInfo &EI) {
+ return EI.UserTE->Idx >
+ static_cast<int>(NodeIdx);
+ })))
return;
- // Conservatively determine if we can actually truncate the roots of the
- // expression. Collect the values that can be demoted in ToDemote and
- // additional roots that require investigating in Roots.
- SmallVector<Value *, 32> ToDemote;
- DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts;
- SmallVector<Value *, 4> Roots;
- for (auto *Root : TreeRoot) {
- DenseSet<Value *> Visited;
- if (!collectValuesToDemote(Root, ToDemote, DemotedConsts, Roots, Visited))
- return;
+ // If the first value node for store/insertelement is sext/zext/trunc, skip it
+ // and resize to the final type.
+ bool IsProfitableToDemoteRoot = !IsStoreOrInsertElt;
+ if (NodeIdx != 0 &&
+ VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
+ (VectorizableTree[NodeIdx]->getOpcode() == Instruction::ZExt ||
+ VectorizableTree[NodeIdx]->getOpcode() == Instruction::SExt ||
+ VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc)) {
+ assert(IsStoreOrInsertElt && "Expected store/insertelement seeded graph.");
+ ++NodeIdx;
+ IsProfitableToDemoteRoot = true;
}
- // The maximum bit width required to represent all the values that can be
- // demoted without loss of precision. It would be safe to truncate the roots
- // of the expression to this width.
- auto MaxBitWidth = 1u;
-
- // We first check if all the bits of the roots are demanded. If they're not,
- // we can truncate the roots to this narrower type.
- for (auto *Root : TreeRoot) {
- auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
- MaxBitWidth = std::max<unsigned>(Mask.getBitWidth() - Mask.countl_zero(),
- MaxBitWidth);
- }
-
- // True if the roots can be zero-extended back to their original type, rather
- // than sign-extended. We know that if the leading bits are not demanded, we
- // can safely zero-extend. So we initialize IsKnownPositive to True.
- bool IsKnownPositive = true;
-
- // If all the bits of the roots are demanded, we can try a little harder to
- // compute a narrower type. This can happen, for example, if the roots are
- // getelementptr indices. InstCombine promotes these indices to the pointer
- // width. Thus, all their bits are technically demanded even though the
- // address computation might be vectorized in a smaller type.
- //
- // We start by looking at each entry that can be demoted. We compute the
- // maximum bit width required to store the scalar by using ValueTracking to
- // compute the number of high-order bits we can truncate.
- if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
- all_of(TreeRoot, [](Value *V) {
- return all_of(V->users(),
- [](User *U) { return isa<GetElementPtrInst>(U); });
- })) {
- MaxBitWidth = 8u;
-
+ SmallVector<Value *> ToDemote;
+ DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts;
+ auto ComputeMaxBitWidth = [&](ArrayRef<Value *> TreeRoot, unsigned VF,
+ bool IsTopRoot, bool IsProfitableToDemoteRoot,
+ unsigned Opcode, unsigned Limit) {
+ ToDemote.clear();
+ auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
+ if (!TreeRootIT || !Opcode)
+ return 0u;
+
+ unsigned NumParts = TTI->getNumberOfParts(
+ FixedVectorType::get(TreeRoot.front()->getType(), VF));
+
+ // The maximum bit width required to represent all the values that can be
+ // demoted without loss of precision. It would be safe to truncate the roots
+ // of the expression to this width.
+ auto MaxBitWidth = 1u;
+
+ // True if the roots can be zero-extended back to their original type,
+ // rather than sign-extended. We know that if the leading bits are not
+ // demanded, we can safely zero-extend. So we initialize IsKnownPositive to
+ // True.
// Determine if the sign bit of all the roots is known to be zero. If not,
// IsKnownPositive is set to False.
- IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
+ bool IsKnownPositive = all_of(TreeRoot, [&](Value *R) {
KnownBits Known = computeKnownBits(R, *DL);
return Known.isNonNegative();
});
- // Determine the maximum number of bits required to store the scalar
- // values.
- for (auto *Scalar : ToDemote) {
- auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
- auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
- MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
- }
-
- // If we can't prove that the sign bit is zero, we must add one to the
- // maximum bit width to account for the unknown sign bit. This preserves
- // the existing sign bit so we can safely sign-extend the root back to the
- // original type. Otherwise, if we know the sign bit is zero, we will
- // zero-extend the root instead.
- //
- // FIXME: This is somewhat suboptimal, as there will be cases where adding
- // one to the maximum bit width will yield a larger-than-necessary
- // type. In general, we need to add an extra bit only if we can't
- // prove that the upper bit of the original type is equal to the
- // upper bit of the proposed smaller type. If these two bits are the
- // same (either zero or one) we know that sign-extending from the
- // smaller type will result in the same value. Here, since we can't
- // yet prove this, we are just making the proposed smaller type
- // larger to ensure correctness.
- if (!IsKnownPositive)
- ++MaxBitWidth;
- }
-
- // Round MaxBitWidth up to the next power-of-two.
- MaxBitWidth = llvm::bit_ceil(MaxBitWidth);
-
- // If the maximum bit width we compute is less than the with of the roots'
- // type, we can proceed with the narrowing. Otherwise, do nothing.
- if (MaxBitWidth >= TreeRootIT->getBitWidth())
- return;
+ // We first check if all the bits of the roots are demanded. If they're not,
+ // we can truncate the roots to this narrower type.
+ for (auto *Root : TreeRoot) {
+ auto NumSignBits = ComputeNumSignBits(Root, *DL, 0, AC, nullptr, DT);
+ auto NumTypeBits = DL->getTypeSizeInBits(Root->getType());
+ unsigned BitWidth1 = NumTypeBits - NumSignBits;
+ // If we can't prove that the sign bit is zero, we must add one to the
+ // maximum bit width to account for the unknown sign bit. This preserves
+ // the existing sign bit so we can safely sign-extend the root back to the
+ // original type. Otherwise, if we know the sign bit is zero, we will
+ // zero-extend the root instead.
+ //
+ // FIXME: This is somewhat suboptimal, as there will be cases where adding
+ // one to the maximum bit width will yield a larger-than-necessary
+ // type. In general, we need to add an extra bit only if we can't
+ // prove that the upper bit of the original type is equal to the
+ // upper bit of the proposed smaller type. If these two bits are
+ // the same (either zero or one) we know that sign-extending from
+ // the smaller type will result in the same value. Here, since we
+ // can't yet prove this, we are just making the proposed smaller
+ // type larger to ensure correctness.
+ if (!IsKnownPositive)
+ ++BitWidth1;
+
+ auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
+ unsigned BitWidth2 = Mask.getBitWidth() - Mask.countl_zero();
+ MaxBitWidth =
+ std::max<unsigned>(std::min(BitWidth1, BitWidth2), MaxBitWidth);
+ }
+
+ if (MaxBitWidth < 8 && MaxBitWidth > 1)
+ MaxBitWidth = 8;
+
+ // If the original type is large but the reduced type does not improve
+ // register usage, ignore it.
+ if (NumParts > 1 &&
+ NumParts ==
+ TTI->getNumberOfParts(FixedVectorType::get(
+ IntegerType::get(F->getContext(), bit_ceil(MaxBitWidth)), VF)))
+ return 0u;
+
+ bool IsProfitableToDemote = Opcode == Instruction::Trunc ||
+ Opcode == Instruction::SExt ||
+ Opcode == Instruction::ZExt || NumParts > 1;
+ // Conservatively determine if we can actually truncate the roots of the
+ // expression. Collect the values that can be demoted in ToDemote and
+ // additional roots that require investigating in Roots.
+ for (auto *Root : TreeRoot) {
+ DenseSet<Value *> Visited;
+ unsigned MaxDepthLevel;
+ bool NeedToDemote = IsProfitableToDemote;
+ if (!collectValuesToDemote(Root, IsProfitableToDemoteRoot, MaxBitWidth,
+ ToDemote, DemotedConsts, Visited,
+ MaxDepthLevel, NeedToDemote) ||
+ (MaxDepthLevel <= Limit && Opcode != Instruction::Trunc &&
+ Opcode != Instruction::SExt && Opcode != Instruction::ZExt))
+ return 0u;
+ }
+ // Round MaxBitWidth up to the next power-of-two.
+ MaxBitWidth = bit_ceil(MaxBitWidth);
+
+ return MaxBitWidth;
+ };
// If we can truncate the root, we must collect additional values that might
// be demoted as a result. That is, those seeded by truncations we will
// modify.
- while (!Roots.empty()) {
- DenseSet<Value *> Visited;
- collectValuesToDemote(Roots.pop_back_val(), ToDemote, DemotedConsts, Roots,
- Visited);
- }
+ // Add reduction ops sizes, if any.
+ unsigned RedMaxBitWidth = 0;
+ bool IsRedSigned = false;
+ if (UserIgnoreList &&
+ isa<IntegerType>(VectorizableTree.front()->Scalars.front()->getType())) {
+ for (Value *V : *UserIgnoreList) {
+ auto NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
+ auto NumTypeBits = DL->getTypeSizeInBits(V->getType());
+ unsigned BitWidth1 = NumTypeBits - NumSignBits;
+ KnownBits Known = computeKnownBits(V, *DL);
+ if (!Known.isNonNegative()) {
+ IsRedSigned = true;
+ ++BitWidth1;
+ }
+ auto Mask = DB->getDemandedBits(cast<Instruction>(V));
+ unsigned BitWidth2 = Mask.getBitWidth() - Mask.countl_zero();
+ RedMaxBitWidth = std::max(std::min(BitWidth1, BitWidth2), RedMaxBitWidth);
+ }
+ if (RedMaxBitWidth < 8 && RedMaxBitWidth > 1)
+ RedMaxBitWidth = 8;
+
+ RedMaxBitWidth = bit_ceil(RedMaxBitWidth);
+ }
+ bool IsTopRoot = NodeIdx == 0;
+ while (NodeIdx < VectorizableTree.size()) {
+ ArrayRef<Value *> TreeRoot = VectorizableTree[NodeIdx]->Scalars;
+ unsigned Limit = 2;
+ if (NodeIdx == 0 &&
+ RedMaxBitWidth == DL->getTypeSizeInBits(TreeRoot.front()->getType()))
+ Limit = 3;
+ unsigned MaxBitWidth = ComputeMaxBitWidth(
+ TreeRoot, VectorizableTree[NodeIdx]->getVectorFactor(), IsTopRoot,
+ IsProfitableToDemoteRoot, VectorizableTree[NodeIdx]->getOpcode(),
+ Limit);
+ IsTopRoot = false;
+ IsProfitableToDemoteRoot = true;
+
+ ++NodeIdx;
+ for (unsigned E = VectorizableTree.size(); NodeIdx < E; ++NodeIdx) {
+ if (VectorizableTree[NodeIdx]->State != TreeEntry::NeedToGather &&
+ !VectorizableTree[NodeIdx]->isAltShuffle() &&
+ VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {
+ ++NodeIdx;
+ break;
+ }
+ }
- // Finally, map the values we can demote to the maximum bit with we computed.
- for (auto *Scalar : ToDemote) {
- auto *TE = getTreeEntry(Scalar);
- assert(TE && "Expected vectorized scalar.");
- if (MinBWs.contains(TE))
+ // If the maximum bit width we compute is less than the width of the roots'
+ // type, we can proceed with the narrowing. Otherwise, do nothing.
+ if (MaxBitWidth == 0 ||
+ MaxBitWidth >=
+ cast<IntegerType>(TreeRoot.front()->getType())->getBitWidth())
continue;
- bool IsSigned = any_of(TE->Scalars, [&](Value *R) {
- KnownBits Known = computeKnownBits(R, *DL);
- return !Known.isNonNegative();
- });
- MinBWs.try_emplace(TE, MaxBitWidth, IsSigned);
- const auto *I = cast<Instruction>(Scalar);
- auto DCIt = DemotedConsts.find(I);
- if (DCIt != DemotedConsts.end()) {
- for (unsigned Idx : DCIt->getSecond()) {
- // Check that all instructions operands are demoted.
- if (all_of(TE->Scalars, [&](Value *V) {
- auto SIt = DemotedConsts.find(cast<Instruction>(V));
- return SIt != DemotedConsts.end() &&
- is_contained(SIt->getSecond(), Idx);
- })) {
+
+ // Finally, map the values we can demote to the maximum bit width we
+ // computed.
+ for (Value *Scalar : ToDemote) {
+ TreeEntry *TE = getTreeEntry(Scalar);
+ assert(TE && "Expected vectorized scalar.");
+ if (MinBWs.contains(TE))
+ continue;
+ bool IsSigned = false;
+ unsigned BitWidth = MaxBitWidth;
+ if (TE->Idx == 0 && UserIgnoreList) {
+ assert(RedMaxBitWidth != std::numeric_limits<unsigned>::max() &&
+ "Expected reduction instruction bitwidth analysis.");
+ if (RedMaxBitWidth ==
+ DL->getTypeSizeInBits(TE->Scalars.front()->getType()))
+ continue;
+ BitWidth = RedMaxBitWidth;
+ IsSigned = IsRedSigned;
+ } else {
+ IsSigned = any_of(TE->Scalars, [&](Value *R) {
+ KnownBits Known = computeKnownBits(R, *DL);
+ return !Known.isNonNegative();
+ });
+ }
+ MinBWs.try_emplace(TE, BitWidth, IsSigned);
+ const auto *I = cast<Instruction>(Scalar);
+ auto DCIt = DemotedConsts.find(I);
+ if (DCIt != DemotedConsts.end()) {
+ for (unsigned Idx : DCIt->getSecond()) {
+ // Check that all instructions' operands are demoted.
const TreeEntry *CTE = getOperandEntry(TE, Idx);
- MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned);
+ if (all_of(TE->Scalars,
+ [&](Value *V) {
+ auto SIt = DemotedConsts.find(cast<Instruction>(V));
+ return SIt != DemotedConsts.end() &&
+ is_contained(SIt->getSecond(), Idx);
+ }) ||
+ all_of(CTE->Scalars, Constant::classof))
+ MinBWs.try_emplace(CTE, BitWidth, IsSigned);
}
}
}
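Note (illustration, not part of the patch): one of the new bail-outs in ComputeMaxBitWidth compares TTI->getNumberOfParts for the original and the narrowed vector type and gives up when the original already spans several registers and the narrowed type spans just as many. A rough standalone sketch of that register-count check, assuming 128-bit vector registers; numParts and rejectsNarrowing are illustrative names:

#include <bit>
#include <iostream>

// Number of vector registers ("parts") a VF-wide vector of ElemBits-bit
// elements occupies, assuming 128-bit registers.
static unsigned numParts(unsigned ElemBits, unsigned VF, unsigned RegBits = 128) {
  return (ElemBits * VF + RegBits - 1) / RegBits; // ceiling division
}

// Mirrors the bail-out: reject the narrowing if the original type already
// spans more than one register and the rounded-up narrow type spans as many.
static bool rejectsNarrowing(unsigned OrigBits, unsigned MaxBitWidth, unsigned VF) {
  unsigned Orig = numParts(OrigBits, VF);
  return Orig > 1 && Orig == numParts(std::bit_ceil(MaxBitWidth), VF);
}

int main() {
  std::cout << rejectsNarrowing(64, 16, 4) << '\n'; // 0: 2 registers become 1, keep it
  std::cout << rejectsNarrowing(32, 17, 8) << '\n'; // 1: still 2 registers, give up
  std::cout << rejectsNarrowing(32, 16, 4) << '\n'; // 0: single register, check does not fire
}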
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
index cef791633655a..5e3fd156666f5 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
@@ -17,12 +17,13 @@ define void @test1(<4 x i16> %a, <4 x i16> %b, ptr %p) {
; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[S0]]
; CHECK-NEXT: [[LOAD0:%.*]] = load i64, ptr [[GEP0]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[SUB0]], <4 x i32> poison, <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i32> [[TMP0]] to <2 x i64>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i64, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[LOAD2:%.*]] = load i64, ptr [[GEP2]], align 4
; CHECK-NEXT: [[E3:%.*]] = extractelement <4 x i32> [[SUB0]], i32 3
; CHECK-NEXT: [[S3:%.*]] = sext i32 [[E3]] to i64
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
index d756d57fa08a1..c3f8456b8b608 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
@@ -232,44 +232,102 @@ define void @test_i16_extend(ptr %p.1, ptr %p.2, i32 %idx.i32) {
; CHECK-LABEL: @test_i16_extend(
; CHECK-NEXT: [[P_0:%.*]] = load ptr, ptr @global, align 8
; CHECK-NEXT: [[IDX_0:%.*]] = zext i32 [[IDX_I32:%.*]] to i64
+; CHECK-NEXT: [[IDX_1:%.*]] = add nuw nsw i64 [[IDX_0]], 1
+; CHECK-NEXT: [[IDX_2:%.*]] = add nuw nsw i64 [[IDX_0]], 2
+; CHECK-NEXT: [[IDX_3:%.*]] = add nuw nsw i64 [[IDX_0]], 3
+; CHECK-NEXT: [[IDX_4:%.*]] = add nuw nsw i64 [[IDX_0]], 4
+; CHECK-NEXT: [[IDX_5:%.*]] = add nuw nsw i64 [[IDX_0]], 5
+; CHECK-NEXT: [[IDX_6:%.*]] = add nuw nsw i64 [[IDX_0]], 6
+; CHECK-NEXT: [[IDX_7:%.*]] = add nuw nsw i64 [[IDX_0]], 7
; CHECK-NEXT: [[T53:%.*]] = getelementptr inbounds i16, ptr [[P_1:%.*]], i64 [[IDX_0]]
+; CHECK-NEXT: [[OP1_L:%.*]] = load i16, ptr [[T53]], align 2
; CHECK-NEXT: [[T56:%.*]] = getelementptr inbounds i16, ptr [[P_2:%.*]], i64 [[IDX_0]]
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[T53]], align 2
-; CHECK-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[TMP1]] to <8 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr [[T56]], align 2
-; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[TMP3]] to <8 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = sub nsw <8 x i32> [[TMP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <8 x i32> [[TMP5]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
-; CHECK-NEXT: [[T60:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP7]]
+; CHECK-NEXT: [[OP2_L:%.*]] = load i16, ptr [[T56]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i16> poison, i16 [[OP1_L]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i16> poison, i16 [[OP2_L]], i64 1
+; CHECK-NEXT: [[T64:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_1]]
+; CHECK-NEXT: [[T65:%.*]] = load i16, ptr [[T64]], align 2
+; CHECK-NEXT: [[T67:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_1]]
+; CHECK-NEXT: [[T68:%.*]] = load i16, ptr [[T67]], align 2
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i16> poison, i16 [[T65]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x i16> poison, i16 [[T68]], i64 1
+; CHECK-NEXT: [[T75:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_2]]
+; CHECK-NEXT: [[T76:%.*]] = load i16, ptr [[T75]], align 2
+; CHECK-NEXT: [[T78:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_2]]
+; CHECK-NEXT: [[T79:%.*]] = load i16, ptr [[T78]], align 2
+; CHECK-NEXT: [[T86:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_3]]
+; CHECK-NEXT: [[T87:%.*]] = load i16, ptr [[T86]], align 2
+; CHECK-NEXT: [[T89:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_3]]
+; CHECK-NEXT: [[T90:%.*]] = load i16, ptr [[T89]], align 2
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x i16> [[TMP1]], <2 x i16> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i16> [[TMP5]], i16 [[T76]], i64 2
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i16> [[TMP6]], i16 [[T87]], i64 3
+; CHECK-NEXT: [[TMP8:%.*]] = zext <4 x i16> [[TMP7]] to <4 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP4]], <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i16> [[TMP9]], i16 [[T79]], i64 2
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i16> [[TMP10]], i16 [[T90]], i64 3
+; CHECK-NEXT: [[TMP12:%.*]] = zext <4 x i16> [[TMP11]] to <4 x i32>
+; CHECK-NEXT: [[TMP13:%.*]] = sub nsw <4 x i32> [[TMP8]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[TMP13]], i64 0
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[T60:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP15]]
; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[T60]], align 4
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <8 x i32> [[TMP5]], i64 1
-; CHECK-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; CHECK-NEXT: [[T71:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[TMP13]], i64 1
+; CHECK-NEXT: [[TMP17:%.*]] = sext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[T71:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP17]]
; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[T71]], align 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <8 x i32> [[TMP5]], i64 2
-; CHECK-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
-; CHECK-NEXT: [[T82:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[TMP13]], i64 2
+; CHECK-NEXT: [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[T82:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP19]]
; CHECK-NEXT: [[L_3:%.*]] = load i32, ptr [[T82]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <8 x i32> [[TMP5]], i64 3
-; CHECK-NEXT: [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
-; CHECK-NEXT: [[T93:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[TMP13]], i64 3
+; CHECK-NEXT: [[TMP21:%.*]] = sext i32 [[TMP20]] to i64
+; CHECK-NEXT: [[T93:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP21]]
; CHECK-NEXT: [[L_4:%.*]] = load i32, ptr [[T93]], align 4
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <8 x i32> [[TMP5]], i64 4
-; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
-; CHECK-NEXT: [[T104:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP15]]
+; CHECK-NEXT: [[T97:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_4]]
+; CHECK-NEXT: [[T98:%.*]] = load i16, ptr [[T97]], align 2
+; CHECK-NEXT: [[T100:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_4]]
+; CHECK-NEXT: [[T101:%.*]] = load i16, ptr [[T100]], align 2
+; CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x i16> poison, i16 [[T98]], i64 0
+; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i16> poison, i16 [[T101]], i64 1
+; CHECK-NEXT: [[T108:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_5]]
+; CHECK-NEXT: [[T109:%.*]] = load i16, ptr [[T108]], align 2
+; CHECK-NEXT: [[T111:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_5]]
+; CHECK-NEXT: [[T112:%.*]] = load i16, ptr [[T111]], align 2
+; CHECK-NEXT: [[TMP24:%.*]] = insertelement <2 x i16> poison, i16 [[T109]], i64 0
+; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x i16> poison, i16 [[T112]], i64 1
+; CHECK-NEXT: [[T119:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_6]]
+; CHECK-NEXT: [[T120:%.*]] = load i16, ptr [[T119]], align 2
+; CHECK-NEXT: [[T122:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_6]]
+; CHECK-NEXT: [[T123:%.*]] = load i16, ptr [[T122]], align 2
+; CHECK-NEXT: [[T130:%.*]] = getelementptr inbounds i16, ptr [[P_1]], i64 [[IDX_7]]
+; CHECK-NEXT: [[T131:%.*]] = load i16, ptr [[T130]], align 2
+; CHECK-NEXT: [[T133:%.*]] = getelementptr inbounds i16, ptr [[P_2]], i64 [[IDX_7]]
+; CHECK-NEXT: [[T134:%.*]] = load i16, ptr [[T133]], align 2
+; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <2 x i16> [[TMP22]], <2 x i16> [[TMP24]], <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i16> [[TMP26]], i16 [[T120]], i64 2
+; CHECK-NEXT: [[TMP28:%.*]] = insertelement <4 x i16> [[TMP27]], i16 [[T131]], i64 3
+; CHECK-NEXT: [[TMP29:%.*]] = zext <4 x i16> [[TMP28]] to <4 x i32>
+; CHECK-NEXT: [[TMP30:%.*]] = shufflevector <2 x i16> [[TMP23]], <2 x i16> [[TMP25]], <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i16> [[TMP30]], i16 [[T123]], i64 2
+; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i16> [[TMP31]], i16 [[T134]], i64 3
+; CHECK-NEXT: [[TMP33:%.*]] = zext <4 x i16> [[TMP32]] to <4 x i32>
+; CHECK-NEXT: [[TMP34:%.*]] = sub nsw <4 x i32> [[TMP29]], [[TMP33]]
+; CHECK-NEXT: [[TMP35:%.*]] = extractelement <4 x i32> [[TMP34]], i64 0
+; CHECK-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
+; CHECK-NEXT: [[T104:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP36]]
; CHECK-NEXT: [[L_5:%.*]] = load i32, ptr [[T104]], align 4
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <8 x i32> [[TMP5]], i64 5
-; CHECK-NEXT: [[TMP17:%.*]] = sext i32 [[TMP16]] to i64
-; CHECK-NEXT: [[T115:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP17]]
+; CHECK-NEXT: [[TMP37:%.*]] = extractelement <4 x i32> [[TMP34]], i64 1
+; CHECK-NEXT: [[TMP38:%.*]] = sext i32 [[TMP37]] to i64
+; CHECK-NEXT: [[T115:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP38]]
; CHECK-NEXT: [[L_6:%.*]] = load i32, ptr [[T115]], align 4
-; CHECK-NEXT: [[TMP18:%.*]] = extractelement <8 x i32> [[TMP5]], i64 6
-; CHECK-NEXT: [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
-; CHECK-NEXT: [[T126:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP39:%.*]] = extractelement <4 x i32> [[TMP34]], i64 2
+; CHECK-NEXT: [[TMP40:%.*]] = sext i32 [[TMP39]] to i64
+; CHECK-NEXT: [[T126:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP40]]
; CHECK-NEXT: [[L_7:%.*]] = load i32, ptr [[T126]], align 4
-; CHECK-NEXT: [[TMP20:%.*]] = extractelement <8 x i32> [[TMP5]], i64 7
-; CHECK-NEXT: [[TMP21:%.*]] = sext i32 [[TMP20]] to i64
-; CHECK-NEXT: [[T137:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP41:%.*]] = extractelement <4 x i32> [[TMP34]], i64 3
+; CHECK-NEXT: [[TMP42:%.*]] = sext i32 [[TMP41]] to i64
+; CHECK-NEXT: [[T137:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP42]]
; CHECK-NEXT: [[L_8:%.*]] = load i32, ptr [[T137]], align 4
; CHECK-NEXT: call void @use(i32 [[L_1]], i32 [[L_2]], i32 [[L_3]], i32 [[L_4]], i32 [[L_5]], i32 [[L_6]], i32 [[L_7]], i32 [[L_8]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll
index d67fdc1cd6aa0..a7a7f642ced53 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll
@@ -28,21 +28,11 @@ entry:
define i64 @red_zext_ld_4xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_4xi64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LD0:%.*]] = load i8, ptr [[PTR:%.*]], align 1
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LD0]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP]], align 1
-; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i8 [[LD1]] to i64
-; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[ZEXT]], [[ZEXT_1]]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 2
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_1]], align 1
-; CHECK-NEXT: [[ZEXT_2:%.*]] = zext i8 [[LD2]] to i64
-; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i64 [[ADD_1]], [[ZEXT_2]]
-; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
-; CHECK-NEXT: [[LD3:%.*]] = load i8, ptr [[GEP_2]], align 1
-; CHECK-NEXT: [[ZEXT_3:%.*]] = zext i8 [[LD3]] to i64
-; CHECK-NEXT: [[ADD_3:%.*]] = add nuw nsw i64 [[ADD_2]], [[ZEXT_3]]
-; CHECK-NEXT: ret i64 [[ADD_3]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[TMP3]]
;
entry:
%ld0 = load i8, ptr %ptr
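Note (illustration, not part of the patch): the updated checks above show the add reduction now running in i16, with a single zext of the scalar result to i64. The patch derives the width from the demanded bits and sign bits of the reduction operands; as a plain arithmetic bound, summing N unsigned W-bit values needs at most W + ceil(log2(N)) bits, so four zero-extended i8 values fit in 10 bits and i16 is lossless. A tiny C++20 sketch of that bound (accumulatorBits is an illustrative name):

#include <bit>
#include <iostream>

// Upper bound on the accumulator width for an add reduction of NumElems
// unsigned values of ValueBits bits each.
static unsigned accumulatorBits(unsigned ValueBits, unsigned NumElems) {
  return ValueBits + std::bit_width(NumElems - 1); // ceil(log2(NumElems)) carry bits
}

int main() {
  std::cout << accumulatorBits(8, 4) << " bits needed\n"; // 10, so i16 is enough
}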
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
index 35908ee72bc20..ca6f8748e8762 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
@@ -802,9 +802,10 @@ define i64 @red_zext_ld_4xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_4xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
-; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i64>
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: ret i64 [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[TMP3]]
;
entry:
%ld0 = load i8, ptr %ptr
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll
index 4132fbaa5c755..05511f843a68f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll
@@ -9,17 +9,18 @@ define { i64, i64 } @patatino(double %arg) {
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr @global, align 16
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([6 x double], ptr @global, i64 0, i64 2), align 16
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> poison, double [[ARG:%.*]], i32 0
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = fmul <2 x double> [[TMP1]], [[SHUFFLE]]
-; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP0]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([6 x double], ptr @global, i64 0, i64 4), align 16
-; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[TMP5]], [[TMP4]]
-; CHECK-NEXT: [[TMP7:%.*]] = fptosi <2 x double> [[TMP6]] to <2 x i32>
-; CHECK-NEXT: [[TMP8:%.*]] = sext <2 x i32> [[TMP7]] to <2 x i64>
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i64> [[TMP8]], i32 0
-; CHECK-NEXT: [[T16:%.*]] = insertvalue { i64, i64 } undef, i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i64> [[TMP8]], i32 1
-; CHECK-NEXT: [[T17:%.*]] = insertvalue { i64, i64 } [[T16]], i64 [[TMP10]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[TMP0]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([6 x double], ptr @global, i64 0, i64 4), align 16
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = fptosi <2 x double> [[TMP7]] to <2 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i32> [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[T16:%.*]] = insertvalue { i64, i64 } undef, i64 [[TMP10]], 0
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i32> [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[T17:%.*]] = insertvalue { i64, i64 } [[T16]], i64 [[TMP12]], 1
; CHECK-NEXT: ret { i64, i64 } [[T17]]
;
bb:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
index 6e512fcbb7392..6051638562b59 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
@@ -6,18 +6,17 @@ define void @test(i8 %0) {
; CHECK-SAME: i8 [[TMP0:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> <i8 0, i8 poison>, i8 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i16>
-; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i16> [[TMP2]] to <2 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = mul <2 x i16> [[TMP2]], zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i16> [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i16> [[TMP4]], i32 1
-; CHECK-NEXT: [[TMP8:%.*]] = zext i16 [[TMP7]] to i32
-; CHECK-NEXT: [[ADD:%.*]] = or i32 [[TMP6]], [[TMP8]]
+; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <2 x i8> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i8> [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i8> [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = zext i8 [[TMP6]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = or i32 [[TMP5]], [[TMP7]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[ADD]], 1
; CHECK-NEXT: [[CONV9:%.*]] = trunc i32 [[SHR]] to i8
; CHECK-NEXT: store i8 [[CONV9]], ptr null, align 1
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> poison, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll b/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll
index 903adc8893f34..ab8120ee8d5a8 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll
@@ -89,15 +89,14 @@ define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
; AVX-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
; AVX-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
-; AVX-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i16>
-; AVX-NEXT: [[TMP4:%.*]] = extractelement <2 x i16> [[TMP3]], i64 0
-; AVX-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i64
-; AVX-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP5]]
-; AVX-NEXT: [[TMP6:%.*]] = extractelement <2 x i16> [[TMP3]], i64 1
-; AVX-NEXT: [[TMP7:%.*]] = sext i16 [[TMP6]] to i64
-; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP7]]
-; AVX-NEXT: [[TMP6:%.*]] = load i8, ptr [[TMP4]], align 1
-; AVX-NEXT: [[TMP7:%.*]] = load i8, ptr [[TMP5]], align 1
+; AVX-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
+; AVX-NEXT: [[TMP4:%.*]] = sext i8 [[TMP3]] to i64
+; AVX-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
+; AVX-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
+; AVX-NEXT: [[TMP6:%.*]] = sext i8 [[TMP5]] to i64
+; AVX-NEXT: [[TMP55:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
+; AVX-NEXT: [[TMP6:%.*]] = load i8, ptr [[TMP44]], align 1
+; AVX-NEXT: [[TMP7:%.*]] = load i8, ptr [[TMP55]], align 1
; AVX-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
; AVX-NEXT: ret i8 [[TMP8]]
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
index 88f75c37846ef..3cc32c1fc7b28 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
@@ -15,8 +15,8 @@ define i32 @phi3UndefInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %arg3) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 undef, i8 undef, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -52,8 +52,8 @@ define i32 @phi2UndefInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %arg3) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 undef, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -89,8 +89,8 @@ define i32 @phi1UndefInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %arg3) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 0, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -127,8 +127,8 @@ define i32 @phi1Undef1PoisonInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %ar
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 poison, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -165,8 +165,8 @@ define i32 @phi1Undef2PoisonInputs(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %a
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 poison, i8 poison, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -202,8 +202,8 @@ define i32 @phi1Undef1PoisonGapInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 poison, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
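Note (illustration, not part of the patch): for the OR reductions above, no bits beyond the 8-bit inputs can ever be set, so reducing the <4 x i8> directly and zero-extending the scalar result is equivalent to the old sequence that zero-extended every lane to i32 first. A trivial standalone check of that equivalence:

#include <cstdint>
#include <iostream>

int main() {
  uint8_t V[4] = {0x81, 0x0F, 0x30, 0x02};
  uint8_t NarrowOr = 0;  // new form: reduce in i8, zext once at the end
  uint32_t WideOr = 0;   // old form: zext every lane to i32, reduce in i32
  for (uint8_t X : V) {
    NarrowOr |= X;
    WideOr |= uint32_t(X);
  }
  std::cout << (uint32_t(NarrowOr) == WideOr ? "match" : "mismatch") << '\n';
}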
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/resched.ll b/llvm/test/Transforms/SLPVectorizer/X86/resched.ll
index 78c6d9516a3de..b7237cbb02bb3 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/resched.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/resched.ll
@@ -11,26 +11,26 @@ define fastcc void @_ZN12_GLOBAL__N_127PolynomialMultiplyRecognize9recognizeEv()
; CHECK: if.then22.i:
; CHECK-NEXT: [[SUB_I:%.*]] = add nsw i32 undef, -1
; CHECK-NEXT: [[CONV31_I:%.*]] = and i32 undef, [[SUB_I]]
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> poison, i32 [[CONV31_I]], i32 0
-; CHECK-NEXT: [[SHUFFLE1:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = lshr <4 x i32> [[SHUFFLE1]], <i32 1, i32 2, i32 3, i32 4>
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[CONV31_I]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], <i32 1, i32 2, i32 3, i32 4>
; CHECK-NEXT: [[SHR_4_I_I:%.*]] = lshr i32 [[CONV31_I]], 5
; CHECK-NEXT: [[SHR_5_I_I:%.*]] = lshr i32 [[CONV31_I]], 6
; CHECK-NEXT: [[SHR_6_I_I:%.*]] = lshr i32 [[CONV31_I]], 7
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[CONV31_I]], i32 0
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = lshr <8 x i32> [[SHUFFLE]], <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i32> poison, i32 [[SUB_I]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i32> [[TMP5]], <16 x i32> [[TMP6]], <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i32> [[TMP7]], i32 [[SHR_4_I_I]], i32 5
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i32> [[TMP8]], i32 [[SHR_5_I_I]], i32 6
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i32> [[TMP9]], i32 [[SHR_6_I_I]], i32 7
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <16 x i32> [[TMP10]], <16 x i32> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
-; CHECK-NEXT: [[TMP13:%.*]] = trunc <16 x i32> [[TMP12]] to <16 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i8> [[TMP13]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-; CHECK-NEXT: store <16 x i8> [[TMP14]], ptr undef, align 1
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = lshr <8 x i32> [[TMP4]], <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i32> poison, i32 [[SUB_I]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i32> [[TMP6]], <16 x i32> [[TMP7]], <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i32> [[TMP8]], i32 [[SHR_4_I_I]], i32 5
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i32> [[TMP9]], i32 [[SHR_5_I_I]], i32 6
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i32> [[TMP10]], i32 [[SHR_6_I_I]], i32 7
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <16 x i32> [[TMP11]], <16 x i32> [[TMP12]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; CHECK-NEXT: [[TMP14:%.*]] = trunc <16 x i32> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i8> [[TMP14]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: store <16 x i8> [[TMP15]], ptr undef, align 1
; CHECK-NEXT: unreachable
; CHECK: if.end50.i:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll
index 5d22b5a4873be..1d1fcec2a7aeb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll
@@ -7,12 +7,10 @@ define i1 @test(i1 %cmp5.not.31) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i1> <i1 poison, i1 false, i1 false, i1 false>, i1 [[CMP5_NOT_31]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i1> [[TMP2]] to <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i32> [[TMP3]], <i32 2, i32 1, i32 1, i32 1>
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 0
-; CHECK-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i32 [[TMP6]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> [[TMP1]], <i32 2, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = and i32 [[TMP3]], 0
+; CHECK-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i32 [[TMP4]], 0
; CHECK-NEXT: ret i1 [[CMP_NOT_I_I]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll
index c1dd90d0e9a7b..8005c528688e5 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll
@@ -8,17 +8,18 @@
; YAML-NEXT: Function: stores
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Stores SLP vectorized with cost '
-; YAML-NEXT: - Cost: '-3'
+; YAML-NEXT: - Cost: '-11'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '6'
define void @stores(ptr noalias %in, ptr noalias %inn, ptr noalias %out) {
; CHECK-LABEL: @stores(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[IN:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[INN:%.*]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i64>
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i64> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr [[OUT:%.*]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i16> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP6]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
%load.1 = load i8, ptr %in, align 1
@@ -63,17 +64,18 @@ define void @stores(ptr noalias %in, ptr noalias %inn, ptr noalias %out) {
; YAML-NEXT: Function: insertelems
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'SLP vectorized with cost '
-; YAML-NEXT: - Cost: '-5'
+; YAML-NEXT: - Cost: '-13'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '6'
define <4 x i64> @insertelems(ptr noalias %in, ptr noalias %inn) {
; CHECK-LABEL: @insertelems(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[IN:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[INN:%.*]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i64>
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i64> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: ret <4 x i64> [[TMP5]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i16> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i64>
+; CHECK-NEXT: ret <4 x i64> [[TMP6]]
;
%load.1 = load i8, ptr %in, align 1
%gep.1 = getelementptr inbounds i8, ptr %in, i64 1
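Note (illustration, not part of the patch): the improved SLP costs reported above (-3 to -11 and -5 to -13) reflect that the add is now performed on <4 x i16> with a single widening of the result to <4 x i64>, instead of widening both i8 inputs to i64 before adding. The sum of two zero-extended i8 values needs at most 9 bits, so the narrow add loses nothing; a trivial standalone check:

#include <cstdint>
#include <iostream>

int main() {
  uint8_t A = 255, B = 255;                               // worst-case inputs
  uint16_t Narrow = uint16_t(uint16_t(A) + uint16_t(B));  // i16 add, at most 510
  uint64_t Wide = uint64_t(A) + uint64_t(B);              // reference i64 add
  std::cout << (uint64_t(Narrow) == Wide ? "match" : "mismatch") << '\n';
}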
diff --git a/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll b/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll
index 061fbdb45a13b..ff6f0bdd3db8f 100644
--- a/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll
+++ b/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll
@@ -10,8 +10,8 @@ define i32 @alt_cmp(i16 %call46) {
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <4 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt <4 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i1> [[TMP4]] to <4 x i16>
-; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i16
; CHECK-NEXT: [[OP_RDX:%.*]] = or i16 [[TMP6]], 0
; CHECK-NEXT: [[EXT:%.*]] = zext i16 [[OP_RDX]] to i32
; CHECK-NEXT: ret i32 [[EXT]]