[llvm] 571c8c2 - Revert "[SLP]Initial support for non-power-of-2 (but still whole register) number of elements in operands."
Alexey Bataev via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 3 11:21:59 PDT 2024
Author: Alexey Bataev
Date: 2024-09-03T11:02:07-07:00
New Revision: 571c8c2c88122d318ed84cd9e948613e3f1aac5f
URL: https://github.com/llvm/llvm-project/commit/571c8c2c88122d318ed84cd9e948613e3f1aac5f
DIFF: https://github.com/llvm/llvm-project/commit/571c8c2c88122d318ed84cd9e948613e3f1aac5f.diff
LOG: Revert "[SLP]Initial support for non-power-of-2 (but still whole register) number of elements in operands."
This reverts commit a3ea90ffbbe47d9a1b3eab03324f09d7b8e0dcb3 after
post-commit review: the number of parts is calculated incorrectly.
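
For context, the reverted change keyed its whole-register padding on
TTI.getRegUsageForType rather than TTI.getNumberOfParts. Below is a minimal
sketch of the arithmetic performed by the removed
getFullVectorNumberOfElements (first hunk in the diff), assuming a
hypothetical target whose vector register holds two i64 lanes, so the
widened <6 x i64> splits into three parts; the lane count and the resulting
NumParts value are illustrative assumptions, not taken from the commit:

#include "llvm/Support/MathExtras.h"
using namespace llvm;

// Hypothetical walk-through for Sz = 6 scalars of type i64.
unsigned fullVectorCountSketch() {
  unsigned Sz = 6;
  // Assumed result of TTI.getRegUsageForType(getWidenedType(i64, 6)).
  unsigned NumParts = 3;
  // Round the per-part element count up to a power of two...
  unsigned PerPart = PowerOf2Ceil(divideCeil(Sz, NumParts)); // ceil(6/3) == 2
  // ...and multiply back: 2 * 3 == 6, so six lanes are accepted as a
  // whole-register count, which is how the single <6 x i64> load in the
  // RISC-V test diff below was formed.
  return PerPart * NumParts;
}

Because getRegUsageForType models estimated register usage rather than the
number of parts produced by type legalization, NumParts (and any padding
derived from it) can come out wrong, hence the revert back to
getNumberOfParts throughout.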
Added:
Modified:
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/test/Transforms/SLPVectorizer/RISCV/reduction-whole-regs-loads.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 2bb106672c2309..afc2d28aa4c96d 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -260,20 +260,6 @@ static FixedVectorType *getWidenedType(Type *ScalarTy, unsigned VF) {
VF * getNumElements(ScalarTy));
}
-/// Returns the number of elements of the given type \p Ty, not less than \p Sz,
-/// which forms type, which splits by \p TTI into whole vector types during
-/// legalization.
-static unsigned getFullVectorNumberOfElements(const TargetTransformInfo &TTI,
- Type *Ty, unsigned Sz) {
- if (!isValidElementType(Ty))
- return PowerOf2Ceil(Sz);
- // Find the number of elements, which forms full vectors.
- const unsigned NumParts = TTI.getRegUsageForType(getWidenedType(Ty, Sz));
- if (NumParts == 0 || NumParts >= Sz)
- return PowerOf2Ceil(Sz);
- return PowerOf2Ceil(divideCeil(Sz, NumParts)) * NumParts;
-}
-
static void transformScalarShuffleIndiciesToVector(unsigned VecTyNumElements,
SmallVectorImpl<int> &Mask) {
// The ShuffleBuilder implementation use shufflevector to splat an "element".
@@ -1237,22 +1223,6 @@ static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
(all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}
-/// Returns true if widened type of \p Ty elements with size \p Sz represents
-/// full vector type, i.e. adding extra element results in extra parts upon type
-/// legalization.
-static bool hasFullVectorsOnly(const TargetTransformInfo &TTI, Type *Ty,
- unsigned Sz) {
- if (Sz <= 1)
- return false;
- if (!isValidElementType(Ty) && !isa<FixedVectorType>(Ty))
- return false;
- if (has_single_bit(Sz))
- return true;
- const unsigned NumParts = TTI.getRegUsageForType(getWidenedType(Ty, Sz));
- return NumParts > 0 && NumParts < Sz && has_single_bit(Sz / NumParts) &&
- Sz % NumParts == 0;
-}
-
namespace slpvectorizer {
/// Bottom Up SLP Vectorizer.
@@ -2496,9 +2466,7 @@ class BoUpSLP {
}
// TODO: Check if we can remove a check for non-power-2 number of
// scalars after full support of non-power-2 vectorization.
- return UniqueValues.size() != 2 &&
- hasFullVectorsOnly(*R.TTI, (*UniqueValues.begin())->getType(),
- UniqueValues.size());
+ return UniqueValues.size() != 2 && has_single_bit(UniqueValues.size());
};
// If the initial strategy fails for any of the operand indexes, then we
@@ -3307,9 +3275,8 @@ class BoUpSLP {
SmallVectorImpl<Value *> *AltScalars = nullptr) const;
/// Return true if this is a non-power-of-2 node.
- bool isNonPowOf2Vec(const TargetTransformInfo &TTI) const {
- bool IsNonPowerOf2 = !hasFullVectorsOnly(
- TTI, getValueType(Scalars.front()), Scalars.size());
+ bool isNonPowOf2Vec() const {
+ bool IsNonPowerOf2 = !has_single_bit(Scalars.size());
assert((!IsNonPowerOf2 || ReuseShuffleIndices.empty()) &&
"Reshuffling not supported with non-power-of-2 vectors yet.");
return IsNonPowerOf2;
@@ -3487,7 +3454,7 @@ class BoUpSLP {
if (UserTreeIdx.UserTE) {
Last->UserTreeIndices.push_back(UserTreeIdx);
- assert((!Last->isNonPowOf2Vec(*TTI) || Last->ReorderIndices.empty()) &&
+ assert((!Last->isNonPowOf2Vec() || Last->ReorderIndices.empty()) &&
"Reordering isn't implemented for non-power-of-2 nodes yet");
}
return Last;
@@ -4393,7 +4360,7 @@ BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
if (!isValidElementType(ScalarTy))
return std::nullopt;
auto *VecTy = getWidenedType(ScalarTy, NumScalars);
- int NumParts = TTI->getRegUsageForType(VecTy);
+ int NumParts = TTI->getNumberOfParts(VecTy);
if (NumParts == 0 || NumParts >= NumScalars)
NumParts = 1;
SmallVector<int> ExtractMask;
@@ -4765,7 +4732,7 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
// Check the order of pointer operands or that all pointers are the same.
bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, Order);
// FIXME: Reordering isn't implemented for non-power-of-2 nodes yet.
- if (!Order.empty() && !hasFullVectorsOnly(*TTI, ScalarTy, Sz)) {
+ if (!Order.empty() && !has_single_bit(VL.size())) {
assert(VectorizeNonPowerOf2 && "non-power-of-2 number of loads only "
"supported with VectorizeNonPowerOf2");
return LoadsState::Gather;
@@ -4819,13 +4786,12 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
});
});
const unsigned AbsoluteDiff = std::abs(*Diff);
- if (IsPossibleStrided &&
- (IsAnyPointerUsedOutGraph ||
- ((Sz > MinProfitableStridedLoads ||
- (AbsoluteDiff <= MaxProfitableLoadStride * Sz &&
- hasFullVectorsOnly(*TTI, ScalarTy, AbsoluteDiff))) &&
- AbsoluteDiff > Sz) ||
- *Diff == -(static_cast<int>(Sz) - 1))) {
+ if (IsPossibleStrided && (IsAnyPointerUsedOutGraph ||
+ ((Sz > MinProfitableStridedLoads ||
+ (AbsoluteDiff <= MaxProfitableLoadStride * Sz &&
+ has_single_bit(AbsoluteDiff))) &&
+ AbsoluteDiff > Sz) ||
+ *Diff == -(static_cast<int>(Sz) - 1))) {
int Stride = *Diff / static_cast<int>(Sz - 1);
if (*Diff == Stride * static_cast<int>(Sz - 1)) {
Align Alignment =
@@ -5230,7 +5196,7 @@ static bool areTwoInsertFromSameBuildVector(
std::optional<BoUpSLP::OrdersType>
BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
// FIXME: Vectorizing is not supported yet for non-power-of-2 ops.
- if (TE.isNonPowOf2Vec(*TTI))
+ if (TE.isNonPowOf2Vec())
return std::nullopt;
// No need to reorder if need to shuffle reuses, still need to shuffle the
@@ -5264,8 +5230,8 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
}
}
if (Sz == 2 && TE.getVectorFactor() == 4 &&
- TTI->getRegUsageForType(getWidenedType(TE.Scalars.front()->getType(),
- 2 * TE.getVectorFactor())) == 1)
+ TTI->getNumberOfParts(getWidenedType(TE.Scalars.front()->getType(),
+ 2 * TE.getVectorFactor())) == 1)
return std::nullopt;
if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
Sz)) {
@@ -5614,7 +5580,7 @@ void BoUpSLP::reorderTopToBottom() {
// Reorder the graph nodes according to their vectorization factor.
for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1;
- VF -= 2) {
+ VF /= 2) {
auto It = VFToOrderedEntries.find(VF);
if (It == VFToOrderedEntries.end())
continue;
@@ -5787,7 +5753,7 @@ bool BoUpSLP::canReorderOperands(
ArrayRef<TreeEntry *> ReorderableGathers,
SmallVectorImpl<TreeEntry *> &GatherOps) {
// FIXME: Reordering isn't implemented for non-power-of-2 nodes yet.
- if (UserTE->isNonPowOf2Vec(*TTI))
+ if (UserTE->isNonPowOf2Vec())
return false;
for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
@@ -5962,7 +5928,7 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
const auto AllowsReordering = [&](const TreeEntry *TE) {
// FIXME: Reordering isn't implemented for non-power-of-2 nodes yet.
- if (TE->isNonPowOf2Vec(*TTI))
+ if (TE->isNonPowOf2Vec())
return false;
if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
(TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
@@ -6615,7 +6581,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
case Instruction::ExtractElement: {
bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
// FIXME: Vectorizing is not supported yet for non-power-of-2 ops.
- if (!hasFullVectorsOnly(*TTI, VL0->getType(), VL.size()))
+ if (!has_single_bit(VL.size()))
return TreeEntry::NeedToGather;
if (Reuse || !CurrentOrder.empty())
return TreeEntry::Vectorize;
@@ -7025,7 +6991,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
ReuseShuffleIndices.clear();
} else {
// FIXME: Reshuffing scalars is not supported yet for non-power-of-2 ops.
- if (UserTreeIdx.UserTE && UserTreeIdx.UserTE->isNonPowOf2Vec(*TTI)) {
+ if (UserTreeIdx.UserTE && UserTreeIdx.UserTE->isNonPowOf2Vec()) {
LLVM_DEBUG(dbgs() << "SLP: Reshuffling scalars not yet supported "
"for nodes with padding.\n");
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
@@ -7038,8 +7004,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
return isa<UndefValue>(V) ||
!isConstant(V);
})) ||
- !hasFullVectorsOnly(*TTI, UniqueValues.front()->getType(),
- NumUniqueScalarValues)) {
+ !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) {
if (DoNotFail && UniquePositions.size() > 1 &&
NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() &&
all_of(UniqueValues, [=](Value *V) {
@@ -7047,9 +7012,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
areAllUsersVectorized(cast<Instruction>(V),
UserIgnoreList);
})) {
- // Find the number of elements, which forms full vectors.
- unsigned PWSz = getFullVectorNumberOfElements(
- *TTI, UniqueValues.front()->getType(), UniqueValues.size());
+ unsigned PWSz = PowerOf2Ceil(UniqueValues.size());
if (PWSz == VL.size()) {
ReuseShuffleIndices.clear();
} else {
@@ -9260,7 +9223,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
}
assert(!CommonMask.empty() && "Expected non-empty common mask.");
auto *MaskVecTy = getWidenedType(ScalarTy, Mask.size());
- unsigned NumParts = TTI.getRegUsageForType(MaskVecTy);
+ unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
if (NumParts == 0 || NumParts >= Mask.size())
NumParts = 1;
unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
@@ -9277,7 +9240,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
}
assert(!CommonMask.empty() && "Expected non-empty common mask.");
auto *MaskVecTy = getWidenedType(ScalarTy, Mask.size());
- unsigned NumParts = TTI.getRegUsageForType(MaskVecTy);
+ unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
if (NumParts == 0 || NumParts >= Mask.size())
NumParts = 1;
unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
@@ -9783,7 +9746,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
unsigned const NumElts = SrcVecTy->getNumElements();
unsigned const NumScalars = VL.size();
- unsigned NumOfParts = TTI->getRegUsageForType(SrcVecTy);
+ unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
unsigned OffsetBeg = *getElementIndex(VL.front());
@@ -11027,9 +10990,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
// Keep original scalar if number of externally used instructions in
// the same entry is not power of 2. It may help to do some extra
// vectorization for now.
- KeepScalar =
- ScalarUsesCount <= 1 ||
- !hasFullVectorsOnly(*TTI, EU.Scalar->getType(), ScalarUsesCount);
+ KeepScalar = ScalarUsesCount <= 1 || !has_single_bit(ScalarUsesCount);
}
if (KeepScalar) {
ExternalUsesAsOriginalScalar.insert(EU.Scalar);
@@ -11726,14 +11687,13 @@ BoUpSLP::isGatherShuffledEntry(
if (TE == VectorizableTree.front().get())
return {};
// FIXME: Gathering for non-power-of-2 nodes not implemented yet.
- if (TE->isNonPowOf2Vec(*TTI))
+ if (TE->isNonPowOf2Vec())
return {};
Mask.assign(VL.size(), PoisonMaskElem);
assert(TE->UserTreeIndices.size() == 1 &&
"Expected only single user of the gather node.");
- // Number of scalars must be divisible by NumParts.
- if (VL.size() % NumParts != 0)
- return {};
+ assert(VL.size() % NumParts == 0 &&
+ "Number of scalars must be divisible by NumParts.");
unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
SmallVector<std::optional<TTI::ShuffleKind>> Res;
for (unsigned Part : seq<unsigned>(NumParts)) {
@@ -12872,7 +12832,7 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
SmallVector<SmallVector<const TreeEntry *>> Entries;
Type *OrigScalarTy = GatheredScalars.front()->getType();
auto *VecTy = getWidenedType(ScalarTy, GatheredScalars.size());
- unsigned NumParts = TTI->getRegUsageForType(VecTy);
+ unsigned NumParts = TTI->getNumberOfParts(VecTy);
if (NumParts == 0 || NumParts >= GatheredScalars.size())
NumParts = 1;
if (!all_of(GatheredScalars, IsaPred<UndefValue>)) {
@@ -16126,7 +16086,7 @@ void BoUpSLP::computeMinimumValueSizes() {
[&](Value *V) { return AnalyzedMinBWVals.contains(V); }))
return 0u;
- unsigned NumParts = TTI->getRegUsageForType(
+ unsigned NumParts = TTI->getNumberOfParts(
getWidenedType(TreeRootIT, VF * ScalarTyNumElements));
// The maximum bit width required to represent all the values that can be
@@ -16183,7 +16143,7 @@ void BoUpSLP::computeMinimumValueSizes() {
// use - ignore it.
if (NumParts > 1 &&
NumParts ==
- TTI->getRegUsageForType(getWidenedType(
+ TTI->getNumberOfParts(getWidenedType(
IntegerType::get(F->getContext(), bit_ceil(MaxBitWidth)), VF)))
return 0u;
@@ -17044,7 +17004,7 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
for (unsigned I = NextInst; I < MaxInst; ++I) {
unsigned ActualVF = std::min(MaxInst - I, VF);
- if (!hasFullVectorsOnly(*TTI, ScalarTy, ActualVF))
+ if (!has_single_bit(ActualVF))
continue;
if (MaxVFOnly && ActualVF < MaxVF)
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-whole-regs-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-whole-regs-loads.ll
index c9a3158acdda34..54dc33dbc0d00b 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-whole-regs-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-whole-regs-loads.ll
@@ -4,11 +4,15 @@
define i64 @test(ptr %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <6 x i64>, ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <6 x i64> [[TMP0]], <6 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 0, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i64> [[TMP1]], <i64 42, i64 42, i64 42, i64 42, i64 42, i64 42, i64 42, i64 42>
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP2]])
-; CHECK-NEXT: ret i64 [[TMP3]]
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[P]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[ARRAYIDX_4]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP0]], <4 x i64> poison, <8 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 0>
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.vector.insert.v8i64.v4i64(<8 x i64> [[TMP2]], <4 x i64> [[TMP0]], i64 0)
+; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.vector.insert.v8i64.v2i64(<8 x i64> [[TMP3]], <2 x i64> [[TMP1]], i64 4)
+; CHECK-NEXT: [[TMP5:%.*]] = mul <8 x i64> [[TMP4]], <i64 42, i64 42, i64 42, i64 42, i64 42, i64 42, i64 42, i64 42>
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
+; CHECK-NEXT: ret i64 [[TMP6]]
;
entry:
%arrayidx.1 = getelementptr inbounds i64, ptr %p, i64 1