[llvm] 1813ffd - [SLP][REVEC] Make SLP support revectorization (-slp-revec) and add simple test. (#98269)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 17 05:14:14 PDT 2024
Author: Han-Kuan Chen
Date: 2024-07-17T20:14:12+08:00
New Revision: 1813ffd6b2eb04ee2c296a4399a18748740a439d
URL: https://github.com/llvm/llvm-project/commit/1813ffd6b2eb04ee2c296a4399a18748740a439d
DIFF: https://github.com/llvm/llvm-project/commit/1813ffd6b2eb04ee2c296a4399a18748740a439d.diff
LOG: [SLP][REVEC] Make SLP support revectorization (-slp-revec) and add simple test. (#98269)
This PR makes the SLP vectorizer support revectorization. An option,
-slp-revec, is added to control the functionality.
Reference:
https://discourse.llvm.org/t/rfc-make-slp-vectorizer-revectorize-vector-instructions/79436
Added:
llvm/test/Transforms/SLPVectorizer/revec.ll
Modified:
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 722590a840a54..ccb6734d5618c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -113,6 +113,10 @@ static cl::opt<bool>
RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
cl::desc("Run the SLP vectorization passes"));
+static cl::opt<bool>
+ SLPReVec("slp-revec", cl::init(false), cl::Hidden,
+ cl::desc("Enable vectorization for wider vector utilization"));
+
static cl::opt<int>
SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
cl::desc("Only vectorize if you gain more than this "
@@ -227,13 +231,26 @@ static const unsigned MaxPHINumOperands = 128;
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
+ // TODO: Support ScalableVectorType.
+ if (SLPReVec && isa<FixedVectorType>(Ty))
+ Ty = Ty->getScalarType();
return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
!Ty->isPPC_FP128Ty();
}
+/// \returns the number of elements for Ty.
+static unsigned getNumElements(Type *Ty) {
+ assert(!isa<ScalableVectorType>(Ty) &&
+ "ScalableVectorType is not supported.");
+ if (auto *VecTy = dyn_cast<FixedVectorType>(Ty))
+ return VecTy->getNumElements();
+ return 1;
+}
+
/// \returns the vector type of ScalarTy based on vectorization factor.
static FixedVectorType *getWidenedType(Type *ScalarTy, unsigned VF) {
- return FixedVectorType::get(ScalarTy, VF);
+ return FixedVectorType::get(ScalarTy->getScalarType(),
+ VF * getNumElements(ScalarTy));
}
/// \returns True if the value is a constant (but not globals/constant
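As a quick illustration of the three helpers touched above, here is a minimal
standalone sketch (the *Sketch names and the boolean SLPReVec parameter are
stand-ins for the in-tree helpers and the cl::opt; it assumes a normal LLVM
development setup): with revectorization enabled, a <4 x i32> "scalar" passes
the element-type check via its i32 element, and widening it by VF = 4 yields
the <16 x i32> that shows up in the test below.

// Standalone sketch of the helpers above; illustrative names only, not the
// in-tree code. Link against LLVMCore/LLVMSupport.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace llvm;

// Mirrors the REVEC-aware isValidElementType: a fixed vector is judged by its
// scalar element type when revectorization is enabled.
static bool isValidElementTypeSketch(Type *Ty, bool SLPReVec) {
  if (SLPReVec && isa<FixedVectorType>(Ty))
    Ty = Ty->getScalarType();
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

// Mirrors getNumElements: a plain scalar counts as a single element.
static unsigned getNumElementsSketch(Type *Ty) {
  assert(!isa<ScalableVectorType>(Ty) && "ScalableVectorType is not supported.");
  if (auto *VecTy = dyn_cast<FixedVectorType>(Ty))
    return VecTy->getNumElements();
  return 1;
}

// Mirrors getWidenedType: the element count is scaled by the number of
// elements in ScalarTy, so a vector "scalar" widens to a longer vector.
static FixedVectorType *getWidenedTypeSketch(Type *ScalarTy, unsigned VF) {
  return FixedVectorType::get(ScalarTy->getScalarType(),
                              VF * getNumElementsSketch(ScalarTy));
}

int main() {
  LLVMContext Ctx;
  Type *V4I32 = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  outs() << "valid with -slp-revec: "
         << (isValidElementTypeSketch(V4I32, /*SLPReVec=*/true) ? "yes" : "no")
         << "\n";
  getWidenedTypeSketch(V4I32, /*VF=*/4)->print(outs()); // <16 x i32>
  outs() << "\n";
}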
@@ -6779,7 +6796,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
// Don't handle vectors.
- if (S.OpValue->getType()->isVectorTy() &&
+ if (!SLPReVec && S.OpValue->getType()->isVectorTy() &&
!isa<InsertElementInst>(S.OpValue)) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
@@ -6787,7 +6804,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
- if (SI->getValueOperand()->getType()->isVectorTy()) {
+ if (!SLPReVec && SI->getValueOperand()->getType()->isVectorTy()) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
return;
@@ -11833,10 +11850,12 @@ class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
Value *castToScalarTyElem(Value *V,
std::optional<bool> IsSigned = std::nullopt) {
auto *VecTy = cast<VectorType>(V->getType());
- if (VecTy->getElementType() == ScalarTy)
+ assert(getNumElements(ScalarTy) < getNumElements(VecTy) &&
+ (getNumElements(VecTy) % getNumElements(ScalarTy) == 0));
+ if (VecTy->getElementType() == ScalarTy->getScalarType())
return V;
return Builder.CreateIntCast(
- V, VectorType::get(ScalarTy, VecTy->getElementCount()),
+ V, VectorType::get(ScalarTy->getScalarType(), VecTy->getElementCount()),
IsSigned.value_or(!isKnownNonNegative(V, SimplifyQuery(*R.DL))));
}
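The same idea drives the cast above: when ScalarTy is itself a vector, the
operand keeps its own lane count and only its element type is adjusted, e.g.
a <16 x i16> operand becomes <16 x i32> when ScalarTy is <4 x i32>. A minimal
sketch, assuming a free function with the made-up names
castToScalarTyElemSketch/numElts in place of the ShuffleInstructionBuilder
method:

// Standalone sketch; hypothetical names, not the in-tree builder method.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace llvm;

static unsigned numElts(Type *Ty) {
  if (auto *VT = dyn_cast<FixedVectorType>(Ty))
    return VT->getNumElements();
  return 1;
}

static Value *castToScalarTyElemSketch(IRBuilder<> &Builder, Value *V,
                                       Type *ScalarTy, bool IsSigned) {
  auto *VecTy = cast<VectorType>(V->getType());
  // Same invariant as the new assert: V has strictly more lanes than ScalarTy
  // and its lane count is an exact multiple.
  assert(numElts(ScalarTy) < numElts(VecTy) &&
         numElts(VecTy) % numElts(ScalarTy) == 0);
  if (VecTy->getElementType() == ScalarTy->getScalarType())
    return V;
  // Cast to ScalarTy's element type while preserving V's element count.
  return Builder.CreateIntCast(
      V, VectorType::get(ScalarTy->getScalarType(), VecTy->getElementCount()),
      IsSigned);
}

int main() {
  LLVMContext Ctx;
  Module M("revec_cast_sketch", Ctx);
  IRBuilder<> Builder(Ctx);
  Type *ArgTy = FixedVectorType::get(Type::getInt16Ty(Ctx), 16);
  auto *FnTy = FunctionType::get(Type::getVoidTy(Ctx), {ArgTy}, false);
  Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
  Builder.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));
  // ScalarTy plays the role of a vector "scalar" of the SLP tree.
  Type *ScalarTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  Value *Cast = castToScalarTyElemSketch(Builder, F->getArg(0), ScalarTy,
                                         /*IsSigned=*/false);
  Builder.CreateRetVoid();
  Cast->getType()->print(outs()); // <16 x i32>
  outs() << "\n";
}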
@@ -12221,7 +12240,8 @@ Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
return ShuffleBuilder.finalize(std::nullopt);
};
Value *V = vectorizeTree(VE, PostponedPHIs);
- if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
+ if (VF * getNumElements(VL[0]->getType()) !=
+ cast<FixedVectorType>(V->getType())->getNumElements()) {
if (!VE->ReuseShuffleIndices.empty()) {
// Reshuffle to get only unique values.
// If some of the scalars are duplicated in the vectorization
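And for the widened lane-count check above: a node of VF = 4 "scalars" that
are each <4 x i32> is now satisfied by a single <16 x i32> value instead of
being sent through the reuse-shuffle path. A tiny sketch with illustrative
values (numElts and the concrete types are assumptions, not the in-tree code):

// Minimal sketch of the new width comparison; illustrative values only.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static unsigned numElts(Type *Ty) {
  if (auto *VT = dyn_cast<FixedVectorType>(Ty))
    return VT->getNumElements();
  return 1;
}

int main() {
  LLVMContext Ctx;
  // Each tree "scalar" is a <4 x i32>, and the entry holds VF = 4 of them.
  Type *ScalarTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  unsigned VF = 4;
  // Type of the value actually produced for the operand node.
  auto *ProducedTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 16);
  // Old check: VF != produced lanes (4 != 16) would request an adjustment.
  // New check: VF * elements-per-scalar != produced lanes (16 == 16) does not.
  bool NeedsAdjustment = VF * numElts(ScalarTy) != ProducedTy->getNumElements();
  outs() << "needs adjustment: " << (NeedsAdjustment ? "yes" : "no") << "\n";
}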
diff --git a/llvm/test/Transforms/SLPVectorizer/revec.ll b/llvm/test/Transforms/SLPVectorizer/revec.ll
new file mode 100644
index 0000000000000..4b37b100763a9
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/revec.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -S -slp-revec -slp-max-reg-size=1024 -slp-threshold=-100 %s | FileCheck %s
+
+define void @test1(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr [[A:%.*]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add <16 x i32> [[TMP1]], [[TMP0]]
+; CHECK-NEXT: store <16 x i32> [[TMP2]], ptr [[C:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %a, i64 8
+ %arrayidx11 = getelementptr inbounds i32, ptr %a, i64 12
+ %0 = load <4 x i32>, ptr %a, align 4
+ %1 = load <4 x i32>, ptr %arrayidx3, align 4
+ %2 = load <4 x i32>, ptr %arrayidx7, align 4
+ %3 = load <4 x i32>, ptr %arrayidx11, align 4
+ %arrayidx19 = getelementptr inbounds i32, ptr %b, i64 4
+ %arrayidx23 = getelementptr inbounds i32, ptr %b, i64 8
+ %arrayidx27 = getelementptr inbounds i32, ptr %b, i64 12
+ %4 = load <4 x i32>, ptr %b, align 4
+ %5 = load <4 x i32>, ptr %arrayidx19, align 4
+ %6 = load <4 x i32>, ptr %arrayidx23, align 4
+ %7 = load <4 x i32>, ptr %arrayidx27, align 4
+ %add.i = add <4 x i32> %4, %0
+ %add.i63 = add <4 x i32> %5, %1
+ %add.i64 = add <4 x i32> %6, %2
+ %add.i65 = add <4 x i32> %7, %3
+ %arrayidx36 = getelementptr inbounds i32, ptr %c, i64 4
+ %arrayidx39 = getelementptr inbounds i32, ptr %c, i64 8
+ %arrayidx42 = getelementptr inbounds i32, ptr %c, i64 12
+ store <4 x i32> %add.i, ptr %c, align 4
+ store <4 x i32> %add.i63, ptr %arrayidx36, align 4
+ store <4 x i32> %add.i64, ptr %arrayidx39, align 4
+ store <4 x i32> %add.i65, ptr %arrayidx42, align 4
+ ret void
+}