[llvm] [SLP]Reorder buildvector/reduction vectorization and fuse the loops. (PR #96943)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 27 11:15:16 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-transforms
Author: Alexey Bataev (alexey-bataev)
<details>
<summary>Changes</summary>
Currently the SLP vectorizer first tries to find reduction nodes and only
afterwards vectorizes buildvector sequences. Instead, it should first try to
vectorize wide buildvector sequences, then reductions, and only then the
smaller buildvector sequences.
---
Full diff: https://github.com/llvm/llvm-project/pull/96943.diff
5 Files Affected:
- (modified) llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h (+2-2)
- (modified) llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp (+22-10)
- (modified) llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll (+19-18)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll (+9-12)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll (+3-5)
``````````diff
diff --git a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index 4f99d171469e4..a37a44b89e0de 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -133,11 +133,11 @@ struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
/// Try to vectorize trees that start at insertvalue instructions.
bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
- slpvectorizer::BoUpSLP &R);
+ slpvectorizer::BoUpSLP &R, bool MaxVFOnly);
/// Try to vectorize trees that start at insertelement instructions.
bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
- slpvectorizer::BoUpSLP &R);
+ slpvectorizer::BoUpSLP &R, bool MaxVFOnly);
/// Tries to vectorize \p CmpInts. \Returns true on success.
template <typename ItT>
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 974f966d46e81..47d7a7e488e37 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -18074,7 +18074,8 @@ bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
}
bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
- BasicBlock *BB, BoUpSLP &R) {
+ BasicBlock *BB, BoUpSLP &R,
+ bool MaxVFOnly) {
if (!R.canMapToVector(IVI->getType()))
return false;
@@ -18085,11 +18086,12 @@ bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
// Aggregate value is unlikely to be processed in vector register.
- return tryToVectorizeList(BuildVectorOpds, R);
+ return tryToVectorizeList(BuildVectorOpds, R, MaxVFOnly);
}
bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
- BasicBlock *BB, BoUpSLP &R) {
+ BasicBlock *BB, BoUpSLP &R,
+ bool MaxVFOnly) {
SmallVector<Value *, 16> BuildVectorInsts;
SmallVector<Value *, 16> BuildVectorOpds;
SmallVector<int> Mask;
@@ -18099,7 +18101,7 @@ bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
return false;
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
- return tryToVectorizeList(BuildVectorInsts, R);
+ return tryToVectorizeList(BuildVectorInsts, R, MaxVFOnly);
}
template <typename T>
@@ -18319,20 +18321,30 @@ bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions,
"This function only accepts Insert instructions");
bool OpsChanged = false;
SmallVector<WeakTrackingVH> PostponedInsts;
- // pass1 - try to vectorize reductions only
for (auto *I : reverse(Instructions)) {
+ // pass1 - try to match and vectorize a buildvector sequence for MaxVF only.
+ if (R.isDeleted(I) || isa<CmpInst>(I))
+ continue;
+ if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
+ OpsChanged |=
+ vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/true);
+ } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
+ OpsChanged |=
+ vectorizeInsertElementInst(LastInsertElem, BB, R, /*MaxVFOnly=*/true);
+ }
+ // pass2 - try to vectorize reductions only
if (R.isDeleted(I))
continue;
OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts);
- }
- // pass2 - try to match and vectorize a buildvector sequence.
- for (auto *I : reverse(Instructions)) {
if (R.isDeleted(I) || isa<CmpInst>(I))
continue;
+ // pass3 - try to match and vectorize a buildvector sequence.
if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
- OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
+ OpsChanged |=
+ vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/false);
} else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
- OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
+ OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R,
+ /*MaxVFOnly=*/false);
}
}
// Now try to vectorize postponed instructions.
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
index c6209fd71063a..a24cb81541d7c 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
@@ -10,32 +10,33 @@ define fastcc i64 @zot(float %arg, float %arg1, float %arg2, float %arg3, float
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[ARG3:%.*]], i32 2
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> <float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
-; CHECK-NEXT: [[VAL12:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[TMP2]], float [[VAL12]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x float> [[TMP4]], float 0.000000e+00, i32 1
-; CHECK-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[TMP5]], <float 2.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x float> <float poison, float 0.000000e+00>, float [[ARG3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = fadd fast <2 x float> [[TMP4]], <float 1.000000e+00, float 0.000000e+00>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP5]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP6]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], <float 2.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: br i1 [[ARG6:%.*]], label [[BB18:%.*]], label [[BB57:%.*]]
; CHECK: bb18:
-; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x float> [ [[TMP6]], [[BB:%.*]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x float> [[TMP6]], i32 2
-; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[TMP8]], 2.000000e+00
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x float> [[TMP6]], i32 3
-; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[TMP9]], 3.000000e+00
+; CHECK-NEXT: [[TMP9:%.*]] = phi <4 x float> [ [[TMP8]], [[BB:%.*]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[TMP8]], i32 2
+; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[TMP10]], 2.000000e+00
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x float> [[TMP8]], i32 3
+; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[TMP11]], 3.000000e+00
; CHECK-NEXT: br i1 [[ARG7:%.*]], label [[BB25:%.*]], label [[BB57]]
; CHECK: bb25:
-; CHECK-NEXT: [[TMP10:%.*]] = phi <4 x float> [ [[TMP7]], [[BB18]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x float> [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = phi <4 x float> [ [[TMP9]], [[BB18]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x float> [[TMP3]], i32 1
; CHECK-NEXT: br label [[BB30:%.*]]
; CHECK: bb30:
; CHECK-NEXT: [[VAL31:%.*]] = phi float [ [[VAL55:%.*]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[TMP11]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
-; CHECK-NEXT: [[TMP13:%.*]] = uitofp <4 x i8> [[TMP12]] to <4 x float>
-; CHECK-NEXT: [[TMP14:%.*]] = fsub fast <4 x float> [[TMP13]], [[TMP3]]
-; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x float> [[TMP14]], [[TMP10]]
-; CHECK-NEXT: [[TMP16:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP15]])
+; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[TMP13]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = uitofp <4 x i8> [[TMP14]] to <4 x float>
+; CHECK-NEXT: [[TMP16:%.*]] = fsub fast <4 x float> [[TMP15]], [[TMP3]]
+; CHECK-NEXT: [[TMP17:%.*]] = fmul fast <4 x float> [[TMP16]], [[TMP12]]
+; CHECK-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP17]])
; CHECK-NEXT: [[VAL55]] = tail call fast float @llvm.minnum.f32(float [[VAL31]], float [[ARG1:%.*]])
-; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[TMP16]])
+; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[TMP18]])
; CHECK-NEXT: call void @ham(float [[VAL55]], float [[VAL56]])
; CHECK-NEXT: br i1 [[ARG8:%.*]], label [[BB30]], label [[BB57]]
; CHECK: bb57:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll b/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll
index 536526a5cfe06..692b470d80dcc 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll
@@ -5,18 +5,15 @@ define i1 @test() {
; CHECK-LABEL: define i1 @test
; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = shl i64 0, 0
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i64> <i64 poison, i64 poison, i64 poison, i64 poison, i64 0, i64 0, i64 0, i64 0>, i64 0, i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i64> <i64 undef, i64 undef, i64 0, i64 0>, i64 [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> [[TMP11]], i64 0, i32 1
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 1, i32 1, i32 3, i32 0>
-; CHECK-NEXT: [[TMP6:%.*]] = or <8 x i64> [[TMP3]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i64> [[TMP3]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i64> [[TMP6]], <8 x i64> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 12, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ult <8 x i64> [[TMP8]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP9]])
-; CHECK-NEXT: ret i1 [[TMP10]]
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i64> <i64 poison, i64 poison, i64 poison, i64 poison, i64 0, i64 0, i64 0, i64 0>, i64 0, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[TMP0]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i64> zeroinitializer, <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 1, i32 1, i32 3, i32 0>
+; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = sub <8 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> [[TMP4]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 12, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult <8 x i64> [[TMP5]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP6]])
+; CHECK-NEXT: ret i1 [[TMP7]]
;
entry:
%0 = shl i64 0, 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll b/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll
index 94a1d7aa1951c..bf4903fd19b09 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll
@@ -4,13 +4,11 @@
; YAML: --- !Missed
; YAML-NEXT: Pass: slp-vectorizer
-; YAML-NEXT: Name: NotBeneficial
+; YAML-NEXT: Name: NotPossible
; YAML-NEXT: Function: g
; YAML-NEXT: Args:
-; YAML-NEXT: - String: 'List vectorization was possible but not beneficial with cost '
-; YAML-NEXT: - Cost: '0'
-; YAML-NEXT: - String: ' >= '
-; YAML-NEXT: - Treshold: '0'
+; YAML-NEXT: - String: 'Cannot SLP vectorize list: vectorization was impossible'
+; YAML-NEXT: - String: ' with available vectorization factors'
define <2 x i32> @g(<2 x i32> %x, i32 %a, i32 %b) {
; CHECK-LABEL: @g(
``````````
</details>
https://github.com/llvm/llvm-project/pull/96943