[llvm] 329b972 - [SLP] Try to match reductions before trying to vectorize a vector build sequence.

Valery N Dmitriev via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 29 13:42:35 PDT 2022


Author: Valery N Dmitriev
Date: 2022-08-29T13:32:14-07:00
New Revision: 329b972d416a3dc23ab3eacb17b884ead03af8e5

URL: https://github.com/llvm/llvm-project/commit/329b972d416a3dc23ab3eacb17b884ead03af8e5
DIFF: https://github.com/llvm/llvm-project/commit/329b972d416a3dc23ab3eacb17b884ead03af8e5.diff

LOG: [SLP] Try to match reductions before trying to vectorize a vector build sequence.

This patch changes the order in which we search for reductions versus other
vectorization opportunities. The idea is that failing to match a reduction does
no harm to later attempts to find vectorizable operations within a vector build
sequence, whereas trying in the opposite order has a good chance of ruining the
opportunity to match a reduction at all. We also don't want to try vectorizing
binary operations too early, as 2-way vectorization may effectively prohibit
wider vectorization and lead to less efficient code.

Differential Revision: https://reviews.llvm.org/D132590
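
For context (a hypothetical reconstruction, not part of the commit), the updated
test redux-feed-buildvector.ll below models roughly the following source shape:
two independent fadd reductions whose scalar results are inserted into a
2-element vector and scattered. Under the old ordering, the insert/scatter tail
was matched first and the multiply-add chains were greedily 2-way vectorized
(the old <2 x double> CHECK lines), which blocked the wide reductions; matching
reductions first yields the two <8 x double> llvm.vector.reduce.fadd calls seen
in the new CHECK lines. The kernel name and parameter names here are invented
for illustration, and fast-math semantics are assumed to match the fast flags
in the IR.

    /* Sketch only: two horizontal fadd reductions feeding a 2-element
       vector build (out[0], out[16]), compiled with -ffast-math. */
    void kernel(const double *a, const double *b, double *out) {
      double s0 = 0.0, s1 = 0.0;
      for (int i = 0; i < 8; ++i) {
        s0 += b[i]      * a[2 * i + 1];   /* reduction over b[0..7]   */
        s1 += b[i + 16] * a[2 * i + 1];   /* reduction over b[16..23] */
      }
      out[0]  = s0;   /* the two scalar results form the build      */
      out[16] = s1;   /* sequence that feeds the masked scatter     */
    }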

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 7fa9359e705b..c3f66624a98c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -11989,20 +11989,30 @@ bool SLPVectorizerPass::vectorizeSimpleInstructions(InstSetVector &Instructions,
                                                     bool AtTerminator) {
   bool OpsChanged = false;
   SmallVector<Instruction *, 4> PostponedCmps;
+  SmallVector<WeakTrackingVH> PostponedInsts;
+  // pass1 - try to vectorize reductions only
   for (auto *I : reverse(Instructions)) {
     if (R.isDeleted(I))
       continue;
+    if (isa<CmpInst>(I)) {
+      PostponedCmps.push_back(I);
+      continue;
+    }
+    OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts);
+  }
+  // pass2 - try to match and vectorize a buildvector sequence.
+  for (auto *I : reverse(Instructions)) {
+    if (R.isDeleted(I) || isa<CmpInst>(I))
+      continue;
     if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
       OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
     } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
       OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
-    } else if (isa<CmpInst>(I)) {
-      PostponedCmps.push_back(I);
-      continue;
     }
-    // Try to find reductions in buildvector sequnces.
-    OpsChanged |= vectorizeRootInstruction(nullptr, I, BB, R, TTI);
   }
+  // Now try to vectorize postponed instructions.
+  OpsChanged |= tryToVectorize(PostponedInsts, R);
+
   if (AtTerminator) {
     // Try to find reductions first.
     for (Instruction *I : PostponedCmps) {

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
index acaa1b4a8eae..1a4cd7a2ee96 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
@@ -10,102 +10,23 @@ declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double>, <2 x double*>, i32
 define void @test(double* nocapture readonly %arg, double* nocapture readonly %arg1, double* nocapture %arg2) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP1_0:%.*]] = getelementptr inbounds double, double* [[ARG:%.*]], i64 1
-; CHECK-NEXT:    [[LD1_0:%.*]] = load double, double* [[GEP1_0]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x double*> poison, double* [[ARG:%.*]], i32 0
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x double*> [[TMP0]], <8 x double*> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, <8 x double*> [[SHUFFLE]], <8 x i64> <i64 1, i64 3, i64 5, i64 7, i64 9, i64 11, i64 13, i64 15>
 ; CHECK-NEXT:    [[GEP2_0:%.*]] = getelementptr inbounds double, double* [[ARG1:%.*]], i64 16
-; CHECK-NEXT:    [[GEP1_1:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 3
-; CHECK-NEXT:    [[LD1_1:%.*]] = load double, double* [[GEP1_1]], align 8
-; CHECK-NEXT:    [[GEP0_1:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 1
-; CHECK-NEXT:    [[GEP2_1:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 17
-; CHECK-NEXT:    [[GEP1_2:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 5
-; CHECK-NEXT:    [[LD1_2:%.*]] = load double, double* [[GEP1_2]], align 8
-; CHECK-NEXT:    [[GEP0_2:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 2
-; CHECK-NEXT:    [[GEP2_2:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 18
-; CHECK-NEXT:    [[GEP1_3:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 7
-; CHECK-NEXT:    [[LD1_3:%.*]] = load double, double* [[GEP1_3]], align 8
-; CHECK-NEXT:    [[GEP0_3:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 3
-; CHECK-NEXT:    [[GEP2_3:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 19
-; CHECK-NEXT:    [[GEP1_4:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 9
-; CHECK-NEXT:    [[LD1_4:%.*]] = load double, double* [[GEP1_4]], align 8
-; CHECK-NEXT:    [[GEP0_4:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 4
-; CHECK-NEXT:    [[GEP2_4:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 20
-; CHECK-NEXT:    [[GEP1_5:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 11
-; CHECK-NEXT:    [[LD1_5:%.*]] = load double, double* [[GEP1_5]], align 8
-; CHECK-NEXT:    [[GEP0_5:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 5
-; CHECK-NEXT:    [[GEP2_5:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 21
-; CHECK-NEXT:    [[GEP1_6:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 13
-; CHECK-NEXT:    [[LD1_6:%.*]] = load double, double* [[GEP1_6]], align 8
-; CHECK-NEXT:    [[GEP0_6:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 6
-; CHECK-NEXT:    [[GEP2_6:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 22
-; CHECK-NEXT:    [[GEP1_7:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 15
-; CHECK-NEXT:    [[LD1_7:%.*]] = load double, double* [[GEP1_7]], align 8
-; CHECK-NEXT:    [[GEP0_7:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 7
-; CHECK-NEXT:    [[GEP2_7:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 23
-; CHECK-NEXT:    [[LD0_0:%.*]] = load double, double* [[ARG1]], align 8
-; CHECK-NEXT:    [[LD2_0:%.*]] = load double, double* [[GEP2_0]], align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[LD0_0]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[LD2_0]], i32 1
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[LD1_0]], i32 0
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[LD1_0]], i32 1
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul fast <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    [[LD0_1:%.*]] = load double, double* [[GEP0_1]], align 8
-; CHECK-NEXT:    [[LD2_1:%.*]] = load double, double* [[GEP2_1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[LD0_1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[LD2_1]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x double> poison, double [[LD1_1]], i32 0
-; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[LD1_1]], i32 1
-; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <2 x double> [[TMP6]], [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <2 x double> [[TMP4]], [[TMP9]]
-; CHECK-NEXT:    [[LD0_2:%.*]] = load double, double* [[GEP0_2]], align 8
-; CHECK-NEXT:    [[LD2_2:%.*]] = load double, double* [[GEP2_2]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x double> poison, double [[LD0_2]], i32 0
-; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[LD2_2]], i32 1
-; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <2 x double> poison, double [[LD1_2]], i32 0
-; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <2 x double> [[TMP13]], double [[LD1_2]], i32 1
-; CHECK-NEXT:    [[TMP15:%.*]] = fmul fast <2 x double> [[TMP12]], [[TMP14]]
-; CHECK-NEXT:    [[TMP16:%.*]] = fadd fast <2 x double> [[TMP10]], [[TMP15]]
-; CHECK-NEXT:    [[LD0_3:%.*]] = load double, double* [[GEP0_3]], align 8
-; CHECK-NEXT:    [[LD2_3:%.*]] = load double, double* [[GEP2_3]], align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <2 x double> poison, double [[LD0_3]], i32 0
-; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x double> [[TMP17]], double [[LD2_3]], i32 1
-; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x double> poison, double [[LD1_3]], i32 0
-; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x double> [[TMP19]], double [[LD1_3]], i32 1
-; CHECK-NEXT:    [[TMP21:%.*]] = fmul fast <2 x double> [[TMP18]], [[TMP20]]
-; CHECK-NEXT:    [[TMP22:%.*]] = fadd fast <2 x double> [[TMP16]], [[TMP21]]
-; CHECK-NEXT:    [[LD0_4:%.*]] = load double, double* [[GEP0_4]], align 8
-; CHECK-NEXT:    [[LD2_4:%.*]] = load double, double* [[GEP2_4]], align 8
-; CHECK-NEXT:    [[TMP23:%.*]] = insertelement <2 x double> poison, double [[LD0_4]], i32 0
-; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <2 x double> [[TMP23]], double [[LD2_4]], i32 1
-; CHECK-NEXT:    [[TMP25:%.*]] = insertelement <2 x double> poison, double [[LD1_4]], i32 0
-; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <2 x double> [[TMP25]], double [[LD1_4]], i32 1
-; CHECK-NEXT:    [[TMP27:%.*]] = fmul fast <2 x double> [[TMP24]], [[TMP26]]
-; CHECK-NEXT:    [[TMP28:%.*]] = fadd fast <2 x double> [[TMP22]], [[TMP27]]
-; CHECK-NEXT:    [[LD0_5:%.*]] = load double, double* [[GEP0_5]], align 8
-; CHECK-NEXT:    [[LD2_5:%.*]] = load double, double* [[GEP2_5]], align 8
-; CHECK-NEXT:    [[TMP29:%.*]] = insertelement <2 x double> poison, double [[LD0_5]], i32 0
-; CHECK-NEXT:    [[TMP30:%.*]] = insertelement <2 x double> [[TMP29]], double [[LD2_5]], i32 1
-; CHECK-NEXT:    [[TMP31:%.*]] = insertelement <2 x double> poison, double [[LD1_5]], i32 0
-; CHECK-NEXT:    [[TMP32:%.*]] = insertelement <2 x double> [[TMP31]], double [[LD1_5]], i32 1
-; CHECK-NEXT:    [[TMP33:%.*]] = fmul fast <2 x double> [[TMP30]], [[TMP32]]
-; CHECK-NEXT:    [[TMP34:%.*]] = fadd fast <2 x double> [[TMP28]], [[TMP33]]
-; CHECK-NEXT:    [[LD0_6:%.*]] = load double, double* [[GEP0_6]], align 8
-; CHECK-NEXT:    [[LD2_6:%.*]] = load double, double* [[GEP2_6]], align 8
-; CHECK-NEXT:    [[TMP35:%.*]] = insertelement <2 x double> poison, double [[LD0_6]], i32 0
-; CHECK-NEXT:    [[TMP36:%.*]] = insertelement <2 x double> [[TMP35]], double [[LD2_6]], i32 1
-; CHECK-NEXT:    [[TMP37:%.*]] = insertelement <2 x double> poison, double [[LD1_6]], i32 0
-; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <2 x double> [[TMP37]], double [[LD1_6]], i32 1
-; CHECK-NEXT:    [[TMP39:%.*]] = fmul fast <2 x double> [[TMP36]], [[TMP38]]
-; CHECK-NEXT:    [[TMP40:%.*]] = fadd fast <2 x double> [[TMP34]], [[TMP39]]
-; CHECK-NEXT:    [[LD0_7:%.*]] = load double, double* [[GEP0_7]], align 8
-; CHECK-NEXT:    [[LD2_7:%.*]] = load double, double* [[GEP2_7]], align 8
-; CHECK-NEXT:    [[TMP41:%.*]] = insertelement <2 x double> poison, double [[LD0_7]], i32 0
-; CHECK-NEXT:    [[TMP42:%.*]] = insertelement <2 x double> [[TMP41]], double [[LD2_7]], i32 1
-; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <2 x double> poison, double [[LD1_7]], i32 0
-; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <2 x double> [[TMP43]], double [[LD1_7]], i32 1
-; CHECK-NEXT:    [[TMP45:%.*]] = fmul fast <2 x double> [[TMP42]], [[TMP44]]
-; CHECK-NEXT:    [[TMP46:%.*]] = fadd fast <2 x double> [[TMP40]], [[TMP45]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> [[TMP1]], i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x double> undef)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[GEP2_0]] to <8 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x double>, <8 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <8 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[ARG1]] to <8 x double>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x double>, <8 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast <8 x double> [[TMP7]], [[TMP2]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP5]])
+; CHECK-NEXT:    [[I142:%.*]] = insertelement <2 x double> poison, double [[TMP9]], i64 0
+; CHECK-NEXT:    [[I143:%.*]] = insertelement <2 x double> [[I142]], double [[TMP10]], i64 1
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds double, double* [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
-; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[TMP46]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[I143]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
 entry:

