[llvm] 13bf603 - [LV] Handle Fold-Tail of loops with vectorization factor equal to 1

Anh Tuyen Tran via llvm-commits llvm-commits at lists.llvm.org
Fri May 22 06:31:06 PDT 2020


Author: Anh Tuyen Tran
Date: 2020-05-22T13:30:56Z
New Revision: 13bf6039c9ae959598e8c117c3d8c5a72303fd2b

URL: https://github.com/llvm/llvm-project/commit/13bf6039c9ae959598e8c117c3d8c5a72303fd2b
DIFF: https://github.com/llvm/llvm-project/commit/13bf6039c9ae959598e8c117c3d8c5a72303fd2b.diff

LOG: [LV] Handle Fold-Tail of loops with vectorization factor equal to 1

Summary:
When handling loops whose VF is 1, fold-tail vectorization sets the
backedge-taken count of the original loop to a single-element vector.
This causes a type mismatch during instruction generation.

The purpose of this patch is to address the case of VF == 1.
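
As an illustration of the fix (a minimal sketch, not the committed code;
the helper name emitBackedgeTakenCount is hypothetical), the guard added
in VPlan::execute keeps the backedge-taken count scalar when VF == 1
instead of wrapping it in a single-element vector:

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical distillation of the VF == 1 guard; names mirror VPlan.cpp.
static Value *emitBackedgeTakenCount(IRBuilder<> &Builder, Value *TC,
                                     unsigned VF) {
  // trip.count.minus.1 is a scalar of TC's type (e.g. i64).
  Value *TCMO = Builder.CreateSub(TC, ConstantInt::get(TC->getType(), 1),
                                  "trip.count.minus.1");
  // Splatting unconditionally would produce a <1 x i64> even when VF == 1,
  // mismatching the scalar values generated for an unrolled-but-not-
  // vectorized loop; keep the scalar value in that case.
  return VF == 1 ? TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
}

Each unroll part then receives this same value via State->set, matching
the per-lane scalar icmp ule instructions seen in the VF1-VPlanExe test
below.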

Reviewers: Ayal (Ayal Zaks), bmahjour (Bardia Mahjour), fhahn (Florian Hahn), gilr (Gil Rapaport), rengolin (Renato Golin)

Reviewed By: Ayal (Ayal Zaks), bmahjour (Bardia Mahjour), fhahn (Florian Hahn)

Subscribers: Ayal (Ayal Zaks), rkruppe (Hanna Kruppe), bmahjour (Bardia Mahjour), rogfer01 (Roger Ferrer Ibanez), vkmr (Vineet Kumar), bollu (Siddharth Bhat), hiraditya (Aditya Kumar), llvm-commits (Mailing List llvm-commits)

Tag: LLVM

Differential Revision: https://reviews.llvm.org/D79976

Added: 
    llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll

Modified: 
    llvm/lib/Transforms/Vectorize/VPlan.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index de5eb35b96af..a015550b0c77 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -441,7 +441,9 @@ void VPlan::execute(VPTransformState *State) {
     IRBuilder<> Builder(State->CFG.PrevBB->getTerminator());
     auto *TCMO = Builder.CreateSub(TC, ConstantInt::get(TC->getType(), 1),
                                    "trip.count.minus.1");
-    Value *VTCMO = Builder.CreateVectorSplat(State->VF, TCMO, "broadcast");
+    auto VF = State->VF;
+    Value *VTCMO =
+        VF == 1 ? TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
     for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part)
       State->set(BackedgeTakenCount, VTCMO, Part);
   }
@@ -809,12 +811,17 @@ void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
   Value *CanonicalIV = State.CanonicalIV;
   Type *STy = CanonicalIV->getType();
   IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
-  Value *VStart = Builder.CreateVectorSplat(State.VF, CanonicalIV, "broadcast");
+  auto VF = State.VF;
+  Value *VStart = VF == 1
+                      ? CanonicalIV
+                      : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
   for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
     SmallVector<Constant *, 8> Indices;
-    for (unsigned Lane = 0, VF = State.VF; Lane < VF; ++Lane)
+    for (unsigned Lane = 0; Lane < VF; ++Lane)
       Indices.push_back(ConstantInt::get(STy, Part * VF + Lane));
-    Constant *VStep = ConstantVector::get(Indices);
+    // If VF == 1, there is only one iteration in the loop above, thus the
+    // element pushed back into Indices is ConstantInt::get(STy, Part)
+    Constant *VStep = VF == 1 ? Indices.back() : ConstantVector::get(Indices);
     // Add the consecutive indices to the vector value.
     Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
     State.set(getVPValue(), CanonicalVectorIV, Part);

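To make the second hunk concrete, here is a hedged C++ distillation of
what VPWidenCanonicalIVRecipe::execute now generates (an illustrative
sketch with a hypothetical helper name, not the committed code): with
VF == 1 both the broadcast and the step vector degenerate to scalars, so
the widened IV for unroll part Part is the scalar add CanonicalIV + Part.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical distillation; names mirror VPlan.cpp.
static void widenCanonicalIV(IRBuilder<> &Builder, Value *CanonicalIV,
                             unsigned VF, unsigned UF,
                             SmallVectorImpl<Value *> &PerPartIV) {
  Type *STy = CanonicalIV->getType();
  // With VF == 1 the "broadcast" is just the scalar IV itself.
  Value *VStart = VF == 1
                      ? CanonicalIV
                      : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Constant *, 8> Indices;
    for (unsigned Lane = 0; Lane < VF; ++Lane)
      Indices.push_back(ConstantInt::get(STy, Part * VF + Lane));
    // With VF == 1 the loop above runs once, so the step is the single
    // scalar constant Part and the add below stays scalar.
    Constant *VStep = VF == 1 ? Indices.back() : ConstantVector::get(Indices);
    PerPartIV.push_back(Builder.CreateAdd(VStart, VStep, "vec.iv"));
  }
}
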
diff  --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
new file mode 100644
index 000000000000..2973a4425a5d
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s  -loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
+; RUN: opt < %s  -loop-vectorize -force-vector-interleave=4 -S | FileCheck %s
+; RUN: opt < %s  -loop-vectorize -force-vector-width=1 -force-vector-interleave=4 -S | FileCheck %s --check-prefix=CHECK-VF1
+
+; These tests check that the fold-tail procedure produces correct scalar code when
+; loop vectorization only unrolls the loop but does not vectorize it.
+
+; CHECK-REMARKS:      remark: {{.*}} interleaved loop (interleaved count: 4)
+; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
+; CHECK-REMARKS-NOT:  remark: {{.*}} vectorized loop
+
+define void @VF1-VPlanExe() {
+; CHECK-LABEL: @VF1-VPlanExe
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[INDUCTION]], 14
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[INDUCTION1]], 14
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[INDUCTION2]], 14
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[INDUCTION3]], 14
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
+; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
+; CHECK:       middle.block:
+; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !2
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 15
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @VF1-VPWidenCanonicalIVRecipeExe(double* %ptr1) {
+; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds double, double* [[PTR1:%.*]], i64 15
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr double, double* [[PTR1]], i64 16
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP3]]
+; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i64 [[VEC_IV]], 14
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i64 [[VEC_IV4]], 14
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 [[VEC_IV5]], 14
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ule i64 [[VEC_IV6]], 14
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
+; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !3
+; CHECK:       middle.block:
+; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi double* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PTR1]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[ADDR:%.*]] = phi double* [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, double* [[ADDR]], i64 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq double* [[PTR]], [[PTR2]]
+; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !4
+;
+entry:
+  %ptr2 = getelementptr inbounds double, double* %ptr1, i64 15
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %addr = phi double* [ %ptr, %for.body ], [ %ptr1, %entry ]
+  %ptr = getelementptr inbounds double, double* %addr, i64 1
+  %cond = icmp eq double* %ptr, %ptr2
+  br i1 %cond, label %for.cond.cleanup, label %for.body
+}
+
+; The following test case is extended from the test in https://reviews.llvm.org/D80085.
+; Like the two tests above, it checks that the fold-tail procedure produces correct
+; scalar code when loop vectorization only unrolls the loop but does not vectorize it.
+
+define void @pr45679(i32* %A) optsize {
+; CHECK-VF1-LABEL: @pr45679
+; CHECK-VF1-NEXT:  entry:
+; CHECK-VF1-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-VF1:       vector.ph:
+; CHECK-VF1-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK-VF1:       vector.body:
+; CHECK-VF1-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE9:%.*]] ]
+; CHECK-VF1-NEXT:    [[INDUCTION:%.*]] = add i32 [[INDEX]], 0
+; CHECK-VF1-NEXT:    [[INDUCTION1:%.*]] = add i32 [[INDEX]], 1
+; CHECK-VF1-NEXT:    [[INDUCTION2:%.*]] = add i32 [[INDEX]], 2
+; CHECK-VF1-NEXT:    [[INDUCTION3:%.*]] = add i32 [[INDEX]], 3
+; CHECK-VF1-NEXT:    [[TMP0:%.*]] = icmp ule i32 [[INDUCTION]], 13
+; CHECK-VF1-NEXT:    [[TMP1:%.*]] = icmp ule i32 [[INDUCTION1]], 13
+; CHECK-VF1-NEXT:    [[TMP2:%.*]] = icmp ule i32 [[INDUCTION2]], 13
+; CHECK-VF1-NEXT:    [[TMP3:%.*]] = icmp ule i32 [[INDUCTION3]], 13
+; CHECK-VF1-NEXT:    br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK-VF1:       pred.store.if:
+; CHECK-VF1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDUCTION]]
+; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP4]], align 1
+; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE]]
+; CHECK-VF1:       pred.store.continue:
+; CHECK-VF1-NEXT:    br i1 [[TMP1]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
+; CHECK-VF1:       pred.store.if4:
+; CHECK-VF1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION1]]
+; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP5]], align 1
+; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE5]]
+; CHECK-VF1:       pred.store.continue5:
+; CHECK-VF1-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
+; CHECK-VF1:       pred.store.if6:
+; CHECK-VF1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION2]]
+; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP6]], align 1
+; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE7]]
+; CHECK-VF1:       pred.store.continue7:
+; CHECK-VF1-NEXT:    br i1 [[TMP3]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]]
+; CHECK-VF1:       pred.store.if8:
+; CHECK-VF1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION3]]
+; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP7]], align 1
+; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE9]]
+; CHECK-VF1:       pred.store.continue9:
+; CHECK-VF1-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-VF1-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
+; CHECK-VF1-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
+; CHECK-VF1:       middle.block:
+; CHECK-VF1-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-VF1:       scalar.ph:
+; CHECK-VF1-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-VF1-NEXT:    br label [[LOOP:%.*]]
+; CHECK-VF1:       loop:
+; CHECK-VF1-NEXT:    [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; CHECK-VF1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[RIV]]
+; CHECK-VF1-NEXT:    store i32 13, i32* [[ARRAYIDX]], align 1
+; CHECK-VF1-NEXT:    [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
+; CHECK-VF1-NEXT:    [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14
+; CHECK-VF1-NEXT:    br i1 [[COND]], label [[EXIT]], label [[LOOP]]
+; CHECK-VF1:       exit:
+; CHECK-VF1-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %riv = phi i32 [ 0, %entry ], [ %rivPlus1, %loop ]
+  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %riv
+  store i32 13, i32* %arrayidx, align 1
+  %rivPlus1 = add nuw nsw i32 %riv, 1
+  %cond = icmp eq i32 %rivPlus1, 14
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  ret void
+}
