[llvm] c7496ce - [LV] Use SCEV to check if minimum iteration check is known. (#111310)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 18 15:23:02 PDT 2024
Author: Florian Hahn
Date: 2024-10-18T15:22:59-07:00
New Revision: c7496cebac047665dbe9460d536c7654bc643a43
URL: https://github.com/llvm/llvm-project/commit/c7496cebac047665dbe9460d536c7654bc643a43
DIFF: https://github.com/llvm/llvm-project/commit/c7496cebac047665dbe9460d536c7654bc643a43.diff
LOG: [LV] Use SCEV to check if minimum iteration check is known. (#111310)
Use SCEV to check whether the minimum iteration check (TC < Step) is known
to be true or false. If so, the compare folds to a constant and no runtime
check needs to be emitted.
This is a first step towards addressing
https://github.com/llvm/llvm-project/issues/111098. To catch the exact
case from the issue, we need to do extra work to make sure the wrap
flags on the shl are preserved and used by SCEV.
Note that skeleton creation will gradually be moved to VPlan, and this
simplification should eventually be done as a VPlan transform. The current
plan is to migrate skeleton creation to VPlan starting with the parts
closest to those VPlan already creates: first induction resume value
creation (started with
https://github.com/llvm/llvm-project/pull/110577), then the memory and SCEV
checks, and finally the minimum iteration checks.
PR: https://github.com/llvm/llvm-project/pull/111310
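For context, the query pattern added to emitIterationCountCheck boils down
to the following minimal, self-contained sketch (isMinItersCheckKnown is an
illustrative name, not a helper in the patch):

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Instructions.h"
#include <optional>

using namespace llvm;

// Returns true/false when SCEV can prove "TC < Step" one way or the
// other, and std::nullopt when the runtime compare must still be emitted.
static std::optional<bool>
isMinItersCheckKnown(ScalarEvolution &SE, const Loop *L, Value *Count,
                     Value *Step, CmpInst::Predicate P) {
  // Loop guards (e.g. a dominating "if (n > 0)") often tighten the trip
  // count's range enough to decide the predicate.
  const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), L);
  const SCEV *StepSCEV = SE.getSCEV(Step);
  if (SE.isKnownPredicate(P, TripCountSCEV, StepSCEV))
    return true; // TC < Step: the scalar fallback is always taken.
  if (SE.isKnownPredicate(CmpInst::getInversePredicate(P), TripCountSCEV,
                          StepSCEV))
    return false; // TC >= Step: the vector loop is always entered.
  return std::nullopt; // Unknown: emit the runtime compare.
}

Returning std::nullopt corresponds to the pre-patch behavior of always
emitting the compare; applyLoopGuards is what lets the constant trip counts
in the tests below fold the branch condition to true or false.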
Added:
Modified:
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
llvm/test/Transforms/LoopVectorize/if-reduction.ll
llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index e377e1d820373d..857efbdf687cb8 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2447,12 +2447,26 @@ void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
};
TailFoldingStyle Style = Cost->getTailFoldingStyle();
- if (Style == TailFoldingStyle::None)
- CheckMinIters =
- Builder.CreateICmp(P, Count, CreateStep(), "min.iters.check");
- else if (VF.isScalable() &&
- !isIndvarOverflowCheckKnownFalse(Cost, VF, UF) &&
- Style != TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck) {
+ if (Style == TailFoldingStyle::None) {
+ Value *Step = CreateStep();
+ ScalarEvolution &SE = *PSE.getSE();
+ // TODO: Emit unconditional branch to vector preheader instead of
+ // conditional branch with known condition.
+ const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
+ // Check if the trip count is < the step.
+ if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
+ // TODO: Ensure step is at most the trip count when determining max VF and
+ // UF, w/o tail folding.
+ CheckMinIters = Builder.getTrue();
+ } else if (!SE.isKnownPredicate(CmpInst::getInversePredicate(P),
+ TripCountSCEV, SE.getSCEV(Step))) {
+ // Generate the minimum iteration check only if we cannot prove the
+ // check is known to be true, or known to be false.
+ CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
+ } // else step known to be < trip count, use CheckMinIters preset to false.
+ } else if (VF.isScalable() &&
+ !isIndvarOverflowCheckKnownFalse(Cost, VF, UF) &&
+ Style != TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck) {
// vscale is not necessarily a power-of-2, which means we cannot guarantee
// an overflow to zero when updating induction variables and so an
// additional overflow check is required before entering the vector loop.
@@ -2462,8 +2476,18 @@ void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
Value *MaxUIntTripCount =
ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
+ Value *Step = CreateStep();
+#ifndef NDEBUG
+ ScalarEvolution &SE = *PSE.getSE();
+ const SCEV *TC2OverflowSCEV = SE.applyLoopGuards(SE.getSCEV(LHS), OrigLoop);
+ assert(
+ !isIndvarOverflowCheckKnownFalse(Cost, VF * UF) &&
+ !SE.isKnownPredicate(CmpInst::getInversePredicate(ICmpInst::ICMP_ULT),
+ TC2OverflowSCEV, SE.getSCEV(Step)) &&
+ "unexpectedly proved overflow check to be known");
+#endif
// Don't execute the vector loop if (UMax - n) < (VF * UF).
- CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
+ CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, Step);
}
// Create new preheader for vector loop.
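As a side note on the second hunk, the condition it guards can be
paraphrased as the standalone sketch below (a fixed i64 count type and an
illustrative function name, assuming Step already has vscale folded in):

#include <cstdint>
#include <limits>

// Bypass the scalable vector loop when fewer than Step iterations remain
// before the count type wraps: a non-power-of-2 vscale cannot guarantee
// the induction variable update overflows to exactly zero.
static bool mustBypassForOverflow(uint64_t Count, uint64_t Step) {
  const uint64_t MaxUIntTripCount = std::numeric_limits<uint64_t>::max();
  return (MaxUIntTripCount - Count) < Step; // (UMax - n) < (VF * UF)
}

The new assert documents that, by the time this path is reached, neither
isIndvarOverflowCheckKnownFalse nor SCEV (with loop guards applied) could
already prove the overflow check unnecessary.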
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
index 8c50d86489c9dd..7dcab6d807cf72 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
@@ -11,8 +11,7 @@ define void @f1(ptr %A) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index 93034f4dbe56ec..5496eed16e5443 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -11,10 +11,7 @@ target triple = "aarch64-unknown-linux-gnu"
define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-LABEL: @test_widen(
; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; TFNONE: vector.ph:
; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -146,10 +143,7 @@ for.cond.cleanup:
define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-LABEL: @test_if_then(
; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; TFNONE: vector.ph:
; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -310,10 +304,7 @@ for.cond.cleanup:
define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-LABEL: @test_widen_if_then_else(
; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; TFNONE: vector.ph:
; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -490,10 +481,7 @@ for.cond.cleanup:
define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-LABEL: @test_widen_nomask(
; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; TFNONE: vector.ph:
; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -548,11 +536,6 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
;
; TFFALLBACK-LABEL: @test_widen_nomask(
; TFFALLBACK-NEXT: entry:
-; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; TFFALLBACK: vector.ph:
; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
@@ -561,7 +544,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFFALLBACK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]]
; TFFALLBACK: vector.body:
-; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFFALLBACK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]]
; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
; TFFALLBACK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
@@ -569,12 +552,9 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFFALLBACK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
-; TFFALLBACK: scalar.ph:
-; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[N_VEC]], [[VECTOR_BODY]] ]
-; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; TFFALLBACK: for.body:
-; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[VECTOR_BODY]] ]
; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]]
; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8
; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]]
@@ -626,10 +606,7 @@ for.cond.cleanup:
define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-LABEL: @test_widen_optmask(
; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; TFNONE: vector.ph:
; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -791,10 +768,7 @@ for.cond.cleanup:
define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, double %m) #4 {
; TFNONE-LABEL: @test_widen_fmuladd_and_call(
; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; TFNONE: vector.ph:
; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
index 0e95d742092e65..d18cdc1ae617a4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
@@ -10,8 +10,7 @@ define void @test_invar_gep(ptr %dst) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
index 94b90aa3cfb308..1d150141e6251e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
@@ -757,8 +757,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
index 4a2f9d07ed91c6..4a3bc4679bba49 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
@@ -7,10 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
define void @test_widen(ptr noalias %a, ptr readnone %b) #1 {
; WIDE-LABEL: @test_widen(
; WIDE-NEXT: entry:
-; WIDE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; WIDE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; WIDE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; WIDE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; WIDE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; WIDE: vector.ph:
; WIDE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; WIDE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
diff --git a/llvm/test/Transforms/LoopVectorize/if-reduction.ll b/llvm/test/Transforms/LoopVectorize/if-reduction.ll
index 383b62b368ef0f..330cdeaeb7c275 100644
--- a/llvm/test/Transforms/LoopVectorize/if-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-reduction.ll
@@ -1659,6 +1659,7 @@ for.end: ; preds = %for.body, %entry
ret i64 %1
}
+; FIXME: %indvars.iv.next is poison on first iteration due to sub nuw 0, 1.
define i32 @fcmp_0_sub_select1(ptr noalias %x, i32 %N) nounwind readonly {
; CHECK-LABEL: define i32 @fcmp_0_sub_select1(
; CHECK-SAME: ptr noalias [[X:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -1668,8 +1669,7 @@ define i32 @fcmp_0_sub_select1(ptr noalias %x, i32 %N) nounwind readonly {
; CHECK: [[FOR_HEADER]]:
; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 0, [[ZEXT]]
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
diff --git a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
index b3ec3e8f0f3c63..5e65832aba8cc4 100644
--- a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
+++ b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
@@ -415,6 +415,7 @@ exit:
; Test case to make sure that uses of versioned strides of type i1 are properly
; extended. From https://github.com/llvm/llvm-project/issues/91369.
+; TODO: Better check (udiv i64 15, %g.64) after checking if %g == 1.
define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-LABEL: define void @zext_of_i1_stride(
; CHECK-SAME: i1 [[G:%.*]], ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -423,8 +424,7 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[G_64:%.*]] = zext i1 [[G]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 15, [[G_64]]
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true
; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
@@ -489,8 +489,7 @@ define void @sext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], [[G_64]]
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK-NEXT: br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true
; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]