[llvm] [LoopVectorize] Use predicated version of getSmallConstantMaxTripCount (PR #109928)
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 27 07:07:31 PDT 2024
https://github.com/david-arm updated https://github.com/llvm/llvm-project/pull/109928
>From 3072e43ea10a5ce7fdc05a8b28163ac4e896a9d0 Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Wed, 25 Sep 2024 08:30:35 +0000
Subject: [PATCH 1/3] Add tests and extra max trip count debug output
---
.../Transforms/Vectorize/LoopVectorize.cpp | 2 +
.../AArch64/low_trip_count_predicates.ll | 683 ++++++++++++++++++
.../RISCV/riscv-vector-reverse.ll | 2 +
3 files changed, 687 insertions(+)
create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index bd493fb2c1ba19..f943e27e2a0560 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3989,6 +3989,8 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
unsigned MaxTC = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
+ if (TC != MaxTC)
+ LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
if (TC == 1) {
reportVectorizationFailure("Single iteration (non) loop",
"loop trip count is one, irrelevant for vectorization",
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
new file mode 100644
index 00000000000000..8a32ec315fa97f
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
@@ -0,0 +1,683 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; REQUIRES: asserts
+; RUN: opt -S < %s -p loop-vectorize -debug-only=loop-vectorize -mattr=+sve 2>%t | FileCheck %s
+; RUN: cat %t | FileCheck %s --check-prefix=DEBUG
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; DEBUG-LABEL: LV: Checking a loop in 'low_vf_ic_is_better'
+; DEBUG: LV: Found trip count: 0
+; DEBUG-NOT: LV: Found maximum trip count
+; DEBUG: LV: IC is 2
+; DEBUG: LV: VF is vscale x 16
+; DEBUG: Main Loop VF:vscale x 16, Main Loop UF:2, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1
+
+; DEBUG-LABEL: LV: Checking a loop in 'trip_count_too_small'
+; DEBUG: LV: Found trip count: 0
+; DEBUG-NOT: LV: Found maximum trip count
+; DEBUG: LV: IC is 2
+; DEBUG: LV: VF is vscale x 16
+; DEBUG: Main Loop VF:vscale x 16, Main Loop UF:2, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1
+
+; DEBUG-LABEL: LV: Checking a loop in 'too_many_runtime_checks'
+; DEBUG: LV: Found trip count: 0
+; DEBUG-NOT: LV: Found maximum trip count
+; DEBUG: LV: IC is 2
+; DEBUG: LV: VF is vscale x 16
+; DEBUG: Main Loop VF:vscale x 16, Main Loop UF:1, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1
+
+; DEBUG-LABEL: LV: Checking a loop in 'overflow_indvar_known_false'
+; DEBUG: LV: Found trip count: 0
+; DEBUG-NOT: LV: Found maximum trip count
+; DEBUG: LV: can fold tail by masking.
+; DEBUG: Executing best plan with VF=vscale x 16, UF=1
+
+define void @low_vf_ic_is_better(ptr nocapture noundef %p, i16 noundef %val) {
+; CHECK-LABEL: define void @low_vf_ic_is_better(
+; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[P_PROMOTED:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[P_PROMOTED]], 19
+; CHECK-NEXT: br i1 [[CMP7]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]]
+; CHECK: [[ITER_CHECK]]:
+; CHECK-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8
+; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4
+; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[P_PROMOTED]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[P_PROMOTED]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 20, [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[P_PROMOTED]], 1
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 19, [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32
+; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP10]], [[TMP6]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 4294967295
+; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
+; CHECK-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 32
+; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 32
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 32
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i32 0
+; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 16
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP24]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP25]], align 1
+; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP27:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 16
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP29]]
+; CHECK-NEXT: store <vscale x 16 x i8> [[TMP26]], ptr [[TMP22]], align 1
+; CHECK-NEXT: store <vscale x 16 x i8> [[TMP27]], ptr [[TMP30]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT: [[IND_END5:%.*]] = add i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 8
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP33]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
+; CHECK: [[VEC_EPILOG_PH]]:
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 8
+; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 [[TMP3]], [[TMP35]]
+; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF3]]
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC4]]
+; CHECK-NEXT: [[TMP36:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT9]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = add i64 [[TMP0]], [[INDEX6]]
+; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[OFFSET_IDX7]], 0
+; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x i8>, ptr [[TMP40]], align 1
+; CHECK-NEXT: [[TMP41:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT10]]
+; CHECK-NEXT: store <vscale x 8 x i8> [[TMP41]], ptr [[TMP40]], align 1
+; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX6]], [[TMP37]]
+; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC4]]
+; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N12:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC4]]
+; CHECK-NEXT: br i1 [[CMP_N12]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [100 x i8], ptr [[V]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP43]], [[CONV]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[TMP44:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP44]], 19
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[WHILE_END_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[WHILE_END]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %p.promoted = load i32, ptr %p, align 4
+ %cmp7 = icmp ult i32 %p.promoted, 19
+ br i1 %cmp7, label %while.preheader, label %while.end
+
+while.preheader:
+ %conv = trunc i16 %val to i8
+ %v = getelementptr inbounds nuw i8, ptr %p, i64 4
+ %0 = zext nneg i32 %p.promoted to i64
+ br label %while.body
+
+while.body:
+ %indvars.iv = phi i64 [ %0, %while.preheader ], [ %indvars.iv.next, %while.body ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx = getelementptr inbounds nuw [100 x i8], ptr %v, i64 0, i64 %indvars.iv
+ %1 = load i8, ptr %arrayidx, align 1
+ %add = add i8 %1, %conv
+ store i8 %add, ptr %arrayidx, align 1
+ %2 = and i64 %indvars.iv.next, 4294967295
+ %exitcond.not = icmp eq i64 %2, 19
+ br i1 %exitcond.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+define void @trip_count_too_small(ptr nocapture noundef %p, i16 noundef %val) {
+; CHECK-LABEL: define void @trip_count_too_small(
+; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[P_PROMOTED:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[P_PROMOTED]], 3
+; CHECK-NEXT: br i1 [[CMP7]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]]
+; CHECK: [[ITER_CHECK]]:
+; CHECK-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8
+; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4
+; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[P_PROMOTED]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[P_PROMOTED]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 4, [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[P_PROMOTED]], 1
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 3, [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32
+; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP10]], [[TMP6]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 4294967295
+; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
+; CHECK-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 32
+; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 32
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 32
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i32 0
+; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 16
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP24]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP25]], align 1
+; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP27:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 16
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP29]]
+; CHECK-NEXT: store <vscale x 16 x i8> [[TMP26]], ptr [[TMP22]], align 1
+; CHECK-NEXT: store <vscale x 16 x i8> [[TMP27]], ptr [[TMP30]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT: [[IND_END5:%.*]] = add i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 8
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP33]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
+; CHECK: [[VEC_EPILOG_PH]]:
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 8
+; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 [[TMP3]], [[TMP35]]
+; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF3]]
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC4]]
+; CHECK-NEXT: [[TMP36:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT9]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = add i64 [[TMP0]], [[INDEX6]]
+; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[OFFSET_IDX7]], 0
+; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x i8>, ptr [[TMP40]], align 1
+; CHECK-NEXT: [[TMP41:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT10]]
+; CHECK-NEXT: store <vscale x 8 x i8> [[TMP41]], ptr [[TMP40]], align 1
+; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX6]], [[TMP37]]
+; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC4]]
+; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N12:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC4]]
+; CHECK-NEXT: br i1 [[CMP_N12]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [100 x i8], ptr [[V]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP43]], [[CONV]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[TMP44:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP44]], 3
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: [[WHILE_END_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[WHILE_END]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %p.promoted = load i32, ptr %p, align 4
+ %cmp7 = icmp ult i32 %p.promoted, 3
+ br i1 %cmp7, label %while.preheader, label %while.end
+
+while.preheader:
+ %conv = trunc i16 %val to i8
+ %v = getelementptr inbounds nuw i8, ptr %p, i64 4
+ %0 = zext nneg i32 %p.promoted to i64
+ br label %while.body
+
+while.body:
+ %indvars.iv = phi i64 [ %0, %while.preheader ], [ %indvars.iv.next, %while.body ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx = getelementptr inbounds nuw [100 x i8], ptr %v, i64 0, i64 %indvars.iv
+ %1 = load i8, ptr %arrayidx, align 1
+ %add = add i8 %1, %conv
+ store i8 %add, ptr %arrayidx, align 1
+ %2 = and i64 %indvars.iv.next, 4294967295
+ %exitcond.not = icmp eq i64 %2, 3
+ br i1 %exitcond.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+define void @too_many_runtime_checks(ptr nocapture noundef %p, ptr nocapture noundef %p1, ptr nocapture noundef readonly %p2, ptr nocapture noundef readonly %p3, i16 noundef %val) {
+; CHECK-LABEL: define void @too_many_runtime_checks(
+; CHECK-SAME: ptr nocapture noundef [[P:%.*]], ptr nocapture noundef [[P1:%.*]], ptr nocapture noundef readonly [[P2:%.*]], ptr nocapture noundef readonly [[P3:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[CMP20:%.*]] = icmp ult i32 [[TMP0]], 16
+; CHECK-NEXT: br i1 [[CMP20]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]]
+; CHECK: [[ITER_CHECK]]:
+; CHECK-NEXT: [[CONV8:%.*]] = trunc i16 [[VAL]] to i8
+; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP0]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 17, [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP6]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP0]], 1
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 16, [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP7]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult i32 [[TMP11]], [[TMP7]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP9]], 4294967295
+; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK: [[VECTOR_MEMCHECK]]:
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP1]], 17
+; CHECK-NEXT: [[TMP16:%.*]] = add i32 [[TMP0]], 1
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[TMP18:%.*]] = sub i64 [[TMP15]], [[TMP17]]
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw i64 [[TMP1]], 4
+; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP1]], 21
+; CHECK-NEXT: [[TMP21:%.*]] = sub i64 [[TMP20]], [[TMP17]]
+; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP21]]
+; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP1]]
+; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP18]]
+; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[P3]], i64 [[TMP1]]
+; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr i8, ptr [[P3]], i64 [[TMP18]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP3]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP1]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: [[BOUND08:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP5]]
+; CHECK-NEXT: [[BOUND19:%.*]] = icmp ult ptr [[SCEVGEP4]], [[SCEVGEP1]]
+; CHECK-NEXT: [[FOUND_CONFLICT10:%.*]] = and i1 [[BOUND08]], [[BOUND19]]
+; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT10]]
+; CHECK-NEXT: [[BOUND011:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP7]]
+; CHECK-NEXT: [[BOUND112:%.*]] = icmp ult ptr [[SCEVGEP6]], [[SCEVGEP1]]
+; CHECK-NEXT: [[FOUND_CONFLICT13:%.*]] = and i1 [[BOUND011]], [[BOUND112]]
+; CHECK-NEXT: [[CONFLICT_RDX14:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT13]]
+; CHECK-NEXT: [[BOUND015:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP5]]
+; CHECK-NEXT: [[BOUND116:%.*]] = icmp ult ptr [[SCEVGEP4]], [[SCEVGEP3]]
+; CHECK-NEXT: [[FOUND_CONFLICT17:%.*]] = and i1 [[BOUND015]], [[BOUND116]]
+; CHECK-NEXT: [[CONFLICT_RDX18:%.*]] = or i1 [[CONFLICT_RDX14]], [[FOUND_CONFLICT17]]
+; CHECK-NEXT: [[BOUND019:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP7]]
+; CHECK-NEXT: [[BOUND120:%.*]] = icmp ult ptr [[SCEVGEP6]], [[SCEVGEP3]]
+; CHECK-NEXT: [[FOUND_CONFLICT21:%.*]] = and i1 [[BOUND019]], [[BOUND120]]
+; CHECK-NEXT: [[CONFLICT_RDX22:%.*]] = or i1 [[CONFLICT_RDX18]], [[FOUND_CONFLICT21]]
+; CHECK-NEXT: br i1 [[CONFLICT_RDX22]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 16
+; CHECK-NEXT: [[MIN_ITERS_CHECK23:%.*]] = icmp ult i64 [[TMP4]], [[TMP23]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK23]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 16
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], [[TMP25]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 16
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV8]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP1]], [[INDEX]]
+; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP28]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP29]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP30]], align 1, !alias.scope [[META8:![0-9]+]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[P3]], i64 [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <vscale x 16 x i8>, ptr [[TMP32]], align 1, !alias.scope [[META11:![0-9]+]]
+; CHECK-NEXT: [[TMP33:%.*]] = mul <vscale x 16 x i8> [[WIDE_LOAD24]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP28]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <vscale x 16 x i8>, ptr [[TMP35]], align 1, !alias.scope [[META13:![0-9]+]], !noalias [[META15:![0-9]+]]
+; CHECK-NEXT: [[TMP36:%.*]] = add <vscale x 16 x i8> [[TMP33]], [[WIDE_LOAD25]]
+; CHECK-NEXT: store <vscale x 16 x i8> [[TMP36]], ptr [[TMP35]], align 1, !alias.scope [[META13]], !noalias [[META15]]
+; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP28]]
+; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP37]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD26:%.*]] = load <vscale x 16 x i8>, ptr [[TMP38]], align 1, !alias.scope [[META17:![0-9]+]], !noalias [[META18:![0-9]+]]
+; CHECK-NEXT: [[TMP39:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD26]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <vscale x 16 x i8> [[TMP39]], ptr [[TMP38]], align 1, !alias.scope [[META17]], !noalias [[META18]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP27]]
+; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP40]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT: [[IND_END29:%.*]] = add i64 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP4]], [[N_VEC]]
+; CHECK-NEXT: [[TMP41:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP42:%.*]] = mul i64 [[TMP41]], 8
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP42]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
+; CHECK: [[VEC_EPILOG_PH]]:
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[TMP43:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP44:%.*]] = mul i64 [[TMP43]], 8
+; CHECK-NEXT: [[N_MOD_VF27:%.*]] = urem i64 [[TMP4]], [[TMP44]]
+; CHECK-NEXT: [[N_VEC28:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF27]]
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP1]], [[N_VEC28]]
+; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP46:%.*]] = mul i64 [[TMP45]], 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT36:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV8]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT37:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT36]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX30:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT38:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX31:%.*]] = add i64 [[TMP1]], [[INDEX30]]
+; CHECK-NEXT: [[TMP47:%.*]] = add i64 [[OFFSET_IDX31]], 0
+; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP47]]
+; CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds i8, ptr [[TMP48]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD32:%.*]] = load <vscale x 8 x i8>, ptr [[TMP49]], align 1, !alias.scope [[META20:![0-9]+]]
+; CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[P3]], i64 [[TMP47]]
+; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds i8, ptr [[TMP50]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD33:%.*]] = load <vscale x 8 x i8>, ptr [[TMP51]], align 1, !alias.scope [[META23:![0-9]+]]
+; CHECK-NEXT: [[TMP52:%.*]] = mul <vscale x 8 x i8> [[WIDE_LOAD33]], [[WIDE_LOAD32]]
+; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP47]]
+; CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds i8, ptr [[TMP53]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD34:%.*]] = load <vscale x 8 x i8>, ptr [[TMP54]], align 1, !alias.scope [[META25:![0-9]+]], !noalias [[META27:![0-9]+]]
+; CHECK-NEXT: [[TMP55:%.*]] = add <vscale x 8 x i8> [[TMP52]], [[WIDE_LOAD34]]
+; CHECK-NEXT: store <vscale x 8 x i8> [[TMP55]], ptr [[TMP54]], align 1, !alias.scope [[META25]], !noalias [[META27]]
+; CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP47]]
+; CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds i8, ptr [[TMP56]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD35:%.*]] = load <vscale x 8 x i8>, ptr [[TMP57]], align 1, !alias.scope [[META29:![0-9]+]], !noalias [[META30:![0-9]+]]
+; CHECK-NEXT: [[TMP58:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD35]], [[BROADCAST_SPLAT37]]
+; CHECK-NEXT: store <vscale x 8 x i8> [[TMP58]], ptr [[TMP57]], align 1, !alias.scope [[META29]], !noalias [[META30]]
+; CHECK-NEXT: [[INDEX_NEXT38]] = add nuw i64 [[INDEX30]], [[TMP46]]
+; CHECK-NEXT: [[TMP59:%.*]] = icmp eq i64 [[INDEX_NEXT38]], [[N_VEC28]]
+; CHECK-NEXT: br i1 [[TMP59]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N39:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC28]]
+; CHECK-NEXT: br i1 [[CMP_N39]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END29]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP1]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP1]], %[[VECTOR_MEMCHECK]] ], [ [[TMP1]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P2]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP60:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[P3]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP61:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[TMP61]], [[TMP60]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i8, ptr [[P1]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP62:%.*]] = load i8, ptr [[ARRAYIDX5]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP62]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX5]], align 1
+; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw [100 x i8], ptr [[V]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP63:%.*]] = load i8, ptr [[ARRAYIDX10]], align 1
+; CHECK-NEXT: [[ADD12:%.*]] = add i8 [[TMP63]], [[CONV8]]
+; CHECK-NEXT: store i8 [[ADD12]], ptr [[ARRAYIDX10]], align 1
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[TMP64:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP64]], 16
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK: [[WHILE_END_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[WHILE_END]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, ptr %p, align 4
+ %cmp20 = icmp ult i32 %0, 16
+ br i1 %cmp20, label %while.preheader, label %while.end
+
+while.preheader:
+ %conv8 = trunc i16 %val to i8
+ %v = getelementptr inbounds nuw i8, ptr %p, i64 4
+ %1 = zext nneg i32 %0 to i64
+ br label %while.body
+
+while.body:
+ %indvars.iv = phi i64 [ %1, %while.preheader ], [ %indvars.iv.next, %while.body ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p2, i64 %indvars.iv
+ %2 = load i8, ptr %arrayidx, align 1
+ %arrayidx2 = getelementptr inbounds nuw i8, ptr %p3, i64 %indvars.iv
+ %3 = load i8, ptr %arrayidx2, align 1
+ %mul = mul i8 %3, %2
+ %arrayidx5 = getelementptr inbounds nuw i8, ptr %p1, i64 %indvars.iv
+ %4 = load i8, ptr %arrayidx5, align 1
+ %add = add i8 %mul, %4
+ store i8 %add, ptr %arrayidx5, align 1
+ %arrayidx10 = getelementptr inbounds nuw [100 x i8], ptr %v, i64 0, i64 %indvars.iv
+ %5 = load i8, ptr %arrayidx10, align 1
+ %add12 = add i8 %5, %conv8
+ store i8 %add12, ptr %arrayidx10, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %6 = and i64 %indvars.iv.next, 4294967295
+ %exitcond.not = icmp eq i64 %6, 16
+ br i1 %exitcond.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+define void @overflow_indvar_known_false(ptr nocapture noundef %p, i16 noundef %val) vscale_range(1,16) {
+; CHECK-LABEL: define void @overflow_indvar_known_false(
+; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[P_PROMOTED:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[P_PROMOTED]], 1027
+; CHECK-NEXT: br i1 [[CMP7]], label %[[WHILE_PREHEADER:.*]], label %[[WHILE_END:.*]]
+; CHECK: [[WHILE_PREHEADER]]:
+; CHECK-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8
+; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4
+; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[P_PROMOTED]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = add i32 [[P_PROMOTED]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = sub i64 1028, [[TMP20]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP21:%.*]] = add i32 [[P_PROMOTED]], 1
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[TMP23:%.*]] = sub i64 1027, [[TMP22]]
+; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
+; CHECK-NEXT: [[TMP25:%.*]] = add i32 [[TMP21]], [[TMP24]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ult i32 [[TMP25]], [[TMP21]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ugt i64 [[TMP23]], 4294967295
+; CHECK-NEXT: [[TMP28:%.*]] = or i1 [[TMP26]], [[TMP27]]
+; CHECK-NEXT: br i1 [[TMP28]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], [[TMP4]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP1]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[TMP1]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[TMP1]])
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX]]
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP14]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 16 x i8> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP15]], ptr [[TMP14]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP11]])
+; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 16 x i1> [[TMP16]], i32 0
+; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, label %[[WHILE_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[WHILE_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [100 x i8], ptr [[V]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP18:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP18]], [[CONV]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[TMP29:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP29]], 1027
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK: [[WHILE_END_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[WHILE_END]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %p.promoted = load i32, ptr %p, align 4
+ %cmp7 = icmp ult i32 %p.promoted, 1027
+ br i1 %cmp7, label %while.preheader, label %while.end
+
+while.preheader:
+ %conv = trunc i16 %val to i8
+ %v = getelementptr inbounds nuw i8, ptr %p, i64 4
+ %0 = zext nneg i32 %p.promoted to i64
+ br label %while.body
+
+while.body:
+ %indvars.iv = phi i64 [ %0, %while.preheader ], [ %indvars.iv.next, %while.body ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx = getelementptr inbounds nuw [100 x i8], ptr %v, i64 0, i64 %indvars.iv
+ %1 = load i8, ptr %arrayidx, align 1
+ %add = add i8 %1, %conv
+ store i8 %add, ptr %arrayidx, align 1
+ %2 = and i64 %indvars.iv.next, 4294967295
+ %exitcond.not = icmp eq i64 %2, 1027
+ br i1 %exitcond.not, label %while.end, label %while.body, !llvm.loop !0
+
+while.end:
+ ret void
+}
+
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
+; CHECK: [[META8]] = !{[[META9:![0-9]+]]}
+; CHECK: [[META9]] = distinct !{[[META9]], [[META10:![0-9]+]]}
+; CHECK: [[META10]] = distinct !{[[META10]], !"LVerDomain"}
+; CHECK: [[META11]] = !{[[META12:![0-9]+]]}
+; CHECK: [[META12]] = distinct !{[[META12]], [[META10]]}
+; CHECK: [[META13]] = !{[[META14:![0-9]+]]}
+; CHECK: [[META14]] = distinct !{[[META14]], [[META10]]}
+; CHECK: [[META15]] = !{[[META16:![0-9]+]], [[META9]], [[META12]]}
+; CHECK: [[META16]] = distinct !{[[META16]], [[META10]]}
+; CHECK: [[META17]] = !{[[META16]]}
+; CHECK: [[META18]] = !{[[META9]], [[META12]]}
+; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]]}
+; CHECK: [[META20]] = !{[[META21:![0-9]+]]}
+; CHECK: [[META21]] = distinct !{[[META21]], [[META22:![0-9]+]]}
+; CHECK: [[META22]] = distinct !{[[META22]], !"LVerDomain"}
+; CHECK: [[META23]] = !{[[META24:![0-9]+]]}
+; CHECK: [[META24]] = distinct !{[[META24]], [[META22]]}
+; CHECK: [[META25]] = !{[[META26:![0-9]+]]}
+; CHECK: [[META26]] = distinct !{[[META26]], [[META22]]}
+; CHECK: [[META27]] = !{[[META28:![0-9]+]], [[META21]], [[META24]]}
+; CHECK: [[META28]] = distinct !{[[META28]], [[META22]]}
+; CHECK: [[META29]] = !{[[META28]]}
+; CHECK: [[META30]] = !{[[META21]], [[META24]]}
+; CHECK: [[LOOP31]] = distinct !{[[LOOP31]], [[META1]], [[META2]]}
+; CHECK: [[LOOP32]] = distinct !{[[LOOP32]], [[META1]]}
+; CHECK: [[LOOP33]] = distinct !{[[LOOP33]], [[META1]], [[META2]]}
+; CHECK: [[LOOP34]] = distinct !{[[LOOP34]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index b6a9fed507acd3..4eb3d75ec6f02d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -20,6 +20,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)!
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Found trip count: 0
+; CHECK-NEXT: LV: Found maximum trip count: 4294967295
; CHECK-NEXT: LV: Scalable vectorization is available
; CHECK-NEXT: LV: The max safe fixed VF is: 67108864.
; CHECK-NEXT: LV: The max safe scalable VF is: vscale x 4294967295.
@@ -224,6 +225,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)!
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Found trip count: 0
+; CHECK-NEXT: LV: Found maximum trip count: 4294967295
; CHECK-NEXT: LV: Scalable vectorization is available
; CHECK-NEXT: LV: The max safe fixed VF is: 67108864.
; CHECK-NEXT: LV: The max safe scalable VF is: vscale x 4294967295.
>From 3cf65dd6df3dffd6b716a33b9fe5e070ca5dd80b Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Wed, 25 Sep 2024 08:30:59 +0000
Subject: [PATCH 2/3] [LoopVectorize] Use predicated version of
getSmallConstantMaxTripCount
There are a number of places where we call getSmallConstantMaxTripCount
without passing a vector of predicates:
getSmallBestKnownTC
isIndvarOverflowCheckKnownFalse
computeMaxVF
isMoreProfitable
I've changed all of these to now pass in a predicate vector so that
we get the benefit of making better vectorisation choices when we
know the max trip count for loops that require SCEV predicate checks.
I've tried to add tests that cover all the cases affected by these
changes.
---
.../Transforms/Vectorize/LoopVectorize.cpp | 17 +-
.../AArch64/low_trip_count_predicates.ll | 366 +++---------------
2 files changed, 56 insertions(+), 327 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f943e27e2a0560..db9a04e294bfe3 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -422,7 +422,8 @@ static std::optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE,
return *EstimatedTC;
// Check if upper bound estimate is known.
- if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
+ SmallVector<const SCEVPredicate *, 2> Predicates;
+ if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L, &Predicates))
return ExpectedTC;
return std::nullopt;
@@ -2291,8 +2292,9 @@ static bool isIndvarOverflowCheckKnownFalse(
// We know the runtime overflow check is known false iff the (max) trip-count
// is known and (max) trip-count + (VF * UF) does not overflow in the type of
// the vector loop induction variable.
- if (unsigned TC =
- Cost->PSE.getSE()->getSmallConstantMaxTripCount(Cost->TheLoop)) {
+ SmallVector<const SCEVPredicate *, 2> Predicates;
+ if (unsigned TC = Cost->PSE.getSE()->getSmallConstantMaxTripCount(
+ Cost->TheLoop, &Predicates)) {
uint64_t MaxVF = VF.getKnownMinValue();
if (VF.isScalable()) {
std::optional<unsigned> MaxVScale =
@@ -3987,7 +3989,10 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
}
unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
- unsigned MaxTC = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
+
+ SmallVector<const SCEVPredicate *, 2> Predicates;
+ unsigned MaxTC =
+ PSE.getSE()->getSmallConstantMaxTripCount(TheLoop, &Predicates);
LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
if (TC != MaxTC)
LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
@@ -4278,7 +4283,9 @@ bool LoopVectorizationPlanner::isMoreProfitable(
InstructionCost CostA = A.Cost;
InstructionCost CostB = B.Cost;
- unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(OrigLoop);
+ SmallVector<const SCEVPredicate *, 2> Predicates;
+ unsigned MaxTripCount =
+ PSE.getSE()->getSmallConstantMaxTripCount(OrigLoop, &Predicates);
// Improve estimate for the vector width if it is scalable.
unsigned EstimatedWidthA = A.Width.getKnownMinValue();
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
index 8a32ec315fa97f..2cdfa0d1564219 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
@@ -7,28 +7,27 @@ target triple = "aarch64-unknown-linux-gnu"
; DEBUG-LABEL: LV: Checking a loop in 'low_vf_ic_is_better'
; DEBUG: LV: Found trip count: 0
-; DEBUG-NOT: LV: Found maximum trip count
-; DEBUG: LV: IC is 2
-; DEBUG: LV: VF is vscale x 16
-; DEBUG: Main Loop VF:vscale x 16, Main Loop UF:2, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1
+; DEBUG: LV: Found maximum trip count: 19
+; DEBUG: LV: IC is 1
+; DEBUG: LV: VF is vscale x 8
+; DEBUG: Main Loop VF:vscale x 8, Main Loop UF:1, Epilogue Loop VF:vscale x 4, Epilogue Loop UF:1
; DEBUG-LABEL: LV: Checking a loop in 'trip_count_too_small'
-; DEBUG: LV: Found trip count: 0
-; DEBUG-NOT: LV: Found maximum trip count
-; DEBUG: LV: IC is 2
-; DEBUG: LV: VF is vscale x 16
-; DEBUG: Main Loop VF:vscale x 16, Main Loop UF:2, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1
+; DEBUG: LV: Found a loop with a very small trip count. This loop is worth vectorizing only if no scalar iteration overheads are incurred.
+; DEBUG: LV: Not vectorizing: The trip count is below the minial threshold value..
; DEBUG-LABEL: LV: Checking a loop in 'too_many_runtime_checks'
; DEBUG: LV: Found trip count: 0
-; DEBUG-NOT: LV: Found maximum trip count
-; DEBUG: LV: IC is 2
-; DEBUG: LV: VF is vscale x 16
-; DEBUG: Main Loop VF:vscale x 16, Main Loop UF:1, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1
+; DEBUG: LV: Found maximum trip count: 16
+; DEBUG: LV: Clamping the MaxVF to maximum power of two not exceeding the constant trip count: 16
+; DEBUG: LV: IC is 1
+; DEBUG: LV: VF is 16
+; DEBUG: LV: Vectorization is not beneficial: expected trip count < minimum profitable VF (16 < 32)
+; DEBUG: LV: Too many memory checks needed.
; DEBUG-LABEL: LV: Checking a loop in 'overflow_indvar_known_false'
; DEBUG: LV: Found trip count: 0
-; DEBUG-NOT: LV: Found maximum trip count
+; DEBUG: LV: Found maximum trip count: 1027
; DEBUG: LV: can fold tail by masking.
; DEBUG: Executing best plan with VF=vscale x 16, UF=1
@@ -47,7 +46,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = sub i64 20, [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
@@ -62,18 +61,18 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 32
+; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 32
+; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 32
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -81,18 +80,9 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i32 0
-; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 16
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP24]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP25]], align 1
-; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP27:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 16
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP29]]
-; CHECK-NEXT: store <vscale x 16 x i8> [[TMP26]], ptr [[TMP22]], align 1
-; CHECK-NEXT: store <vscale x 16 x i8> [[TMP27]], ptr [[TMP30]], align 1
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP22]], align 1
+; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <vscale x 8 x i8> [[TMP23]], ptr [[TMP22]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -103,20 +93,20 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: [[IND_END5:%.*]] = add i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 8
+; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 4
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP33]]
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
; CHECK: [[VEC_EPILOG_PH]]:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 8
+; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 4
; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 [[TMP3]], [[TMP35]]
; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF3]]
; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC4]]
; CHECK-NEXT: [[TMP36:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT9]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <vscale x 4 x i8> poison, i8 [[CONV]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector <vscale x 4 x i8> [[BROADCAST_SPLATINSERT8]], <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
@@ -124,9 +114,9 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[OFFSET_IDX7]], 0
; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP38]]
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x i8>, ptr [[TMP40]], align 1
-; CHECK-NEXT: [[TMP41:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT10]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP41]], ptr [[TMP40]], align 1
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 4 x i8>, ptr [[TMP40]], align 1
+; CHECK-NEXT: [[TMP41:%.*]] = add <vscale x 4 x i8> [[WIDE_LOAD7]], [[BROADCAST_SPLAT9]]
+; CHECK-NEXT: store <vscale x 4 x i8> [[TMP41]], ptr [[TMP40]], align 1
; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX6]], [[TMP37]]
; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC4]]
; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
@@ -183,106 +173,14 @@ define void @trip_count_too_small(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[P_PROMOTED:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[P_PROMOTED]], 3
-; CHECK-NEXT: br i1 [[CMP7]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]]
-; CHECK: [[ITER_CHECK]]:
+; CHECK-NEXT: br i1 [[CMP7]], label %[[WHILE_PREHEADER:.*]], label %[[WHILE_END:.*]]
+; CHECK: [[WHILE_PREHEADER]]:
; CHECK-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8
; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[P_PROMOTED]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[P_PROMOTED]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = sub i64 4, [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
-; CHECK: [[VECTOR_SCEVCHECK]]:
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[P_PROMOTED]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = sub i64 3, [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP6]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP10]], [[TMP6]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 4294967295
-; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
-; CHECK-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
-; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 32
-; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 32
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 32
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX]]
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP20]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i32 0
-; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 16
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP24]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP25]], align 1
-; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP27:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 16
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i64 [[TMP29]]
-; CHECK-NEXT: store <vscale x 16 x i8> [[TMP26]], ptr [[TMP22]], align 1
-; CHECK-NEXT: store <vscale x 16 x i8> [[TMP27]], ptr [[TMP30]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
-; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
-; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
-; CHECK-NEXT: [[IND_END5:%.*]] = add i64 [[TMP0]], [[N_VEC]]
-; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
-; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 8
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP33]]
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
-; CHECK: [[VEC_EPILOG_PH]]:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 8
-; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 [[TMP3]], [[TMP35]]
-; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF3]]
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC4]]
-; CHECK-NEXT: [[TMP36:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT9]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
-; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = add i64 [[TMP0]], [[INDEX6]]
-; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[OFFSET_IDX7]], 0
-; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP38]]
-; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x i8>, ptr [[TMP40]], align 1
-; CHECK-NEXT: [[TMP41:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT10]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP41]], ptr [[TMP40]], align 1
-; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX6]], [[TMP37]]
-; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC4]]
-; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N12:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC4]]
-; CHECK-NEXT: br i1 [[CMP_N12]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
-; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[ITER_CHECK]] ]
; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
; CHECK: [[WHILE_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP0]], %[[WHILE_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [100 x i8], ptr [[V]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -290,7 +188,7 @@ define void @trip_count_too_small(ptr nocapture noundef %p, i16 noundef %val) {
; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP44:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP44]], 3
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[WHILE_BODY]]
; CHECK: [[WHILE_END_LOOPEXIT]]:
; CHECK-NEXT: br label %[[WHILE_END]]
; CHECK: [[WHILE_END]]:
@@ -328,157 +226,14 @@ define void @too_many_runtime_checks(ptr nocapture noundef %p, ptr nocapture nou
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[CMP20:%.*]] = icmp ult i32 [[TMP0]], 16
-; CHECK-NEXT: br i1 [[CMP20]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]]
-; CHECK: [[ITER_CHECK]]:
+; CHECK-NEXT: br i1 [[CMP20]], label %[[WHILE_PREHEADER:.*]], label %[[WHILE_END:.*]]
+; CHECK: [[WHILE_PREHEADER]]:
; CHECK-NEXT: [[CONV8:%.*]] = trunc i16 [[VAL]] to i8
; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP0]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 17, [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP6]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
-; CHECK: [[VECTOR_SCEVCHECK]]:
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP0]], 1
-; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = sub i64 16, [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP7]], [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp ult i32 [[TMP11]], [[TMP7]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP9]], 4294967295
-; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
-; CHECK: [[VECTOR_MEMCHECK]]:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP1]], 17
-; CHECK-NEXT: [[TMP16:%.*]] = add i32 [[TMP0]], 1
-; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = sub i64 [[TMP15]], [[TMP17]]
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw i64 [[TMP1]], 4
-; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP19]]
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP1]], 21
-; CHECK-NEXT: [[TMP21:%.*]] = sub i64 [[TMP20]], [[TMP17]]
-; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP21]]
-; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP1]]
-; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP18]]
-; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[P3]], i64 [[TMP1]]
-; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr i8, ptr [[P3]], i64 [[TMP18]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP3]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP1]]
-; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND08:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP5]]
-; CHECK-NEXT: [[BOUND19:%.*]] = icmp ult ptr [[SCEVGEP4]], [[SCEVGEP1]]
-; CHECK-NEXT: [[FOUND_CONFLICT10:%.*]] = and i1 [[BOUND08]], [[BOUND19]]
-; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT10]]
-; CHECK-NEXT: [[BOUND011:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP7]]
-; CHECK-NEXT: [[BOUND112:%.*]] = icmp ult ptr [[SCEVGEP6]], [[SCEVGEP1]]
-; CHECK-NEXT: [[FOUND_CONFLICT13:%.*]] = and i1 [[BOUND011]], [[BOUND112]]
-; CHECK-NEXT: [[CONFLICT_RDX14:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT13]]
-; CHECK-NEXT: [[BOUND015:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP5]]
-; CHECK-NEXT: [[BOUND116:%.*]] = icmp ult ptr [[SCEVGEP4]], [[SCEVGEP3]]
-; CHECK-NEXT: [[FOUND_CONFLICT17:%.*]] = and i1 [[BOUND015]], [[BOUND116]]
-; CHECK-NEXT: [[CONFLICT_RDX18:%.*]] = or i1 [[CONFLICT_RDX14]], [[FOUND_CONFLICT17]]
-; CHECK-NEXT: [[BOUND019:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP7]]
-; CHECK-NEXT: [[BOUND120:%.*]] = icmp ult ptr [[SCEVGEP6]], [[SCEVGEP3]]
-; CHECK-NEXT: [[FOUND_CONFLICT21:%.*]] = and i1 [[BOUND019]], [[BOUND120]]
-; CHECK-NEXT: [[CONFLICT_RDX22:%.*]] = or i1 [[CONFLICT_RDX18]], [[FOUND_CONFLICT21]]
-; CHECK-NEXT: br i1 [[CONFLICT_RDX22]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
-; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
-; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 16
-; CHECK-NEXT: [[MIN_ITERS_CHECK23:%.*]] = icmp ult i64 [[TMP4]], [[TMP23]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK23]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 16
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], [[TMP25]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 16
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV8]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP1]], [[INDEX]]
-; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP29]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP30]], align 1, !alias.scope [[META8:![0-9]+]]
-; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[P3]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <vscale x 16 x i8>, ptr [[TMP32]], align 1, !alias.scope [[META11:![0-9]+]]
-; CHECK-NEXT: [[TMP33:%.*]] = mul <vscale x 16 x i8> [[WIDE_LOAD24]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <vscale x 16 x i8>, ptr [[TMP35]], align 1, !alias.scope [[META13:![0-9]+]], !noalias [[META15:![0-9]+]]
-; CHECK-NEXT: [[TMP36:%.*]] = add <vscale x 16 x i8> [[TMP33]], [[WIDE_LOAD25]]
-; CHECK-NEXT: store <vscale x 16 x i8> [[TMP36]], ptr [[TMP35]], align 1, !alias.scope [[META13]], !noalias [[META15]]
-; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP28]]
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP37]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD26:%.*]] = load <vscale x 16 x i8>, ptr [[TMP38]], align 1, !alias.scope [[META17:![0-9]+]], !noalias [[META18:![0-9]+]]
-; CHECK-NEXT: [[TMP39:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD26]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 16 x i8> [[TMP39]], ptr [[TMP38]], align 1, !alias.scope [[META17]], !noalias [[META18]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP27]]
-; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP40]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
-; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
-; CHECK-NEXT: [[IND_END29:%.*]] = add i64 [[TMP1]], [[N_VEC]]
-; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP4]], [[N_VEC]]
-; CHECK-NEXT: [[TMP41:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP42:%.*]] = mul i64 [[TMP41]], 8
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP42]]
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
-; CHECK: [[VEC_EPILOG_PH]]:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[TMP43:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP44:%.*]] = mul i64 [[TMP43]], 8
-; CHECK-NEXT: [[N_MOD_VF27:%.*]] = urem i64 [[TMP4]], [[TMP44]]
-; CHECK-NEXT: [[N_VEC28:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF27]]
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP1]], [[N_VEC28]]
-; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP46:%.*]] = mul i64 [[TMP45]], 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT36:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV8]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT37:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT36]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
-; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX30:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT38:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX31:%.*]] = add i64 [[TMP1]], [[INDEX30]]
-; CHECK-NEXT: [[TMP47:%.*]] = add i64 [[OFFSET_IDX31]], 0
-; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP47]]
-; CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds i8, ptr [[TMP48]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD32:%.*]] = load <vscale x 8 x i8>, ptr [[TMP49]], align 1, !alias.scope [[META20:![0-9]+]]
-; CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[P3]], i64 [[TMP47]]
-; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds i8, ptr [[TMP50]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD33:%.*]] = load <vscale x 8 x i8>, ptr [[TMP51]], align 1, !alias.scope [[META23:![0-9]+]]
-; CHECK-NEXT: [[TMP52:%.*]] = mul <vscale x 8 x i8> [[WIDE_LOAD33]], [[WIDE_LOAD32]]
-; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP47]]
-; CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds i8, ptr [[TMP53]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD34:%.*]] = load <vscale x 8 x i8>, ptr [[TMP54]], align 1, !alias.scope [[META25:![0-9]+]], !noalias [[META27:![0-9]+]]
-; CHECK-NEXT: [[TMP55:%.*]] = add <vscale x 8 x i8> [[TMP52]], [[WIDE_LOAD34]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP55]], ptr [[TMP54]], align 1, !alias.scope [[META25]], !noalias [[META27]]
-; CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [100 x i8], ptr [[V]], i64 0, i64 [[TMP47]]
-; CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds i8, ptr [[TMP56]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD35:%.*]] = load <vscale x 8 x i8>, ptr [[TMP57]], align 1, !alias.scope [[META29:![0-9]+]], !noalias [[META30:![0-9]+]]
-; CHECK-NEXT: [[TMP58:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD35]], [[BROADCAST_SPLAT37]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP58]], ptr [[TMP57]], align 1, !alias.scope [[META29]], !noalias [[META30]]
-; CHECK-NEXT: [[INDEX_NEXT38]] = add nuw i64 [[INDEX30]], [[TMP46]]
-; CHECK-NEXT: [[TMP59:%.*]] = icmp eq i64 [[INDEX_NEXT38]], [[N_VEC28]]
-; CHECK-NEXT: br i1 [[TMP59]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
-; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N39:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC28]]
-; CHECK-NEXT: br i1 [[CMP_N39]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
-; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END29]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP1]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP1]], %[[VECTOR_MEMCHECK]] ], [ [[TMP1]], %[[ITER_CHECK]] ]
; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
; CHECK: [[WHILE_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP1]], %[[WHILE_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P2]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP60:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[P3]], i64 [[INDVARS_IV]]
@@ -495,7 +250,7 @@ define void @too_many_runtime_checks(ptr nocapture noundef %p, ptr nocapture nou
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[TMP64:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP64]], 16
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[WHILE_BODY]]
; CHECK: [[WHILE_END_LOOPEXIT]]:
; CHECK-NEXT: br label %[[WHILE_END]]
; CHECK: [[WHILE_END]]:
@@ -538,7 +293,7 @@ while.end:
define void @overflow_indvar_known_false(ptr nocapture noundef %p, i16 noundef %val) vscale_range(1,16) {
; CHECK-LABEL: define void @overflow_indvar_known_false(
-; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[P_PROMOTED:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[P_PROMOTED]], 1027
@@ -569,13 +324,8 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i16 noundef %
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
-; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP1]], [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[TMP1]], [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[TMP1]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -590,11 +340,11 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i16 noundef %
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP14]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison)
; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 16 x i8> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP15]], ptr [[TMP14]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP11]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 [[TMP1]])
; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 16 x i1> [[TMP16]], i32 0
-; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[WHILE_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -609,7 +359,7 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i16 noundef %
; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP29:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP29]], 1027
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[WHILE_END_LOOPEXIT]]:
; CHECK-NEXT: br label %[[WHILE_END]]
; CHECK: [[WHILE_END]]:
@@ -651,33 +401,5 @@ while.end:
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
-; CHECK: [[META8]] = !{[[META9:![0-9]+]]}
-; CHECK: [[META9]] = distinct !{[[META9]], [[META10:![0-9]+]]}
-; CHECK: [[META10]] = distinct !{[[META10]], !"LVerDomain"}
-; CHECK: [[META11]] = !{[[META12:![0-9]+]]}
-; CHECK: [[META12]] = distinct !{[[META12]], [[META10]]}
-; CHECK: [[META13]] = !{[[META14:![0-9]+]]}
-; CHECK: [[META14]] = distinct !{[[META14]], [[META10]]}
-; CHECK: [[META15]] = !{[[META16:![0-9]+]], [[META9]], [[META12]]}
-; CHECK: [[META16]] = distinct !{[[META16]], [[META10]]}
-; CHECK: [[META17]] = !{[[META16]]}
-; CHECK: [[META18]] = !{[[META9]], [[META12]]}
-; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]]}
-; CHECK: [[META20]] = !{[[META21:![0-9]+]]}
-; CHECK: [[META21]] = distinct !{[[META21]], [[META22:![0-9]+]]}
-; CHECK: [[META22]] = distinct !{[[META22]], !"LVerDomain"}
-; CHECK: [[META23]] = !{[[META24:![0-9]+]]}
-; CHECK: [[META24]] = distinct !{[[META24]], [[META22]]}
-; CHECK: [[META25]] = !{[[META26:![0-9]+]]}
-; CHECK: [[META26]] = distinct !{[[META26]], [[META22]]}
-; CHECK: [[META27]] = !{[[META28:![0-9]+]], [[META21]], [[META24]]}
-; CHECK: [[META28]] = distinct !{[[META28]], [[META22]]}
-; CHECK: [[META29]] = !{[[META28]]}
-; CHECK: [[META30]] = !{[[META21]], [[META24]]}
-; CHECK: [[LOOP31]] = distinct !{[[LOOP31]], [[META1]], [[META2]]}
-; CHECK: [[LOOP32]] = distinct !{[[LOOP32]], [[META1]]}
-; CHECK: [[LOOP33]] = distinct !{[[LOOP33]], [[META1]], [[META2]]}
-; CHECK: [[LOOP34]] = distinct !{[[LOOP34]], [[META1]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]}
;.
>From 49eaf3d5fbf3faca7c3a4edc4d46679bda1282b4 Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Fri, 27 Sep 2024 14:05:56 +0000
Subject: [PATCH 3/3] Address review comments
* Add a getSmallConstantMaxTripCount function to
PredicatedScalarEvolution, and call it instead of the ScalarEvolution
version so that any predicates required for the bound are recorded.
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 7 ++++
llvm/lib/Analysis/ScalarEvolution.cpp | 10 ++++++
.../Transforms/Vectorize/LoopVectorize.cpp | 32 +++++++------------
3 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 68b860725752d0..529280cbe1a214 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -2381,6 +2381,10 @@ class PredicatedScalarEvolution {
/// Get the (predicated) symbolic max backedge count for the analyzed loop.
const SCEV *getSymbolicMaxBackedgeTakenCount();
+ /// Returns the upper bound of the loop trip count as a normal unsigned
+ /// value, or 0 if the trip count is unknown. Any SCEV predicates that
+ /// must hold for this bound to be valid are added to the predicate set.
+ unsigned getSmallConstantMaxTripCount();
+
/// Adds a new predicate.
void addPredicate(const SCEVPredicate &Pred);
@@ -2452,6 +2456,9 @@ class PredicatedScalarEvolution {
/// The symbolic backedge taken count.
const SCEV *SymbolicMaxBackedgeCount = nullptr;
+
+ /// Cached small constant maximum trip count for the loop, computed
+ /// lazily on the first call to getSmallConstantMaxTripCount.
+ std::optional<unsigned> SmallConstantMaxTripCount;
};
template <> struct DenseMapInfo<ScalarEvolution::FoldID> {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 09e5c080c19cf9..f0424707f6a352 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -15025,6 +15025,16 @@ const SCEV *PredicatedScalarEvolution::getSymbolicMaxBackedgeTakenCount() {
return SymbolicMaxBackedgeCount;
}
+unsigned PredicatedScalarEvolution::getSmallConstantMaxTripCount() {
+ // Compute the result lazily and cache it. On the first query, also
+ // register (via addPredicate) every SCEV predicate that must hold for
+ // the computed bound to be valid, so the predicate set stays consistent
+ // with the answer handed back on this and all later calls.
+ if (!SmallConstantMaxTripCount) {
+ SmallVector<const SCEVPredicate *, 4> Preds;
+ SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
+ for (const auto *P : Preds)
+ addPredicate(*P);
+ }
+ // A value of 0 means the maximum trip count is unknown.
+ return *SmallConstantMaxTripCount;
+}
+
void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
if (Preds->implies(&Pred))
return;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index db9a04e294bfe3..4110a23aa95b5f 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -410,10 +410,10 @@ static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
/// 2) Returns expected trip count according to profile data if any.
/// 3) Returns upper bound estimate if it is known.
/// 4) Returns std::nullopt if all of the above failed.
-static std::optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE,
- Loop *L) {
+static std::optional<unsigned>
+getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L) {
// Check if exact trip count is known.
- if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
+ if (unsigned ExpectedTC = PSE.getSE()->getSmallConstantTripCount(L))
return ExpectedTC;
// Check if there is an expected trip count available from profile data.
@@ -422,8 +422,7 @@ static std::optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE,
return *EstimatedTC;
// Check if upper bound estimate is known.
- SmallVector<const SCEVPredicate *, 2> Predicates;
- if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L, &Predicates))
+ if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
return ExpectedTC;
return std::nullopt;
@@ -2292,9 +2291,7 @@ static bool isIndvarOverflowCheckKnownFalse(
// We know the runtime overflow check is known false iff the (max) trip-count
// is known and (max) trip-count + (VF * UF) does not overflow in the type of
// the vector loop induction variable.
- SmallVector<const SCEVPredicate *, 2> Predicates;
- if (unsigned TC = Cost->PSE.getSE()->getSmallConstantMaxTripCount(
- Cost->TheLoop, &Predicates)) {
+ if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
uint64_t MaxVF = VF.getKnownMinValue();
if (VF.isScalable()) {
std::optional<unsigned> MaxVScale =
@@ -3989,10 +3986,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
}
unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
-
- SmallVector<const SCEVPredicate *, 2> Predicates;
- unsigned MaxTC =
- PSE.getSE()->getSmallConstantMaxTripCount(TheLoop, &Predicates);
+ unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
if (TC != MaxTC)
LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
@@ -4283,9 +4277,7 @@ bool LoopVectorizationPlanner::isMoreProfitable(
InstructionCost CostA = A.Cost;
InstructionCost CostB = B.Cost;
- SmallVector<const SCEVPredicate *, 2> Predicates;
- unsigned MaxTripCount =
- PSE.getSE()->getSmallConstantMaxTripCount(OrigLoop, &Predicates);
+ unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
// Improve estimate for the vector width if it is scalable.
unsigned EstimatedWidthA = A.Width.getKnownMinValue();
@@ -4873,7 +4865,7 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
if (!Legal->isSafeForAnyVectorWidth())
return 1;
- auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
+ auto BestKnownTC = getSmallBestKnownTC(PSE, TheLoop);
const bool HasReductions = !Legal->getReductionVars().empty();
// If we did not calculate the cost for VF (because the user selected the VF)
@@ -9643,7 +9635,7 @@ static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
VectorizationFactor &VF,
std::optional<unsigned> VScale, Loop *L,
- ScalarEvolution &SE,
+ PredicatedScalarEvolution &PSE,
ScalarEpilogueLowering SEL) {
InstructionCost CheckCost = Checks.getCost();
if (!CheckCost.isValid())
@@ -9728,7 +9720,7 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
// Skip vectorization if the expected trip count is less than the minimum
// required trip count.
- if (auto ExpectedTC = getSmallBestKnownTC(SE, L)) {
+ if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
if (ElementCount::isKnownLT(ElementCount::getFixed(*ExpectedTC),
VF.MinProfitableTripCount)) {
LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
@@ -9835,7 +9827,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
// Check the loop for a trip count threshold: vectorize loops with a tiny trip
// count by optimizing for size, to minimize overheads.
- auto ExpectedTC = getSmallBestKnownTC(*SE, L);
+ auto ExpectedTC = getSmallBestKnownTC(PSE, L);
if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
<< "This loop is worth vectorizing only if no scalar "
@@ -9950,7 +9942,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
Hints.getForce() == LoopVectorizeHints::FK_Enabled;
if (!ForceVectorization &&
!areRuntimeChecksProfitable(Checks, VF, getVScaleForTuning(L, *TTI), L,
- *PSE.getSE(), SEL)) {
+ PSE, SEL)) {
ORE->emit([&]() {
return OptimizationRemarkAnalysisAliasing(
DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
More information about the llvm-commits
mailing list