[llvm] [VPlan] Remove single-operand dead VPPhi (PR #156438)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 3 04:52:23 PDT 2025
https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/156438
From 489ea90e3a51de8098e185b141f17db7478abdd6 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Tue, 2 Sep 2025 11:28:21 +0100
Subject: [PATCH] [VPlan] Remove single-operand dead VPPhi in removeDeadRecipes
Fold a single-operand VPPhi into its start value and erase it in removeDeadRecipes, next to the existing removal of dead VPPhi <-> update cycles, and drop the corresponding simplification from simplifyRecipe, which could only rewrite the phi's uses but not erase the dead phi itself.
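For reference, a condensed sketch of the new handling as it would sit inside removeDeadRecipes' per-recipe loop (paraphrasing the hunk below rather than quoting it verbatim; the surrounding traversal and the NumOps local are assumed for illustration):

    // Sketch: inside removeDeadRecipes' loop over recipes, for a VPPhi PhiR.
    unsigned NumOps = PhiR->getNumOperands();
    if (NumOps == 1) {
      // Single-operand phi: forward the start value and drop the phi.
      PhiR->replaceAllUsesWith(PhiR->getOperand(0));
      PhiR->eraseFromParent();
    } else if (NumOps == 2) {
      // Dead phi <-> update cycle: the phi's only user is the recipe defining
      // its back-edge value, and that recipe's only user is the phi, so both
      // can be erased once the start value replaces the phi's uses.
      VPValue *Incoming = PhiR->getOperand(1);
      VPUser *U = PhiR->getUniqueUser();
      if (U && U == Incoming->getDefiningRecipe() && Incoming->hasOneUser()) {
        PhiR->replaceAllUsesWith(PhiR->getOperand(0));
        PhiR->eraseFromParent();
        Incoming->getDefiningRecipe()->eraseFromParent();
      }
    }

The hasOneUser/getUniqueUser helpers added to VPValue keep the two-operand cycle check readable.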
---
.../Transforms/Vectorize/VPlanTransforms.cpp | 30 +++----
llvm/lib/Transforms/Vectorize/VPlanValue.h | 4 +
.../AArch64/divs-with-scalable-vfs.ll | 10 +--
.../AArch64/induction-costs-sve.ll | 34 +++----
.../AArch64/low_trip_count_predicates.ll | 3 +-
.../LoopVectorize/AArch64/predicated-costs.ll | 5 +-
.../LoopVectorize/AArch64/store-costs-sve.ll | 3 +-
.../ARM/mve-gather-scatter-tailpred.ll | 3 +-
.../ARM/mve-hoist-runtime-checks.ll | 3 +-
.../RISCV/blocks-with-dead-instructions.ll | 5 +-
.../LoopVectorize/RISCV/fminimumnum.ll | 20 ++---
.../RISCV/gather-scatter-cost.ll | 3 +-
.../LoopVectorize/RISCV/induction-costs.ll | 8 +-
.../RISCV/masked_gather_scatter.ll | 6 +-
.../RISCV/riscv-vector-reverse.ll | 8 --
.../LoopVectorize/RISCV/strided-accesses.ll | 28 +++---
.../RISCV/tail-folding-bin-unary-ops-args.ll | 90 ++++++++-----------
.../RISCV/tail-folding-call-intrinsics.ll | 45 ++++------
.../RISCV/tail-folding-cast-intrinsics.ll | 50 +++++------
.../RISCV/tail-folding-intermediate-store.ll | 12 +--
.../RISCV/type-info-cache-evl-crash.ll | 5 +-
.../vf-will-not-generate-any-vector-insts.ll | 5 +-
.../RISCV/vplan-riscv-vector-reverse.ll | 2 -
.../X86/divs-with-tail-folding.ll | 10 +--
24 files changed, 154 insertions(+), 238 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index c588453091fcc..5b0046e50fad8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -561,17 +561,23 @@ void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
continue;
}
- // Check if R is a dead VPPhi <-> update cycle and remove it.
+ // Remove single-operand dead VPPhis and dead VPPhi <-> update cycles.
auto *PhiR = dyn_cast<VPPhi>(&R);
- if (!PhiR || PhiR->getNumOperands() != 2 || PhiR->getNumUsers() != 1)
+ if (!PhiR)
continue;
- VPValue *Incoming = PhiR->getOperand(1);
- if (*PhiR->user_begin() != Incoming->getDefiningRecipe() ||
- Incoming->getNumUsers() != 1)
- continue;
- PhiR->replaceAllUsesWith(PhiR->getOperand(0));
- PhiR->eraseFromParent();
- Incoming->getDefiningRecipe()->eraseFromParent();
+ unsigned NumOps = PhiR->getNumOperands();
+ if (NumOps == 1) {
+ PhiR->replaceAllUsesWith(PhiR->getOperand(0));
+ PhiR->eraseFromParent();
+ } else if (NumOps == 2) {
+ VPValue *Incoming = PhiR->getOperand(1);
+ VPUser *U = PhiR->getUniqueUser();
+ if (U && U == Incoming->getDefiningRecipe() && Incoming->hasOneUser()) {
+ PhiR->replaceAllUsesWith(PhiR->getOperand(0));
+ PhiR->eraseFromParent();
+ Incoming->getDefiningRecipe()->eraseFromParent();
+ }
+ }
}
}
}
@@ -1221,12 +1227,6 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
return;
}
- if (auto *Phi = dyn_cast<VPPhi>(Def)) {
- if (Phi->getNumOperands() == 1)
- Phi->replaceAllUsesWith(Phi->getOperand(0));
- return;
- }
-
// Some simplifications can only be applied after unrolling. Perform them
// below.
if (!Plan->isUnrolled())
diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h
index 85c6c2c8d7965..ec8e90b41a8eb 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -111,6 +111,10 @@ class LLVM_ABI_FOR_TEST VPValue {
#endif
unsigned getNumUsers() const { return Users.size(); }
+ bool hasOneUser() const { return Users.size() == 1; }
+ VPUser *getUniqueUser() const {
+ return hasOneUser() ? Users.front() : nullptr;
+ }
void addUser(VPUser &User) { Users.push_back(&User); }
/// Remove a single \p User from the list of users.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
index a44cc09b8a8ea..464e2145aae06 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
@@ -95,7 +95,7 @@ exit:
define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
; CHECK-LABEL: define void @sdiv_feeding_gep_predicated(
; CHECK-SAME: ptr [[DST:%.*]], i32 [[X:%.*]], i64 [[M:%.*]], i64 [[CONV6:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CONV61:%.*]] = zext i32 [[X]] to i64
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
@@ -149,10 +149,9 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[IV]], [[M]]
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
; CHECK: [[THEN]]:
@@ -209,7 +208,7 @@ exit:
define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
; CHECK-LABEL: define void @udiv_urem_feeding_gep(
; CHECK-SAME: i64 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MUL_1_I:%.*]] = mul i64 [[X]], [[X]]
; CHECK-NEXT: [[MUL_2_I:%.*]] = mul i64 [[MUL_1_I]], [[X]]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
@@ -267,10 +266,9 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[DIV_I:%.*]] = udiv i64 [[IV]], [[MUL_2_I]]
; CHECK-NEXT: [[REM_I:%.*]] = urem i64 [[IV]], [[MUL_2_I]]
; CHECK-NEXT: [[DIV_1_I:%.*]] = udiv i64 [[REM_I]], [[MUL_1_I]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
index f094b9a72d85b..cb45301ad06f1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
@@ -85,7 +85,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 {
;
; PRED-LABEL: define void @iv_casts(
; PRED-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[X:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; PRED-NEXT: [[ENTRY:.*]]:
+; PRED-NEXT: [[ENTRY:.*:]]
; PRED-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
; PRED-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64
; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
@@ -129,10 +129,9 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 {
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: br label %[[EXIT:.*]]
; PRED: [[SCALAR_PH]]:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; PRED-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; PRED-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32
@@ -237,7 +236,7 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
;
; PRED-LABEL: define void @iv_trunc(
; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*]]:
+; PRED-NEXT: [[ENTRY:.*:]]
; PRED-NEXT: [[MUL_X:%.*]] = add i32 [[X]], 1
; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
; PRED-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
@@ -297,10 +296,9 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: br label %[[EXIT:.*]]
; PRED: [[SCALAR_PH]]:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; PRED-NEXT: br label %[[FOR_BODY:.*]]
; PRED: [[FOR_BODY]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; PRED-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32
; PRED-NEXT: [[ADD_I:%.*]] = mul i32 [[MUL_X]], [[TRUNC_IV]]
; PRED-NEXT: [[IV_MUL:%.*]] = zext i32 [[ADD_I]] to i64
@@ -404,7 +402,7 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 {
;
; PRED-LABEL: define void @trunc_ivs_and_store(
; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*]]:
+; PRED-NEXT: [[ENTRY:.*:]]
; PRED-NEXT: [[MUL:%.*]] = mul i32 [[X]], [[X]]
; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
; PRED-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
@@ -486,12 +484,10 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 {
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: br label %[[EXIT:.*]]
; PRED: [[SCALAR_PH]]:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
-; PRED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL8]], %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[IV_1_TRUNC:%.*]] = trunc i64 [[IV_1]] to i32
; PRED-NEXT: [[IV_1_MUL:%.*]] = mul i32 [[MUL]], [[IV_1_TRUNC]]
; PRED-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 1
@@ -596,7 +592,7 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
;
; PRED-LABEL: define void @ivs_trunc_and_ext(
; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*]]:
+; PRED-NEXT: [[ENTRY:.*:]]
; PRED-NEXT: [[ADD:%.*]] = add i32 [[X]], 1
; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
; PRED-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
@@ -677,12 +673,10 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: br label %[[EXIT:.*]]
; PRED: [[SCALAR_PH]]:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
-; PRED-NEXT: [[BC_RESUME_VAL7:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL7]], %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV_1]] to i32
; PRED-NEXT: [[IV_MUL:%.*]] = mul i32 [[ADD]], [[IV_TRUNC]]
; PRED-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 1
@@ -769,7 +763,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) {
;
; PRED-LABEL: define void @exit_cond_zext_iv(
; PRED-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) {
-; PRED-NEXT: [[ENTRY:.*]]:
+; PRED-NEXT: [[ENTRY:.*:]]
; PRED-NEXT: [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
; PRED-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; PRED: [[VECTOR_SCEVCHECK]]:
@@ -817,12 +811,10 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) {
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: br label %[[EXIT:.*]]
; PRED: [[SCALAR_PH]]:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
-; PRED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_CONV:%.*]] = phi i64 [ [[BC_RESUME_VAL6]], %[[SCALAR_PH]] ], [ [[IV_EXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_CONV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_EXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr { [100 x i32], i32, i32 }, ptr [[DST]], i64 [[IV_CONV]], i32 2
; PRED-NEXT: store i32 0, ptr [[GEP]], align 8
; PRED-NEXT: [[IV_1_NEXT]] = add i32 [[IV_1]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
index 1f486fba069c9..a1a459c14ca7c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
@@ -428,10 +428,9 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i32 noundef %
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[WHILE_END_LOOPEXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[WHILE_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
; CHECK: [[WHILE_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP0]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP18:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
index 019d2ee9886a6..eeb143d5e938b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
@@ -8,7 +8,7 @@ target triple = "aarch64-unknown-linux"
define void @test_predicated_load_cast_hint(ptr %dst.1, ptr %dst.2, ptr %src, i8 %n, i64 %off) #0 {
; CHECK-LABEL: define void @test_predicated_load_cast_hint(
; CHECK-SAME: ptr [[DST_1:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]], i8 [[N:%.*]], i64 [[OFF:%.*]]) {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[N_EXT:%.*]] = sext i8 [[N]] to i32
; CHECK-NEXT: [[N_SUB:%.*]] = add i32 [[N_EXT]], -15
; CHECK-NEXT: [[SMAX16:%.*]] = call i32 @llvm.smax.i32(i32 [[N_SUB]], i32 4)
@@ -269,10 +269,9 @@ define void @test_predicated_load_cast_hint(ptr %dst.1, ptr %dst.2, ptr %src, i8
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[SRC]], align 1
; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i64
; CHECK-NEXT: [[ADD:%.*]] = or i64 [[L_EXT]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index 4055d016e7b9d..27e8708afe64a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -238,10 +238,9 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED: middle.block:
; PRED-NEXT: br label [[EXIT:%.*]]
; PRED: scalar.ph:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; PRED-NEXT: br label [[LOOP:%.*]]
; PRED: loop:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; PRED-NEXT: [[X_EXT:%.*]] = zext i16 [[X]] to i64
; PRED-NEXT: [[L:%.*]] = load i64, ptr [[SRC]], align 8
; PRED-NEXT: [[AND:%.*]] = and i64 [[L]], [[X_EXT]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
index d134dc50ecac8..3d2a60e842847 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -336,10 +336,9 @@ define void @test_stride_loopinvar_4i32(ptr readonly %data, ptr noalias nocaptur
; CHECK: middle.block:
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], [[STRIDE]]
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-hoist-runtime-checks.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-hoist-runtime-checks.ll
index 029bffdce394d..5ea69a879ebfc 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-hoist-runtime-checks.ll
@@ -79,10 +79,9 @@ define void @diff_checks(ptr nocapture noundef writeonly %dst, ptr nocapture nou
; CHECK: middle.block:
; CHECK-NEXT: br label [[INNER_LOOP_EXIT]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[OUTER_LOOP]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[INNER_LOOP:%.*]]
; CHECK: inner.loop:
-; CHECK-NEXT: [[J_021_US:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC_US:%.*]], [[INNER_LOOP]] ]
+; CHECK-NEXT: [[J_021_US:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC_US:%.*]], [[INNER_LOOP]] ]
; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr i32, ptr [[TMP7]], i32 [[J_021_US]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr i32, ptr [[TMP8]], i32 [[J_021_US]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index 5a99f15b9f585..8d5e3e299b648 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -664,7 +664,7 @@ exit:
define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-LABEL: define void @dead_load_in_block(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i8 [[N:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[N_EXT:%.*]] = zext i8 [[N]] to i64
; CHECK-NEXT: [[UMIN7:%.*]] = call i64 @llvm.umin.i64(i64 [[N_EXT]], i64 1)
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[N_EXT]], [[UMIN7]]
@@ -716,10 +716,9 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[L_0:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[L_0]], 0
; CHECK-NEXT: br i1 [[C_0]], label %[[LOOP_LATCH]], label %[[THEN:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
index 84151c2cce931..1d392051f0918 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
@@ -586,7 +586,7 @@ declare double @llvm.maximumnum.f64(double, double)
define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmin16(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
@@ -623,10 +623,9 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
@@ -642,7 +641,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
;
; ZVFHMIN-LABEL: define void @fmin16(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
-; ZVFHMIN-NEXT: [[ENTRY:.*]]:
+; ZVFHMIN-NEXT: [[ENTRY:.*:]]
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
@@ -679,10 +678,9 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
-; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[IV]]
@@ -721,7 +719,7 @@ declare half @llvm.minimumnum.f16(half, half)
define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmax16(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
@@ -758,10 +756,9 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
@@ -777,7 +774,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
;
; ZVFHMIN-LABEL: define void @fmax16(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
-; ZVFHMIN-NEXT: [[ENTRY:.*]]:
+; ZVFHMIN-NEXT: [[ENTRY:.*:]]
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
@@ -814,10 +811,9 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
-; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
index 4f91670e7751a..180fb3f5f6e1c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
@@ -48,10 +48,9 @@ define void @predicated_uniform_load(ptr %src, i32 %n, ptr %dst, i1 %cond) {
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: br i1 [[COND]], label [[LOOP_THEN:%.*]], label [[LOOP_ELSE:%.*]]
; CHECK: loop.then:
; CHECK-NEXT: br label [[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
index 90e72e90647fa..90365cd36abfd 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -8,7 +8,7 @@ target triple = "riscv64-unknown-linux-gnu"
define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-LABEL: define void @skip_free_iv_truncate(
; CHECK-SAME: i16 [[X:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[X_I32:%.*]] = sext i16 [[X]] to i32
; CHECK-NEXT: [[X_I64:%.*]] = sext i16 [[X]] to i64
; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 -8
@@ -77,12 +77,10 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_CONV:%.*]] = phi i32 [ [[BC_RESUME_VAL13]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[X_I64]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_CONV:%.*]] = phi i32 [ [[X_I32]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_I64:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP61:%.*]] = load i64, ptr [[GEP_I64]], align 8
; CHECK-NEXT: [[TMP62:%.*]] = sext i32 [[IV_CONV]] to i64
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index d0dac0e380cdc..b9d9674144e4a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -60,10 +60,9 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32: middle.block:
; RV32-NEXT: br label [[FOR_END:%.*]]
; RV32: scalar.ph:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label [[FOR_BODY:%.*]]
; RV32: for.body:
-; RV32-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; RV32-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; RV32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDVARS_IV]]
; RV32-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; RV32-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP21]], 100
@@ -130,10 +129,9 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64: middle.block:
; RV64-NEXT: br label [[FOR_END:%.*]]
; RV64: scalar.ph:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label [[FOR_BODY:%.*]]
; RV64: for.body:
-; RV64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; RV64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; RV64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDVARS_IV]]
; RV64-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; RV64-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP21]], 100
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index 8272e6d82295c..7f66baf833aa9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -246,8 +246,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64: [[MIDDLE_BLOCK]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -307,8 +305,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32: [[MIDDLE_BLOCK]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -507,8 +503,6 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64: [[MIDDLE_BLOCK]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -568,8 +562,6 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32: [[MIDDLE_BLOCK]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index d9fc6b7069cad..099a085c1b3f5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -212,10 +212,9 @@ define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -286,11 +285,10 @@ define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
@@ -409,10 +407,9 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -503,10 +500,9 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED: middle.block:
; STRIDED-NEXT: br label [[EXIT:%.*]]
; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
-; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -563,11 +559,10 @@ define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
@@ -689,14 +684,11 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED: middle.block:
; STRIDED-NEXT: br label [[EXIT:%.*]]
; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
-; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[PTR2:%.*]] = phi ptr [ [[BC_RESUME_VAL8]], [[SCALAR_PH]] ], [ [[PTR2_NEXT:%.*]], [[LOOP]] ]
+; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; STRIDED-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
+; STRIDED-NEXT: [[PTR2:%.*]] = phi ptr [ [[P2]], [[SCALAR_PH]] ], [ [[PTR2_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[PTR2]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
index fb62d21a86817..409e363717a1d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
@@ -11,7 +11,7 @@
define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_and(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -40,10 +40,9 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -127,7 +126,7 @@ finish.loopexit:
define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_or(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -156,10 +155,9 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -243,7 +241,7 @@ finish.loopexit:
define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_xor(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -272,10 +270,9 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -359,7 +356,7 @@ finish.loopexit:
define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_shl(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -388,10 +385,9 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -475,7 +471,7 @@ finish.loopexit:
define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_lshr(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -504,10 +500,9 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -591,7 +586,7 @@ finish.loopexit:
define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_ashr(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -620,10 +615,9 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -707,7 +701,7 @@ finish.loopexit:
define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_add(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -736,10 +730,9 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -823,7 +816,7 @@ finish.loopexit:
define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_sub(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -852,10 +845,9 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -939,7 +931,7 @@ finish.loopexit:
define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_mul(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -968,10 +960,9 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1055,7 +1046,7 @@ finish.loopexit:
define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_sdiv(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1084,10 +1075,9 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1171,7 +1161,7 @@ finish.loopexit:
define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_udiv(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1200,10 +1190,9 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1287,7 +1276,7 @@ finish.loopexit:
define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_srem(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1316,10 +1305,9 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1403,7 +1391,7 @@ finish.loopexit:
define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_urem(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1432,10 +1420,9 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1521,7 +1508,7 @@ finish.loopexit:
define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_fadd(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1551,10 +1538,9 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1639,7 +1625,7 @@ finish.loopexit:
define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_fsub(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1669,10 +1655,9 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1757,7 +1742,7 @@ finish.loopexit:
define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_fmul(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1787,10 +1772,9 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1875,7 +1859,7 @@ finish.loopexit:
define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_fdiv(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1905,10 +1889,9 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -2046,7 +2029,7 @@ finish.loopexit:
define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-LABEL: define void @test_fneg(
; IF-EVL-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
+; IF-EVL-NEXT: [[LOOP_PREHEADER:.*:]]
; IF-EVL-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -2076,10 +2059,9 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index 84a043a291dc7..36cc659b00609 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -10,7 +10,7 @@
define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-LABEL: define void @vp_smax(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
@@ -47,10 +47,9 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -149,7 +148,7 @@ exit:
define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-LABEL: define void @vp_smin(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
@@ -186,10 +185,9 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -288,7 +286,7 @@ exit:
define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-LABEL: define void @vp_umax(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
@@ -325,10 +323,9 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -427,7 +424,7 @@ exit:
define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-LABEL: define void @vp_umin(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
@@ -464,10 +461,9 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -567,7 +563,7 @@ exit:
define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_ctlz(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -597,10 +593,9 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[TMP19:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[TMP18]], i1 true)
@@ -686,7 +681,7 @@ exit:
define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_cttz(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -716,10 +711,9 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[TMP23:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[TMP22]], i1 true)
@@ -805,7 +799,7 @@ exit:
define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_lrint(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -837,10 +831,9 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV2:%.*]] = fpext float [[TMP20]] to double
@@ -934,7 +927,7 @@ exit:
define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_llrint(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -966,10 +959,9 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV2:%.*]] = fpext float [[TMP20]] to double
@@ -1063,7 +1055,7 @@ exit:
define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_abs(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1093,10 +1085,9 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[COND:%.*]] = tail call i32 @llvm.abs.i32(i32 [[TMP18]], i1 true)
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index b8add34018b22..83125126248a7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -10,7 +10,7 @@
define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_sext(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 3
@@ -40,10 +40,9 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV2:%.*]] = sext i32 [[TMP21]] to i64
@@ -128,7 +127,7 @@ exit:
define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_zext(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 3
@@ -158,10 +157,9 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = zext i32 [[TMP21]] to i64
@@ -246,7 +244,7 @@ exit:
define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_trunc(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 2
@@ -276,10 +274,9 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i64, ptr [[GEP]], align 8
; IF-EVL-NEXT: [[CONV:%.*]] = trunc i64 [[TMP21]] to i32
@@ -364,7 +361,7 @@ exit:
define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fpext(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 3
@@ -394,10 +391,9 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = fpext float [[TMP21]] to double
@@ -482,7 +478,7 @@ exit:
define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fptrunc(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 2
@@ -512,10 +508,9 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load double, ptr [[GEP]], align 8
; IF-EVL-NEXT: [[CONV:%.*]] = fptrunc double [[TMP21]] to float
@@ -600,7 +595,7 @@ exit:
define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_sitofp(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -630,10 +625,9 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP23]] to float
@@ -718,7 +712,7 @@ exit:
define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_uitofp(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -748,10 +742,9 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = uitofp i32 [[TMP23]] to float
@@ -836,7 +829,7 @@ exit:
define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fptosi(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -866,10 +859,9 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = fptosi float [[TMP23]] to i32
@@ -954,7 +946,7 @@ exit:
define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fptoui(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -984,10 +976,9 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = fptoui float [[TMP23]] to i32
@@ -1072,7 +1063,7 @@ exit:
define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_inttoptr(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
@@ -1102,10 +1093,9 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i64, ptr [[GEP]], align 8
; IF-EVL-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
index 9e34e1bfbffae..969b40a9822db 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
@@ -52,12 +52,10 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP-NEXT: store i32 [[TMP23]], ptr [[ADDR]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
; IF-EVL-OUTLOOP: scalar.ph:
-; IF-EVL-OUTLOOP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY1]] ], [ [[START]], [[VECTOR_MEMCHECK]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY1:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[TMP27]], [[RDX]]
@@ -100,12 +98,10 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP-NEXT: store i32 [[TMP22]], ptr [[ADDR]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
; IF-EVL-INLOOP: scalar.ph:
-; IF-EVL-INLOOP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ], [ [[START]], [[VECTOR_MEMCHECK]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[TMP25]], [[RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
index 3f687e1f05b64..894ecdd9fb1c9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
@@ -8,7 +8,7 @@
define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count) {
; CHECK-LABEL: define void @type_info_cache_clobber(
; CHECK-SAME: ptr [[DSTV:%.*]], ptr [[SRC:%.*]], i64 [[WIDE_TRIP_COUNT:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[WIDE_TRIP_COUNT]], 1
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -45,10 +45,9 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP22:%.*]] = load i8, ptr [[ARRAYIDX13]], align 1
; CHECK-NEXT: [[CONV14:%.*]] = zext i8 [[TMP22]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
index b0659df140f4d..f1f671a78af2a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
@@ -7,7 +7,7 @@ target triple = "riscv64-unknown-unknown-elf"
define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) {
; CHECK-LABEL: define void @vf_will_not_generate_any_vector_insts(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 4
@@ -34,10 +34,9 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP3:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP3:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 [[DOTPRE]], ptr [[DST]], align 4
; CHECK-NEXT: [[TMP3]] = add nuw i64 [[TMP2]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
index 46695221c27db..b6762d19a8244 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
@@ -54,8 +54,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<%n>, ir-bb<entry> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ ir<%n>, ir-bb<entry> ]
; CHECK-NEXT: Successor(s): ir-bb<for.body>
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/divs-with-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/X86/divs-with-tail-folding.ll
index d49aca955feac..058b568fc6f42 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/divs-with-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/divs-with-tail-folding.ll
@@ -4,7 +4,7 @@
define void @sdiv_feeding_gep(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
; CHECK-LABEL: define void @sdiv_feeding_gep(
; CHECK-SAME: ptr [[DST:%.*]], i32 [[X:%.*]], i64 [[M:%.*]], i64 [[CONV6:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CONV61:%.*]] = zext i32 [[X]] to i64
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
@@ -44,10 +44,9 @@ define void @sdiv_feeding_gep(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[DIV18:%.*]] = sdiv i64 [[M]], [[CONV6]]
; CHECK-NEXT: [[CONV20:%.*]] = trunc i64 [[DIV18]] to i32
; CHECK-NEXT: [[MUL30:%.*]] = mul i64 [[DIV18]], [[CONV61]]
@@ -91,7 +90,7 @@ exit:
define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
; CHECK-LABEL: define void @sdiv_feeding_gep_predicated(
; CHECK-SAME: ptr [[DST:%.*]], i32 [[X:%.*]], i64 [[M:%.*]], i64 [[CONV6:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CONV61:%.*]] = zext i32 [[X]] to i64
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
@@ -158,10 +157,9 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[IV]], [[M]]
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
; CHECK: [[THEN]]: