[llvm] [VPlan] Remove constant branches early. (PR #183397)

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 27 04:55:14 PDT 2026


https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/183397

>From f153ed5bd2885a4b74179a7395aade823a2fceca Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 26 Mar 2026 18:39:28 +0000
Subject: [PATCH] [LV] Simplify BranchOnConst early.

---
 .../Transforms/Vectorize/LoopVectorize.cpp    |  7 +++-
 llvm/lib/Transforms/Vectorize/VPlan.cpp       | 13 ++++---
 llvm/lib/Transforms/Vectorize/VPlan.h         |  7 ++--
 .../Transforms/Vectorize/VPlanTransforms.cpp  | 35 +++++++++++++++--
 .../LoopVectorize/AArch64/predicated-costs.ll |  3 +-
 .../RISCV/gather-scatter-cost.ll              |  3 +-
 .../RISCV/masked_gather_scatter.ll            |  3 +-
 .../RISCV/riscv-vector-reverse.ll             |  4 --
 .../runtime-check-dependent-on-stride.ll      |  3 +-
 .../LoopVectorize/RISCV/strided-accesses.ll   |  6 +--
 .../VPlan/RISCV/vplan-riscv-vector-reverse.ll |  2 -
 .../conditional-scalar-assignment-vplan.ll    | 12 ++----
 ...-order-recurrence-sink-replicate-region.ll | 39 +++++++------------
 .../LoopVectorize/VPlan/icmp-uniforms.ll      |  3 +-
 .../VPlan/vplan-sink-scalars-and-merge.ll     | 33 ++++++----------
 15 files changed, 83 insertions(+), 90 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 7c401becc8634..43cf7fe295c5c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7490,8 +7490,9 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
   // 1. Set up the skeleton for vectorization, including vector pre-header and
   // middle block. The vector loop is created during VPlan execution.
   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
-  replaceVPBBWithIRVPBB(BestVPlan.getScalarPreheader(),
-                        State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
+  if (VPBasicBlock *ScalarPH = BestVPlan.getScalarPreheader())
+    replaceVPBBWithIRVPBB(ScalarPH, State.CFG.PrevBB->getSingleSuccessor(),
+                          &BestVPlan);
   VPlanTransforms::removeDeadRecipes(BestVPlan);
 
   assert(verifyVPlanIsValid(BestVPlan) && "final VPlan is invalid");
@@ -8369,6 +8370,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
   if (!RUN_VPLAN_PASS(VPlanTransforms::handleFindLastReductions, *Plan))
     return nullptr;
 
+  VPlanTransforms::removeBranchOnConst(*Plan);
+
   // Create partial reduction recipes for scaled reductions and transform
   // recipes to abstract recipes if it is legal and beneficial and clamp the
   // range for better cost estimation.
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 919166099a4d1..1403c2aa91ac3 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -945,7 +945,7 @@ void VPlan::execute(VPTransformState *State) {
 
   BasicBlock *ScalarPh = State->CFG.ExitBB;
   VPBasicBlock *ScalarPhVPBB = getScalarPreheader();
-  if (ScalarPhVPBB->hasPredecessors()) {
+  if (ScalarPhVPBB) {
     // Disconnect scalar preheader and scalar header, as the dominator tree edge
     // will be updated as part of VPlan execution. This allows keeping the DTU
     // logic generic during VPlan execution.
@@ -986,7 +986,7 @@ void VPlan::execute(VPTransformState *State) {
   }
 
   // If the original loop is unreachable, delete it and all its blocks.
-  if (!ScalarPhVPBB->hasPredecessors()) {
+  if (!ScalarPhVPBB) {
     // DeleteDeadBlocks will remove single-entry phis. Remove them from the exit
     // VPIRBBs in VPlan as well, otherwise we would retain references to deleted
     // IR instructions.
@@ -1000,7 +1000,7 @@ void VPlan::execute(VPTransformState *State) {
     Loop *OrigLoop =
         State->LI->getLoopFor(getScalarHeader()->getIRBasicBlock());
     auto Blocks = OrigLoop->getBlocksVector();
-    Blocks.push_back(cast<VPIRBasicBlock>(ScalarPhVPBB)->getIRBasicBlock());
+    Blocks.push_back(ScalarPh);
     for (auto *BB : Blocks)
       State->LI->removeBlock(BB);
     DeleteDeadBlocks(Blocks, &State->CFG.DTU);
@@ -1689,7 +1689,8 @@ void LoopVectorizationPlanner::updateLoopMetadataAndProfileInfo(
   // Update the metadata of the scalar loop. Skip the update when vectorizing
   // the epilogue loop to ensure it is updated only once. Also skip the update
   // when the scalar loop became unreachable.
-  if (Plan.getScalarPreheader()->hasPredecessors() && !VectorizingEpilogue) {
+  auto *ScalarPH = Plan.getScalarPreheader();
+  if (ScalarPH && !VectorizingEpilogue) {
     std::optional<MDNode *> RemainderLoopID =
         makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                         LLVMLoopVectorizeFollowupEpilogue});
@@ -1751,7 +1752,7 @@ void LoopVectorizationPlanner::updateLoopMetadataAndProfileInfo(
     AverageVectorTripCount = SE.getSmallConstantTripCount(VectorLoop);
     if (ProfcheckDisableMetadataFixes || !AverageVectorTripCount)
       return;
-    if (Plan.getScalarPreheader()->hasPredecessors())
+    if (ScalarPH)
       RemainderAverageTripCount =
           SE.getSmallConstantTripCount(OrigLoop) % EstimatedVFxUF;
     // Setting to 1 should be sufficient to generate the correct branch weights.
@@ -1767,7 +1768,7 @@ void LoopVectorizationPlanner::updateLoopMetadataAndProfileInfo(
                               OrigLoopInvocationWeight);
   }
 
-  if (Plan.getScalarPreheader()->hasPredecessors()) {
+  if (ScalarPH) {
     setLoopEstimatedTripCount(OrigLoop, RemainderAverageTripCount,
                               OrigLoopInvocationWeight);
   }
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 9ed9d07151d7f..a42cb584c5688 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -4702,7 +4702,8 @@ class VPlan {
 
   /// Return the VPBasicBlock for the preheader of the scalar loop.
   VPBasicBlock *getScalarPreheader() const {
-    return cast<VPBasicBlock>(getScalarHeader()->getSinglePredecessor());
+    return dyn_cast_or_null<VPBasicBlock>(
+        getScalarHeader()->getSinglePredecessor());
   }
 
   /// Return the VPIRBasicBlock wrapping the header of the scalar loop.
@@ -4958,8 +4959,8 @@ class VPlan {
   /// that this relies on unneeded branches to the scalar tail loop being
   /// removed.
   bool hasScalarTail() const {
-    return !(!getScalarPreheader()->hasPredecessors() ||
-             getScalarPreheader()->getSinglePredecessor() == getEntry());
+    auto *ScalarPH = getScalarPreheader();
+    return ScalarPH && ScalarPH->getSinglePredecessor() != getEntry();
   }
 };
 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 7e8496a568643..f49f878000aad 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2835,6 +2835,7 @@ void VPlanTransforms::removeBranchOnConst(VPlan &Plan, bool OnlyLatches) {
   if (OnlyLatches)
     VPDT.emplace(Plan);
 
+  SmallVector<VPBlockBase *> DeadSuccs;
   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
            vp_depth_first_shallow(Plan.getEntry()))) {
     VPValue *Cond;
@@ -2859,15 +2860,43 @@ void VPlanTransforms::removeBranchOnConst(VPlan &Plan, bool OnlyLatches) {
         cast<VPBasicBlock>(VPBB->getSuccessors()[RemovedIdx]);
     assert(count(RemovedSucc->getPredecessors(), VPBB) == 1 &&
            "There must be a single edge between VPBB and its successor");
-    // Values coming from VPBB into phi recipes of RemoveSucc are removed from
+    // Values coming from VPBB into phi recipes of RemovedSucc are removed from
     // these recipes.
     for (VPRecipeBase &R : RemovedSucc->phis())
       cast<VPPhiAccessors>(&R)->removeIncomingValueFor(VPBB);
 
-    // Disconnect blocks and remove the terminator. RemovedSucc will be deleted
-    // automatically on VPlan destruction if it becomes unreachable.
+    // Disconnect blocks and remove the terminator.
     VPBlockUtils::disconnectBlocks(VPBB, RemovedSucc);
     VPBB->back().eraseFromParent();
+    DeadSuccs.push_back(RemovedSucc);
+  }
+
+  // Transitively disconnect all blocks that became unreachable, removing
+  // incoming values from phi recipes and replacing remaining uses with poison.
+  VPTypeAnalysis TypeInfo(Plan);
+  while (!DeadSuccs.empty()) {
+    VPBlockBase *DeadBlock = DeadSuccs.pop_back_val();
+    if (!DeadBlock->getPredecessors().empty())
+      continue;
+    for (VPBlockBase *Succ :
+         SmallVector<VPBlockBase *>(DeadBlock->successors())) {
+      if (auto *SuccBB = dyn_cast<VPBasicBlock>(Succ))
+        for (VPRecipeBase &R : SuccBB->phis())
+          cast<VPPhiAccessors>(&R)->removeIncomingValueFor(DeadBlock);
+      VPBlockUtils::disconnectBlocks(DeadBlock, Succ);
+      if (Succ->getPredecessors().empty())
+        DeadSuccs.push_back(Succ);
+    }
+    auto *DeadBB = dyn_cast<VPBasicBlock>(DeadBlock);
+    if (!DeadBB)
+      continue;
+    for (VPRecipeBase &R : make_early_inc_range(*DeadBB)) {
+      for (VPValue *Def : R.definedValues())
+        if (Def->getNumUsers() > 0)
+          Def->replaceAllUsesWith(Plan.getOrAddLiveIn(
+              PoisonValue::get(TypeInfo.inferScalarType(Def))));
+      R.eraseFromParent();
+    }
   }
 }
 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
index 1d44aeb91d8a5..47a8bbd70505c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
@@ -127,10 +127,9 @@ define void @test_predicated_load_cast_hint(ptr %dst.1, ptr %dst.2, ptr %src, i8
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    br label %[[EXIT:.*]]
 ; CHECK:       [[SCALAR_PH]]:
-; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT:    [[L:%.*]] = load i8, ptr [[SRC]], align 1
 ; CHECK-NEXT:    [[L_EXT:%.*]] = zext i8 [[L]] to i64
 ; CHECK-NEXT:    [[ADD:%.*]] = or i64 [[L_EXT]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
index fabab210fb850..07205354ac300 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
@@ -48,10 +48,9 @@ define void @predicated_uniform_load(ptr %src, i32 %n, ptr %dst, i1 %cond) {
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       scalar.ph:
-; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
 ; CHECK-NEXT:    br i1 [[COND]], label [[LOOP_THEN:%.*]], label [[LOOP_ELSE:%.*]]
 ; CHECK:       loop.then:
 ; CHECK-NEXT:    br label [[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index d4144e0dc9579..3a27718b22767 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -73,10 +73,9 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
 ; RV32:       middle.block:
 ; RV32-NEXT:    br label [[FOR_END:%.*]]
 ; RV32:       scalar.ph:
-; RV32-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
 ; RV32-NEXT:    br label [[FOR_BODY:%.*]]
 ; RV32:       for.body:
-; RV32-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; RV32-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
 ; RV32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDVARS_IV]]
 ; RV32-NEXT:    [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
 ; RV32-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP21]], 100
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index 094500f07b418..6d1a0355367d0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -206,8 +206,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
 ; RV64:       [[MIDDLE_BLOCK]]:
 ; RV64-NEXT:    br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
 ; RV64:       [[SCALAR_PH]]:
-; RV64-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i32 [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
 ; RV64-NEXT:    br label %[[FOR_BODY:.*]]
 ; RV64:       [[FOR_COND_CLEANUP_LOOPEXIT]]:
 ; RV64-NEXT:    br label %[[FOR_COND_CLEANUP]]
@@ -431,8 +429,6 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
 ; RV64:       [[MIDDLE_BLOCK]]:
 ; RV64-NEXT:    br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
 ; RV64:       [[SCALAR_PH]]:
-; RV64-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i32 [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
 ; RV64-NEXT:    br label %[[FOR_BODY:.*]]
 ; RV64:       [[FOR_COND_CLEANUP_LOOPEXIT]]:
 ; RV64-NEXT:    br label %[[FOR_COND_CLEANUP]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/runtime-check-dependent-on-stride.ll b/llvm/test/Transforms/LoopVectorize/RISCV/runtime-check-dependent-on-stride.ll
index 3e7bfe7a860e7..a31bb408dcbc7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/runtime-check-dependent-on-stride.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/runtime-check-dependent-on-stride.ll
@@ -111,10 +111,9 @@ define void @foo(ptr %p, ptr %p.strided, i64 %n, i64 %stride) {
 ; NO-UNIT-STRIDE-MV:       [[MIDDLE_BLOCK]]:
 ; NO-UNIT-STRIDE-MV-NEXT:    br label %[[EXIT:.*]]
 ; NO-UNIT-STRIDE-MV:       [[SCALAR_PH]]:
-; NO-UNIT-STRIDE-MV-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1, %[[VECTOR_SCEVCHECK]] ], [ 1, %[[VECTOR_MEMCHECK]] ]
 ; NO-UNIT-STRIDE-MV-NEXT:    br label %[[HEADER1:.*]]
 ; NO-UNIT-STRIDE-MV:       [[HEADER1]]:
-; NO-UNIT-STRIDE-MV-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER1]] ]
+; NO-UNIT-STRIDE-MV-NEXT:    [[IV:%.*]] = phi i64 [ 1, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER1]] ]
 ; NO-UNIT-STRIDE-MV-NEXT:    [[IV_NEXT]] = add nsw i64 [[IV]], 1
 ; NO-UNIT-STRIDE-MV-NEXT:    [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]]
 ; NO-UNIT-STRIDE-MV-NEXT:    [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 642e28a4823e3..3bd2580fa886b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -636,10 +636,9 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
 ; NOSTRIDED:       middle.block:
 ; NOSTRIDED-NEXT:    br label [[EXIT:%.*]]
 ; NOSTRIDED:       scalar.ph:
-; NOSTRIDED-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
 ; NOSTRIDED-NEXT:    br label [[LOOP:%.*]]
 ; NOSTRIDED:       loop:
-; NOSTRIDED-NEXT:    [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; NOSTRIDED-NEXT:    [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
 ; NOSTRIDED-NEXT:    [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
 ; NOSTRIDED-NEXT:    [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
 ; NOSTRIDED-NEXT:    [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -788,10 +787,9 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
 ; STRIDED:       middle.block:
 ; STRIDED-NEXT:    br label [[EXIT:%.*]]
 ; STRIDED:       scalar.ph:
-; STRIDED-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
 ; STRIDED-NEXT:    br label [[LOOP:%.*]]
 ; STRIDED:       loop:
-; STRIDED-NEXT:    [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; STRIDED-NEXT:    [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
 ; STRIDED-NEXT:    [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
 ; STRIDED-NEXT:    [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
 ; STRIDED-NEXT:    [[X0:%.*]] = load i32, ptr [[Q0]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/RISCV/vplan-riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/VPlan/RISCV/vplan-riscv-vector-reverse.ll
index dc0570110f606..e29e0e52580d7 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/RISCV/vplan-riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/RISCV/vplan-riscv-vector-reverse.ll
@@ -55,8 +55,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
 ; CHECK-NEXT: No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT: scalar.ph:
-; CHECK-NEXT:   EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<%n>, ir-bb<entry> ]
-; CHECK-NEXT:   EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ ir<%n>, ir-bb<entry> ]
 ; CHECK-NEXT: Successor(s): ir-bb<for.body>
 ;
 entry:
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll b/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll
index cf47da0cae2ea..5d73d8d407a7a 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll
@@ -120,13 +120,11 @@ define i32 @simple_csa_int_select(i64 %N, ptr %data, i32 %a) {
 ; CHECK-TF-NEXT:  No successors
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  scalar.ph:
-; CHECK-TF-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-TF-NEXT:    EMIT-SCALAR vp<%bc.merge.rdx> = phi [ ir<-1>, ir-bb<entry> ]
 ; CHECK-TF-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  ir-bb<loop>:
-; CHECK-TF-NEXT:    IR   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-TF-NEXT:    IR   %data.phi = phi i32 [ -1, %entry ], [ %select.data, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-TF-NEXT:    IR   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-TF-NEXT:    IR   %data.phi = phi i32 [ -1, %entry ], [ %select.data, %loop ] (extra operand: ir<-1> from scalar.ph)
 ; CHECK-TF-NEXT:    IR   %ld.addr = getelementptr inbounds i32, ptr %data, i64 %iv
 ; CHECK-TF-NEXT:    IR   %ld = load i32, ptr %ld.addr, align 4
 ; CHECK-TF-NEXT:    IR   %select.cmp = icmp slt i32 %a, %ld
@@ -287,13 +285,11 @@ define i32 @simple_csa_int_load(ptr noalias %a, ptr noalias %b, i32 %default_val
 ; CHECK-TF-NEXT:  No successors
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  scalar.ph:
-; CHECK-TF-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-TF-NEXT:    EMIT-SCALAR vp<%bc.merge.rdx> = phi [ ir<%default_val>, ir-bb<entry> ]
 ; CHECK-TF-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  ir-bb<loop>:
-; CHECK-TF-NEXT:    IR   %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-TF-NEXT:    IR   %data.phi = phi i32 [ %default_val, %entry ], [ %select.data, %latch ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-TF-NEXT:    IR   %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: ir<0> from scalar.ph)
+; CHECK-TF-NEXT:    IR   %data.phi = phi i32 [ %default_val, %entry ], [ %select.data, %latch ] (extra operand: ir<%default_val> from scalar.ph)
 ; CHECK-TF-NEXT:    IR   %a.addr = getelementptr inbounds nuw i32, ptr %a, i64 %iv
 ; CHECK-TF-NEXT:    IR   %ld.a = load i32, ptr %a.addr, align 4
 ; CHECK-TF-NEXT:    IR   %if.cond = icmp sgt i32 %ld.a, %threshold
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/first-order-recurrence-sink-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/VPlan/first-order-recurrence-sink-replicate-region.ll
index 08720f73ef0ef..31a4b97c83878 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/first-order-recurrence-sink-replicate-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/first-order-recurrence-sink-replicate-region.ll
@@ -82,13 +82,11 @@ define void @sink_replicate_region_1(i32 %x, ptr %ptr, ptr noalias %dst) optsize
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %0 = phi i32 [ 0, %entry ], [ %conv, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %0 = phi i32 [ 0, %entry ], [ %conv, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %rem = srem i32 %0, %x
 ; CHECK-NEXT:    IR   %gep = getelementptr i8, ptr %ptr, i32 %iv
 ; CHECK-NEXT:    IR   %lv = load i8, ptr %gep, align 1
@@ -181,13 +179,11 @@ define void @sink_replicate_region_2(i32 %x, i8 %y, ptr %ptr, i32 %z) optsize {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %recur = phi i32 [ 0, %entry ], [ %recur.next, %latch ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %recur = phi i32 [ 0, %entry ], [ %recur.next, %latch ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %recur.next = sext i8 %y to i32
 ; CHECK-NEXT:    IR   %cond = icmp eq i32 %iv, %z
 ; CHECK-NEXT:  No successors
@@ -279,15 +275,12 @@ define i32 @sink_replicate_region_3_reduction(i32 %x, i8 %y, ptr %ptr) optsize {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.merge.rdx> = phi [ ir<1234>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %recur = phi i32 [ 0, %entry ], [ %recur.next, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-NEXT:    IR   %and.red = phi i32 [ 1234, %entry ], [ %and.red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-NEXT:    IR   %recur = phi i32 [ 0, %entry ], [ %recur.next, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %and.red = phi i32 [ 1234, %entry ], [ %and.red.next, %loop ] (extra operand: ir<1234> from scalar.ph)
 ; CHECK-NEXT:    IR   %rem = srem i32 %recur, %x
 ; CHECK-NEXT:    IR   %recur.next = sext i8 %y to i32
 ; CHECK-NEXT:    IR   %add = add i32 %rem, %recur.next
@@ -416,13 +409,11 @@ define void @sink_replicate_region_4_requires_split_at_end_of_block(i32 %x, ptr
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %0 = phi i32 [ 0, %entry ], [ %conv, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %0 = phi i32 [ 0, %entry ], [ %conv, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep = getelementptr i8, ptr %ptr, i32 %iv
 ; CHECK-NEXT:    IR   %rem = srem i32 %0, %x
 ; CHECK-NEXT:    IR   %lv = load i8, ptr %gep, align 1
@@ -523,13 +514,11 @@ define void @sink_replicate_region_after_replicate_region(ptr %ptr, ptr noalias
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %recur = phi i32 [ 0, %entry ], [ %recur.next, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %recur = phi i32 [ 0, %entry ], [ %recur.next, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %rem = srem i32 %recur, %x
 ; CHECK-NEXT:    IR   %rem.div = sdiv i32 20, %rem
 ; CHECK-NEXT:    IR   %recur.next = sext i8 %y to i32
@@ -620,13 +609,11 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, ptr %src, ptr noalias
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<2>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i64 [ 2, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-NEXT:    IR   %.pn = phi i32 [ 0, %entry ], [ %l, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i64 [ 2, %entry ], [ %iv.next, %loop ] (extra operand: ir<2> from scalar.ph)
+; CHECK-NEXT:    IR   %.pn = phi i32 [ 0, %entry ], [ %l, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %val = sdiv i32 %.pn, %x
 ; CHECK-NEXT:    IR   %l = load i32, ptr %src, align 4
 ; CHECK-NEXT:    IR   %gep.dst = getelementptr i32, ptr %dst, i64 %iv
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/icmp-uniforms.ll b/llvm/test/Transforms/LoopVectorize/VPlan/icmp-uniforms.ll
index 5caf66a247e6d..50b52e3e0dc67 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/icmp-uniforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/icmp-uniforms.ll
@@ -147,11 +147,10 @@ define void @test(ptr %ptr) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %cond0 = icmp ult i64 %iv, 13
 ; CHECK-NEXT:    IR   %s = select i1 %cond0, i32 10, i32 20
 ; CHECK-NEXT:    IR   %gep = getelementptr inbounds i32, ptr %ptr, i64 %iv
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-sink-scalars-and-merge.ll
index db02e5b8ed47c..e9bc4057e4fcc 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-sink-scalars-and-merge.ll
@@ -84,11 +84,10 @@ define void @sink1(i32 %k, i32 %x) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %cond = icmp eq i32 %iv, %x
 ; CHECK-NEXT:  No successors
 ; CHECK-NEXT:  }
@@ -195,11 +194,10 @@ define void @sink2(i32 %k) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.b = getelementptr inbounds [2048 x i32], ptr @b, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %lv.b = load i32, ptr %gep.b, align 4
 ; CHECK-NEXT:    IR   %add = add i32 %lv.b, 10
@@ -308,11 +306,10 @@ define void @sink3(i32 %k) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.b = getelementptr inbounds [2048 x i32], ptr @b, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %lv.b = load i32, ptr %gep.b, align 4
 ; CHECK-NEXT:    IR   %add = add i32 %lv.b, 10
@@ -405,11 +402,10 @@ define void @uniform_gep(i64 %k, ptr noalias %A, ptr noalias %B) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<21>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i64 [ 21, %entry ], [ %iv.next, %loop.latch ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i64 [ 21, %entry ], [ %iv.next, %loop.latch ] (extra operand: ir<21> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.A.uniform = getelementptr inbounds i16, ptr %A, i64 0
 ; CHECK-NEXT:    IR   %gep.B = getelementptr inbounds i16, ptr %B, i64 %iv
 ; CHECK-NEXT:    IR   %lv = load i16, ptr %gep.A.uniform, align 1
@@ -517,11 +513,10 @@ define void @pred_cfg1(i32 %k, i32 %j) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %next.0 ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %next.0 ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.b = getelementptr inbounds [2048 x i32], ptr @b, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %c.1 = icmp ult i32 %iv, %j
 ; CHECK-NEXT:    IR   %mul = mul i32 %iv, 10
@@ -637,11 +632,10 @@ define void @pred_cfg2(i32 %k, i32 %j) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %next.1 ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %next.1 ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.b = getelementptr inbounds [2048 x i32], ptr @b, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %mul = mul i32 %iv, 10
 ; CHECK-NEXT:    IR   %gep.a = getelementptr inbounds [2048 x i32], ptr @a, i32 0, i32 %mul
@@ -766,11 +760,10 @@ define void @pred_cfg3(i32 %k, i32 %j) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %next.1 ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %next.1 ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.b = getelementptr inbounds [2048 x i32], ptr @b, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %mul = mul i32 %iv, 10
 ; CHECK-NEXT:    IR   %gep.a = getelementptr inbounds [2048 x i32], ptr @a, i32 0, i32 %mul
@@ -896,11 +889,10 @@ define void @merge_3_replicate_region(i32 %k, i32 %j) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.a = getelementptr inbounds [2048 x i32], ptr @a, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %lv.a = load i32, ptr %gep.a, align 4
 ; CHECK-NEXT:    IR   %gep.b = getelementptr inbounds [2048 x i32], ptr @b, i32 0, i32 %iv
@@ -1000,11 +992,10 @@ define void @update_2_uses_in_same_recipe_in_merged_block(i32 %k) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.a = getelementptr inbounds [2048 x i32], ptr @a, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %lv.a = load i32, ptr %gep.a, align 4
 ; CHECK-NEXT:    IR   %div = sdiv i32 %lv.a, %lv.a
@@ -1109,13 +1100,11 @@ define void @recipe_in_merge_candidate_used_by_first_order_recurrence(i32 %k) {
 ; CHECK-NEXT:  No successors
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  scalar.ph:
-; CHECK-NEXT:    EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<0>, ir-bb<entry> ]
-; CHECK-NEXT:    EMIT-SCALAR vp<%scalar.recur.init> = phi [ ir<0>, ir-bb<entry> ]
 ; CHECK-NEXT:  Successor(s): ir-bb<loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  ir-bb<loop>:
-; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-NEXT:    IR   %for = phi i32 [ 0, %entry ], [ %lv.a, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph)
+; CHECK-NEXT:    IR   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: ir<0> from scalar.ph)
+; CHECK-NEXT:    IR   %for = phi i32 [ 0, %entry ], [ %lv.a, %loop ] (extra operand: ir<0> from scalar.ph)
 ; CHECK-NEXT:    IR   %gep.a = getelementptr inbounds [2048 x i32], ptr @a, i32 0, i32 %iv
 ; CHECK-NEXT:    IR   %lv.a = load i32, ptr %gep.a, align 4
 ; CHECK-NEXT:    IR   %div = sdiv i32 %for, %lv.a



More information about the llvm-commits mailing list