[llvm] [LoopVectorize] Don't scalarize predicated instruction with optsize (PR #129265)
John Brawn via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 28 08:25:05 PST 2025
https://github.com/john-brawn-arm created https://github.com/llvm/llvm-project/pull/129265
Scalarizing predicated instructions results in a worse code size impact than having a scalar epilogue, which we already forbid with optsize, so we shouldn't allow it.
A couple of notes on the implementation:
* OptForSizeBasedOnProfile has been moved into the cost model and renamed to OptForSize, as shouldOptimizeForSize checks both the function attribute and profile.
* We still allow tail folding if we don't need to scalarize any instructions, e.g. see foo_optsize in the test Transforms/LoopVectorize/X86/optsize.ll.
This change requires a lot of test changes. Where a test is specifically testing scalarized predicated instructions, I've adjusted it so that it still does, either by removing optsize if it makes no difference or by forcing tail predication to be enabled. For tests of optsize I've updated the test to check that we're not scalarizing.
Fixes #66652
From 8544164c34943eafa9d4bfa4cbab2bffb2753d4e Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn at arm.com>
Date: Tue, 14 Jan 2025 14:45:29 +0000
Subject: [PATCH] [LoopVectorize] Don't scalarize predicated instruction with
optsize
Scalarizing predicated instructions results in a worse code size
impact than having a scalar epilogue, which we already forbid with
optsize, so we shouldn't allow it.
A couple of notes on the implementation:
* OptForSizeBasedOnProfile has been moved into the cost model and
renamed to OptForSize, as shouldOptimizeForSize checks both the
function attribute and profile.
* We still allow tail folding if we don't need to scalarize any
instructions, e.g. see foo_optsize in the test
Transforms/LoopVectorize/X86/optsize.ll.
This change requires a lot of test changes. Where a test is
specifically testing scalarized predicated instructions, I've adjusted
it so that it still does, either by removing optsize if it makes no
difference or by forcing tail predication to be enabled. For tests of
optsize I've updated the test to check that we're not scalarizing.
Fixes #66652
---
.../Transforms/Vectorize/LoopVectorize.cpp | 43 ++-
.../AArch64/conditional-branches-cost.ll | 48 +--
.../LoopVectorize/AArch64/optsize_minsize.ll | 179 +---------
.../LoopVectorize/ARM/optsize_minsize.ll | 187 +----------
.../Transforms/LoopVectorize/X86/optsize.ll | 164 ++++-----
.../LoopVectorize/X86/small-size.ll | 313 +++---------------
.../LoopVectorize/X86/tail_loop_folding.ll | 5 +-
.../x86-interleaved-accesses-masked-group.ll | 4 +-
.../dont-fold-tail-for-divisible-TC.ll | 2 +-
...-order-recurrence-sink-replicate-region.ll | 2 +-
.../pr45679-fold-tail-by-masking.ll | 2 +-
.../pr51614-fold-tail-by-masking.ll | 2 +-
...e-reduction-results-in-tail-folded-loop.ll | 2 +-
.../Transforms/LoopVectorize/struct-return.ll | 2 +-
.../tail-folding-counting-down.ll | 2 +-
.../vplan-sink-scalars-and-merge.ll | 47 +--
16 files changed, 202 insertions(+), 802 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index e2612698b6b0f..601591cae9dc5 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -491,12 +491,7 @@ class InnerLoopVectorizer {
MinProfitableTripCount(MinProfitableTripCount), UF(UnrollFactor),
Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
PSI(PSI), RTChecks(RTChecks), Plan(Plan),
- VectorPHVPB(Plan.getEntry()->getSingleSuccessor()) {
- // Query this against the original loop and save it here because the profile
- // of the original loop header may change as the transformation happens.
- OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
- OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
- }
+ VectorPHVPB(Plan.getEntry()->getSingleSuccessor()) {}
virtual ~InnerLoopVectorizer() = default;
@@ -669,10 +664,6 @@ class InnerLoopVectorizer {
BlockFrequencyInfo *BFI;
ProfileSummaryInfo *PSI;
- // Whether this loop should be optimized for size based on profile guided size
- // optimizatios.
- bool OptForSizeBasedOnProfile;
-
/// Structure to hold information about generated runtime checks, responsible
/// for cleaning the checks, if vectorization turns out unprofitable.
GeneratedRTChecks &RTChecks;
@@ -986,13 +977,18 @@ class LoopVectorizationCostModel {
AssumptionCache *AC,
OptimizationRemarkEmitter *ORE, const Function *F,
const LoopVectorizeHints *Hints,
- InterleavedAccessInfo &IAI)
+ InterleavedAccessInfo &IAI,
+ ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
: ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
Hints(Hints), InterleaveInfo(IAI) {
if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
initializeVScaleForTuning();
CostKind = F->hasMinSize() ? TTI::TCK_CodeSize : TTI::TCK_RecipThroughput;
+ // Query this against the original loop and save it here because the profile
+ // of the original loop header may change as the transformation happens.
+ OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
+ PGSOQueryType::IRPass);
}
/// \return An upper bound for the vectorization factors (both fixed and
@@ -1833,6 +1829,10 @@ class LoopVectorizationCostModel {
/// The kind of cost that we are calculating
TTI::TargetCostKind CostKind;
+
+ /// Whether this loop should be optimized for size based on function attribute
+ /// or profile information.
+ bool OptForSize;
};
} // end namespace llvm
@@ -2612,9 +2612,8 @@ BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
if (!SCEVCheckBlock)
return nullptr;
- assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
- (OptForSizeBasedOnProfile &&
- Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
+ assert((!Cost->OptForSize ||
+ Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
"Cannot SCEV check stride or overflow when optimizing for size");
assert(!LoopBypassBlocks.empty() &&
"Should already be a bypass block due to iteration count check");
@@ -2639,7 +2638,7 @@ BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
if (!MemCheckBlock)
return nullptr;
- if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
+ if (Cost->OptForSize) {
assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
"Cannot emit memory checks when optimizing for size, unless forced "
"to vectorize.");
@@ -5518,6 +5517,9 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
// includes the scalarization overhead of the predicated instruction.
InstructionCost VectorCost = getInstructionCost(I, VF);
+ if (VectorCost == InstructionCost::getInvalid())
+ continue;
+
// Compute the cost of the scalarized instruction. This cost is the cost of
// the instruction as if it wasn't if-converted and instead remained in the
// predicated block. We will scale this cost by block probability after
@@ -5660,6 +5662,13 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
if (VF.isScalable())
return InstructionCost::getInvalid();
+ // Don't scalarize predicated instructions when optimizing for size unless
+ // we're forced to.
+ if (isPredicatedInst(I) && OptForSize &&
+ !ForceTailFoldingStyle.getNumOccurrences() &&
+ Hints->getForce() != LoopVectorizeHints::FK_Enabled)
+ return InstructionCost::getInvalid();
+
Type *ValTy = getLoadStoreType(I);
auto *SE = PSE.getSE();
@@ -10090,7 +10099,7 @@ static bool processLoopInVPlanNativePath(
getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, *LVL, &IAI);
LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
- &Hints, IAI);
+ &Hints, IAI, PSI, BFI);
// Use the planner for outer loop vectorization.
// TODO: CM is not used at this point inside the planner. Turn CM into an
// optional argument if we don't need it in the future.
@@ -10627,7 +10636,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
// Use the cost model.
LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
- F, &Hints, IAI);
+ F, &Hints, IAI, PSI, BFI);
// Use the planner for vectorization.
LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
ORE);
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index cf4fc143fe8c3..843ff4fd5477e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -1588,55 +1588,29 @@ exit:
ret void
}
-define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) optsize {
+define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) {
; DEFAULT-LABEL: define void @redundant_branch_and_tail_folding(
-; DEFAULT-SAME: ptr [[DST:%.*]], i1 [[C:%.*]]) #[[ATTR4:[0-9]+]] {
+; DEFAULT-SAME: ptr [[DST:%.*]], i1 [[C:%.*]]) {
; DEFAULT-NEXT: entry:
; DEFAULT-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; DEFAULT: vector.ph:
; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
; DEFAULT: vector.body:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; DEFAULT-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; DEFAULT-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IND]], splat (i64 20)
+; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DEFAULT-NEXT: [[VEC_IND1:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DEFAULT-NEXT: [[VEC_IND:%.*]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; DEFAULT-NEXT: [[TMP1:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], splat (i64 1)
; DEFAULT-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[TMP1]] to <4 x i32>
-; DEFAULT-NEXT: [[TMP3:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; DEFAULT: pred.store.if:
-; DEFAULT-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE]]
-; DEFAULT: pred.store.continue:
-; DEFAULT-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
-; DEFAULT: pred.store.if1:
-; DEFAULT-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
-; DEFAULT-NEXT: store i32 [[TMP6]], ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE2]]
-; DEFAULT: pred.store.continue2:
-; DEFAULT-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
-; DEFAULT: pred.store.if3:
-; DEFAULT-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
-; DEFAULT-NEXT: store i32 [[TMP8]], ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE4]]
-; DEFAULT: pred.store.continue4:
-; DEFAULT-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
-; DEFAULT: pred.store.if5:
; DEFAULT-NEXT: [[TMP10:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3
; DEFAULT-NEXT: store i32 [[TMP10]], ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE6]]
-; DEFAULT: pred.store.continue6:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
-; DEFAULT-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
-; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
+; DEFAULT-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; DEFAULT: middle.block:
-; DEFAULT-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; DEFAULT-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]]
; DEFAULT: scalar.ph:
-; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 24, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; DEFAULT-NEXT: br label [[LOOP_HEADER:%.*]]
; DEFAULT: loop.header:
; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
@@ -1653,7 +1627,7 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) optsize {
; DEFAULT-NEXT: ret void
;
; PRED-LABEL: define void @redundant_branch_and_tail_folding(
-; PRED-SAME: ptr [[DST:%.*]], i1 [[C:%.*]]) #[[ATTR4:[0-9]+]] {
+; PRED-SAME: ptr [[DST:%.*]], i1 [[C:%.*]]) {
; PRED-NEXT: entry:
; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; PRED: vector.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
index 291b8f3348f09..1fcb0eb8048b2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
@@ -229,7 +229,6 @@ for.cond.cleanup:
; This should be vectorized and tail predicated without optsize, as that's
; faster, but not with optsize, as it's much larger.
-; FIXME: Currently we avoid tail predication only with minsize
define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) {
; DEFAULT-LABEL: define void @tail_predicate_without_optsize(
; DEFAULT-SAME: ptr [[P:%.*]], i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i32 [[N:%.*]]) {
@@ -429,182 +428,9 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; OPTSIZE-LABEL: define void @tail_predicate_without_optsize(
; OPTSIZE-SAME: ptr [[P:%.*]], i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
; OPTSIZE-NEXT: [[ENTRY:.*]]:
-; OPTSIZE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; OPTSIZE: [[VECTOR_PH]]:
-; OPTSIZE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
-; OPTSIZE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; OPTSIZE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <16 x i8> poison, i8 [[B]], i64 0
-; OPTSIZE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT3]], <16 x i8> poison, <16 x i32> zeroinitializer
-; OPTSIZE-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
-; OPTSIZE-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT5]], <16 x i8> poison, <16 x i32> zeroinitializer
-; OPTSIZE-NEXT: br label %[[VECTOR_BODY:.*]]
-; OPTSIZE: [[VECTOR_BODY]]:
-; OPTSIZE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE36:.*]] ]
-; OPTSIZE-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE36]] ]
-; OPTSIZE-NEXT: [[VEC_IND1:%.*]] = phi <16 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[PRED_STORE_CONTINUE36]] ]
-; OPTSIZE-NEXT: [[TMP72:%.*]] = icmp ule <16 x i64> [[VEC_IND]], splat (i64 14)
-; OPTSIZE-NEXT: [[TMP1:%.*]] = mul <16 x i8> [[BROADCAST_SPLAT]], [[VEC_IND1]]
-; OPTSIZE-NEXT: [[TMP2:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 1)
-; OPTSIZE-NEXT: [[TMP3:%.*]] = mul <16 x i8> [[TMP2]], [[BROADCAST_SPLAT4]]
-; OPTSIZE-NEXT: [[TMP4:%.*]] = add <16 x i8> [[TMP3]], [[TMP1]]
-; OPTSIZE-NEXT: [[TMP5:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 2)
-; OPTSIZE-NEXT: [[TMP6:%.*]] = mul <16 x i8> [[TMP5]], [[BROADCAST_SPLAT6]]
-; OPTSIZE-NEXT: [[TMP7:%.*]] = add <16 x i8> [[TMP4]], [[TMP6]]
-; OPTSIZE-NEXT: [[TMP8:%.*]] = extractelement <16 x i1> [[TMP72]], i32 0
-; OPTSIZE-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; OPTSIZE: [[PRED_STORE_IF]]:
-; OPTSIZE-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
-; OPTSIZE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP9]]
-; OPTSIZE-NEXT: [[TMP11:%.*]] = extractelement <16 x i8> [[TMP7]], i32 0
-; OPTSIZE-NEXT: store i8 [[TMP11]], ptr [[TMP10]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; OPTSIZE: [[PRED_STORE_CONTINUE]]:
-; OPTSIZE-NEXT: [[TMP12:%.*]] = extractelement <16 x i1> [[TMP72]], i32 1
-; OPTSIZE-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
-; OPTSIZE: [[PRED_STORE_IF7]]:
-; OPTSIZE-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 1
-; OPTSIZE-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP13]]
-; OPTSIZE-NEXT: [[TMP15:%.*]] = extractelement <16 x i8> [[TMP7]], i32 1
-; OPTSIZE-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE8]]
-; OPTSIZE: [[PRED_STORE_CONTINUE8]]:
-; OPTSIZE-NEXT: [[TMP16:%.*]] = extractelement <16 x i1> [[TMP72]], i32 2
-; OPTSIZE-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
-; OPTSIZE: [[PRED_STORE_IF9]]:
-; OPTSIZE-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
-; OPTSIZE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP17]]
-; OPTSIZE-NEXT: [[TMP19:%.*]] = extractelement <16 x i8> [[TMP7]], i32 2
-; OPTSIZE-NEXT: store i8 [[TMP19]], ptr [[TMP18]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE10]]
-; OPTSIZE: [[PRED_STORE_CONTINUE10]]:
-; OPTSIZE-NEXT: [[TMP20:%.*]] = extractelement <16 x i1> [[TMP72]], i32 3
-; OPTSIZE-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
-; OPTSIZE: [[PRED_STORE_IF11]]:
-; OPTSIZE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 3
-; OPTSIZE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP21]]
-; OPTSIZE-NEXT: [[TMP23:%.*]] = extractelement <16 x i8> [[TMP7]], i32 3
-; OPTSIZE-NEXT: store i8 [[TMP23]], ptr [[TMP22]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE12]]
-; OPTSIZE: [[PRED_STORE_CONTINUE12]]:
-; OPTSIZE-NEXT: [[TMP24:%.*]] = extractelement <16 x i1> [[TMP72]], i32 4
-; OPTSIZE-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
-; OPTSIZE: [[PRED_STORE_IF13]]:
-; OPTSIZE-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 4
-; OPTSIZE-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP25]]
-; OPTSIZE-NEXT: [[TMP27:%.*]] = extractelement <16 x i8> [[TMP7]], i32 4
-; OPTSIZE-NEXT: store i8 [[TMP27]], ptr [[TMP26]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE14]]
-; OPTSIZE: [[PRED_STORE_CONTINUE14]]:
-; OPTSIZE-NEXT: [[TMP28:%.*]] = extractelement <16 x i1> [[TMP72]], i32 5
-; OPTSIZE-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
-; OPTSIZE: [[PRED_STORE_IF15]]:
-; OPTSIZE-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 5
-; OPTSIZE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP29]]
-; OPTSIZE-NEXT: [[TMP31:%.*]] = extractelement <16 x i8> [[TMP7]], i32 5
-; OPTSIZE-NEXT: store i8 [[TMP31]], ptr [[TMP30]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE16]]
-; OPTSIZE: [[PRED_STORE_CONTINUE16]]:
-; OPTSIZE-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP72]], i32 6
-; OPTSIZE-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18:.*]]
-; OPTSIZE: [[PRED_STORE_IF17]]:
-; OPTSIZE-NEXT: [[TMP33:%.*]] = add i64 [[INDEX]], 6
-; OPTSIZE-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP33]]
-; OPTSIZE-NEXT: [[TMP35:%.*]] = extractelement <16 x i8> [[TMP7]], i32 6
-; OPTSIZE-NEXT: store i8 [[TMP35]], ptr [[TMP34]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE18]]
-; OPTSIZE: [[PRED_STORE_CONTINUE18]]:
-; OPTSIZE-NEXT: [[TMP36:%.*]] = extractelement <16 x i1> [[TMP72]], i32 7
-; OPTSIZE-NEXT: br i1 [[TMP36]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]]
-; OPTSIZE: [[PRED_STORE_IF19]]:
-; OPTSIZE-NEXT: [[TMP37:%.*]] = add i64 [[INDEX]], 7
-; OPTSIZE-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP37]]
-; OPTSIZE-NEXT: [[TMP39:%.*]] = extractelement <16 x i8> [[TMP7]], i32 7
-; OPTSIZE-NEXT: store i8 [[TMP39]], ptr [[TMP38]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE20]]
-; OPTSIZE: [[PRED_STORE_CONTINUE20]]:
-; OPTSIZE-NEXT: [[TMP40:%.*]] = extractelement <16 x i1> [[TMP72]], i32 8
-; OPTSIZE-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]]
-; OPTSIZE: [[PRED_STORE_IF21]]:
-; OPTSIZE-NEXT: [[TMP41:%.*]] = add i64 [[INDEX]], 8
-; OPTSIZE-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP41]]
-; OPTSIZE-NEXT: [[TMP43:%.*]] = extractelement <16 x i8> [[TMP7]], i32 8
-; OPTSIZE-NEXT: store i8 [[TMP43]], ptr [[TMP42]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE22]]
-; OPTSIZE: [[PRED_STORE_CONTINUE22]]:
-; OPTSIZE-NEXT: [[TMP44:%.*]] = extractelement <16 x i1> [[TMP72]], i32 9
-; OPTSIZE-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]]
-; OPTSIZE: [[PRED_STORE_IF23]]:
-; OPTSIZE-NEXT: [[TMP45:%.*]] = add i64 [[INDEX]], 9
-; OPTSIZE-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP45]]
-; OPTSIZE-NEXT: [[TMP47:%.*]] = extractelement <16 x i8> [[TMP7]], i32 9
-; OPTSIZE-NEXT: store i8 [[TMP47]], ptr [[TMP46]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE24]]
-; OPTSIZE: [[PRED_STORE_CONTINUE24]]:
-; OPTSIZE-NEXT: [[TMP48:%.*]] = extractelement <16 x i1> [[TMP72]], i32 10
-; OPTSIZE-NEXT: br i1 [[TMP48]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]]
-; OPTSIZE: [[PRED_STORE_IF25]]:
-; OPTSIZE-NEXT: [[TMP49:%.*]] = add i64 [[INDEX]], 10
-; OPTSIZE-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP49]]
-; OPTSIZE-NEXT: [[TMP51:%.*]] = extractelement <16 x i8> [[TMP7]], i32 10
-; OPTSIZE-NEXT: store i8 [[TMP51]], ptr [[TMP50]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE26]]
-; OPTSIZE: [[PRED_STORE_CONTINUE26]]:
-; OPTSIZE-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP72]], i32 11
-; OPTSIZE-NEXT: br i1 [[TMP52]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]]
-; OPTSIZE: [[PRED_STORE_IF27]]:
-; OPTSIZE-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], 11
-; OPTSIZE-NEXT: [[TMP54:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP53]]
-; OPTSIZE-NEXT: [[TMP55:%.*]] = extractelement <16 x i8> [[TMP7]], i32 11
-; OPTSIZE-NEXT: store i8 [[TMP55]], ptr [[TMP54]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE28]]
-; OPTSIZE: [[PRED_STORE_CONTINUE28]]:
-; OPTSIZE-NEXT: [[TMP56:%.*]] = extractelement <16 x i1> [[TMP72]], i32 12
-; OPTSIZE-NEXT: br i1 [[TMP56]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30:.*]]
-; OPTSIZE: [[PRED_STORE_IF29]]:
-; OPTSIZE-NEXT: [[TMP57:%.*]] = add i64 [[INDEX]], 12
-; OPTSIZE-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP57]]
-; OPTSIZE-NEXT: [[TMP59:%.*]] = extractelement <16 x i8> [[TMP7]], i32 12
-; OPTSIZE-NEXT: store i8 [[TMP59]], ptr [[TMP58]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE30]]
-; OPTSIZE: [[PRED_STORE_CONTINUE30]]:
-; OPTSIZE-NEXT: [[TMP60:%.*]] = extractelement <16 x i1> [[TMP72]], i32 13
-; OPTSIZE-NEXT: br i1 [[TMP60]], label %[[PRED_STORE_IF31:.*]], label %[[PRED_STORE_CONTINUE32:.*]]
-; OPTSIZE: [[PRED_STORE_IF31]]:
-; OPTSIZE-NEXT: [[TMP61:%.*]] = add i64 [[INDEX]], 13
-; OPTSIZE-NEXT: [[TMP62:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP61]]
-; OPTSIZE-NEXT: [[TMP63:%.*]] = extractelement <16 x i8> [[TMP7]], i32 13
-; OPTSIZE-NEXT: store i8 [[TMP63]], ptr [[TMP62]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE32]]
-; OPTSIZE: [[PRED_STORE_CONTINUE32]]:
-; OPTSIZE-NEXT: [[TMP64:%.*]] = extractelement <16 x i1> [[TMP72]], i32 14
-; OPTSIZE-NEXT: br i1 [[TMP64]], label %[[PRED_STORE_IF33:.*]], label %[[PRED_STORE_CONTINUE34:.*]]
-; OPTSIZE: [[PRED_STORE_IF33]]:
-; OPTSIZE-NEXT: [[TMP65:%.*]] = add i64 [[INDEX]], 14
-; OPTSIZE-NEXT: [[TMP66:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP65]]
-; OPTSIZE-NEXT: [[TMP67:%.*]] = extractelement <16 x i8> [[TMP7]], i32 14
-; OPTSIZE-NEXT: store i8 [[TMP67]], ptr [[TMP66]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE34]]
-; OPTSIZE: [[PRED_STORE_CONTINUE34]]:
-; OPTSIZE-NEXT: [[TMP68:%.*]] = extractelement <16 x i1> [[TMP72]], i32 15
-; OPTSIZE-NEXT: br i1 [[TMP68]], label %[[PRED_STORE_IF35:.*]], label %[[PRED_STORE_CONTINUE36]]
-; OPTSIZE: [[PRED_STORE_IF35]]:
-; OPTSIZE-NEXT: [[TMP69:%.*]] = add i64 [[INDEX]], 15
-; OPTSIZE-NEXT: [[TMP70:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP69]]
-; OPTSIZE-NEXT: [[TMP71:%.*]] = extractelement <16 x i8> [[TMP7]], i32 15
-; OPTSIZE-NEXT: store i8 [[TMP71]], ptr [[TMP70]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE36]]
-; OPTSIZE: [[PRED_STORE_CONTINUE36]]:
-; OPTSIZE-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
-; OPTSIZE-NEXT: [[VEC_IND_NEXT2]] = add <16 x i8> [[VEC_IND1]], splat (i8 16)
-; OPTSIZE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; OPTSIZE-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-; OPTSIZE: [[MIDDLE_BLOCK]]:
-; OPTSIZE-NEXT: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; OPTSIZE: [[SCALAR_PH]]:
-; OPTSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; OPTSIZE-NEXT: [[TMP0:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP0]]
; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP0]], 1
@@ -617,7 +443,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; OPTSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -1295,4 +1121,3 @@ attributes #0 = { "target-features"="+sve" }
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.enable", i1 true}
-
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
index 02874d4457219..0bd9cfce7bb93 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
@@ -225,7 +225,6 @@ for.cond.cleanup:
; This should be vectorized and tail predicated without optsize, as that's
; faster, but not with optsize, as it's much larger.
-; FIXME: Currently we avoid tail predication only with minsize
define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) {
; DEFAULT-LABEL: define void @tail_predicate_without_optsize(
; DEFAULT-SAME: ptr [[P:%.*]], i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i32 [[N:%.*]]) {
@@ -425,182 +424,9 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; OPTSIZE-LABEL: define void @tail_predicate_without_optsize(
; OPTSIZE-SAME: ptr [[P:%.*]], i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
; OPTSIZE-NEXT: [[ENTRY:.*]]:
-; OPTSIZE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; OPTSIZE: [[VECTOR_PH]]:
-; OPTSIZE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
-; OPTSIZE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; OPTSIZE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <16 x i8> poison, i8 [[B]], i64 0
-; OPTSIZE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT3]], <16 x i8> poison, <16 x i32> zeroinitializer
-; OPTSIZE-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
-; OPTSIZE-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT5]], <16 x i8> poison, <16 x i32> zeroinitializer
-; OPTSIZE-NEXT: br label %[[VECTOR_BODY:.*]]
-; OPTSIZE: [[VECTOR_BODY]]:
-; OPTSIZE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE36:.*]] ]
-; OPTSIZE-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE36]] ]
-; OPTSIZE-NEXT: [[VEC_IND1:%.*]] = phi <16 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[PRED_STORE_CONTINUE36]] ]
-; OPTSIZE-NEXT: [[TMP72:%.*]] = icmp ule <16 x i64> [[VEC_IND]], splat (i64 14)
-; OPTSIZE-NEXT: [[TMP1:%.*]] = mul <16 x i8> [[BROADCAST_SPLAT]], [[VEC_IND1]]
-; OPTSIZE-NEXT: [[TMP2:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 1)
-; OPTSIZE-NEXT: [[TMP3:%.*]] = mul <16 x i8> [[TMP2]], [[BROADCAST_SPLAT4]]
-; OPTSIZE-NEXT: [[TMP4:%.*]] = add <16 x i8> [[TMP3]], [[TMP1]]
-; OPTSIZE-NEXT: [[TMP5:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 2)
-; OPTSIZE-NEXT: [[TMP6:%.*]] = mul <16 x i8> [[TMP5]], [[BROADCAST_SPLAT6]]
-; OPTSIZE-NEXT: [[TMP7:%.*]] = add <16 x i8> [[TMP4]], [[TMP6]]
-; OPTSIZE-NEXT: [[TMP8:%.*]] = extractelement <16 x i1> [[TMP72]], i32 0
-; OPTSIZE-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; OPTSIZE: [[PRED_STORE_IF]]:
-; OPTSIZE-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
-; OPTSIZE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP9]]
-; OPTSIZE-NEXT: [[TMP11:%.*]] = extractelement <16 x i8> [[TMP7]], i32 0
-; OPTSIZE-NEXT: store i8 [[TMP11]], ptr [[TMP10]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; OPTSIZE: [[PRED_STORE_CONTINUE]]:
-; OPTSIZE-NEXT: [[TMP12:%.*]] = extractelement <16 x i1> [[TMP72]], i32 1
-; OPTSIZE-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
-; OPTSIZE: [[PRED_STORE_IF7]]:
-; OPTSIZE-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 1
-; OPTSIZE-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP13]]
-; OPTSIZE-NEXT: [[TMP15:%.*]] = extractelement <16 x i8> [[TMP7]], i32 1
-; OPTSIZE-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE8]]
-; OPTSIZE: [[PRED_STORE_CONTINUE8]]:
-; OPTSIZE-NEXT: [[TMP16:%.*]] = extractelement <16 x i1> [[TMP72]], i32 2
-; OPTSIZE-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
-; OPTSIZE: [[PRED_STORE_IF9]]:
-; OPTSIZE-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
-; OPTSIZE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP17]]
-; OPTSIZE-NEXT: [[TMP19:%.*]] = extractelement <16 x i8> [[TMP7]], i32 2
-; OPTSIZE-NEXT: store i8 [[TMP19]], ptr [[TMP18]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE10]]
-; OPTSIZE: [[PRED_STORE_CONTINUE10]]:
-; OPTSIZE-NEXT: [[TMP20:%.*]] = extractelement <16 x i1> [[TMP72]], i32 3
-; OPTSIZE-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
-; OPTSIZE: [[PRED_STORE_IF11]]:
-; OPTSIZE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 3
-; OPTSIZE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP21]]
-; OPTSIZE-NEXT: [[TMP23:%.*]] = extractelement <16 x i8> [[TMP7]], i32 3
-; OPTSIZE-NEXT: store i8 [[TMP23]], ptr [[TMP22]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE12]]
-; OPTSIZE: [[PRED_STORE_CONTINUE12]]:
-; OPTSIZE-NEXT: [[TMP24:%.*]] = extractelement <16 x i1> [[TMP72]], i32 4
-; OPTSIZE-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
-; OPTSIZE: [[PRED_STORE_IF13]]:
-; OPTSIZE-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 4
-; OPTSIZE-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP25]]
-; OPTSIZE-NEXT: [[TMP27:%.*]] = extractelement <16 x i8> [[TMP7]], i32 4
-; OPTSIZE-NEXT: store i8 [[TMP27]], ptr [[TMP26]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE14]]
-; OPTSIZE: [[PRED_STORE_CONTINUE14]]:
-; OPTSIZE-NEXT: [[TMP28:%.*]] = extractelement <16 x i1> [[TMP72]], i32 5
-; OPTSIZE-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
-; OPTSIZE: [[PRED_STORE_IF15]]:
-; OPTSIZE-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 5
-; OPTSIZE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP29]]
-; OPTSIZE-NEXT: [[TMP31:%.*]] = extractelement <16 x i8> [[TMP7]], i32 5
-; OPTSIZE-NEXT: store i8 [[TMP31]], ptr [[TMP30]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE16]]
-; OPTSIZE: [[PRED_STORE_CONTINUE16]]:
-; OPTSIZE-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP72]], i32 6
-; OPTSIZE-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18:.*]]
-; OPTSIZE: [[PRED_STORE_IF17]]:
-; OPTSIZE-NEXT: [[TMP33:%.*]] = add i64 [[INDEX]], 6
-; OPTSIZE-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP33]]
-; OPTSIZE-NEXT: [[TMP35:%.*]] = extractelement <16 x i8> [[TMP7]], i32 6
-; OPTSIZE-NEXT: store i8 [[TMP35]], ptr [[TMP34]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE18]]
-; OPTSIZE: [[PRED_STORE_CONTINUE18]]:
-; OPTSIZE-NEXT: [[TMP36:%.*]] = extractelement <16 x i1> [[TMP72]], i32 7
-; OPTSIZE-NEXT: br i1 [[TMP36]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]]
-; OPTSIZE: [[PRED_STORE_IF19]]:
-; OPTSIZE-NEXT: [[TMP37:%.*]] = add i64 [[INDEX]], 7
-; OPTSIZE-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP37]]
-; OPTSIZE-NEXT: [[TMP39:%.*]] = extractelement <16 x i8> [[TMP7]], i32 7
-; OPTSIZE-NEXT: store i8 [[TMP39]], ptr [[TMP38]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE20]]
-; OPTSIZE: [[PRED_STORE_CONTINUE20]]:
-; OPTSIZE-NEXT: [[TMP40:%.*]] = extractelement <16 x i1> [[TMP72]], i32 8
-; OPTSIZE-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]]
-; OPTSIZE: [[PRED_STORE_IF21]]:
-; OPTSIZE-NEXT: [[TMP41:%.*]] = add i64 [[INDEX]], 8
-; OPTSIZE-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP41]]
-; OPTSIZE-NEXT: [[TMP43:%.*]] = extractelement <16 x i8> [[TMP7]], i32 8
-; OPTSIZE-NEXT: store i8 [[TMP43]], ptr [[TMP42]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE22]]
-; OPTSIZE: [[PRED_STORE_CONTINUE22]]:
-; OPTSIZE-NEXT: [[TMP44:%.*]] = extractelement <16 x i1> [[TMP72]], i32 9
-; OPTSIZE-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]]
-; OPTSIZE: [[PRED_STORE_IF23]]:
-; OPTSIZE-NEXT: [[TMP45:%.*]] = add i64 [[INDEX]], 9
-; OPTSIZE-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP45]]
-; OPTSIZE-NEXT: [[TMP47:%.*]] = extractelement <16 x i8> [[TMP7]], i32 9
-; OPTSIZE-NEXT: store i8 [[TMP47]], ptr [[TMP46]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE24]]
-; OPTSIZE: [[PRED_STORE_CONTINUE24]]:
-; OPTSIZE-NEXT: [[TMP48:%.*]] = extractelement <16 x i1> [[TMP72]], i32 10
-; OPTSIZE-NEXT: br i1 [[TMP48]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]]
-; OPTSIZE: [[PRED_STORE_IF25]]:
-; OPTSIZE-NEXT: [[TMP49:%.*]] = add i64 [[INDEX]], 10
-; OPTSIZE-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP49]]
-; OPTSIZE-NEXT: [[TMP51:%.*]] = extractelement <16 x i8> [[TMP7]], i32 10
-; OPTSIZE-NEXT: store i8 [[TMP51]], ptr [[TMP50]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE26]]
-; OPTSIZE: [[PRED_STORE_CONTINUE26]]:
-; OPTSIZE-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP72]], i32 11
-; OPTSIZE-NEXT: br i1 [[TMP52]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]]
-; OPTSIZE: [[PRED_STORE_IF27]]:
-; OPTSIZE-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], 11
-; OPTSIZE-NEXT: [[TMP54:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP53]]
-; OPTSIZE-NEXT: [[TMP55:%.*]] = extractelement <16 x i8> [[TMP7]], i32 11
-; OPTSIZE-NEXT: store i8 [[TMP55]], ptr [[TMP54]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE28]]
-; OPTSIZE: [[PRED_STORE_CONTINUE28]]:
-; OPTSIZE-NEXT: [[TMP56:%.*]] = extractelement <16 x i1> [[TMP72]], i32 12
-; OPTSIZE-NEXT: br i1 [[TMP56]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30:.*]]
-; OPTSIZE: [[PRED_STORE_IF29]]:
-; OPTSIZE-NEXT: [[TMP57:%.*]] = add i64 [[INDEX]], 12
-; OPTSIZE-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP57]]
-; OPTSIZE-NEXT: [[TMP59:%.*]] = extractelement <16 x i8> [[TMP7]], i32 12
-; OPTSIZE-NEXT: store i8 [[TMP59]], ptr [[TMP58]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE30]]
-; OPTSIZE: [[PRED_STORE_CONTINUE30]]:
-; OPTSIZE-NEXT: [[TMP60:%.*]] = extractelement <16 x i1> [[TMP72]], i32 13
-; OPTSIZE-NEXT: br i1 [[TMP60]], label %[[PRED_STORE_IF31:.*]], label %[[PRED_STORE_CONTINUE32:.*]]
-; OPTSIZE: [[PRED_STORE_IF31]]:
-; OPTSIZE-NEXT: [[TMP61:%.*]] = add i64 [[INDEX]], 13
-; OPTSIZE-NEXT: [[TMP62:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP61]]
-; OPTSIZE-NEXT: [[TMP63:%.*]] = extractelement <16 x i8> [[TMP7]], i32 13
-; OPTSIZE-NEXT: store i8 [[TMP63]], ptr [[TMP62]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE32]]
-; OPTSIZE: [[PRED_STORE_CONTINUE32]]:
-; OPTSIZE-NEXT: [[TMP64:%.*]] = extractelement <16 x i1> [[TMP72]], i32 14
-; OPTSIZE-NEXT: br i1 [[TMP64]], label %[[PRED_STORE_IF33:.*]], label %[[PRED_STORE_CONTINUE34:.*]]
-; OPTSIZE: [[PRED_STORE_IF33]]:
-; OPTSIZE-NEXT: [[TMP65:%.*]] = add i64 [[INDEX]], 14
-; OPTSIZE-NEXT: [[TMP66:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP65]]
-; OPTSIZE-NEXT: [[TMP67:%.*]] = extractelement <16 x i8> [[TMP7]], i32 14
-; OPTSIZE-NEXT: store i8 [[TMP67]], ptr [[TMP66]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE34]]
-; OPTSIZE: [[PRED_STORE_CONTINUE34]]:
-; OPTSIZE-NEXT: [[TMP68:%.*]] = extractelement <16 x i1> [[TMP72]], i32 15
-; OPTSIZE-NEXT: br i1 [[TMP68]], label %[[PRED_STORE_IF35:.*]], label %[[PRED_STORE_CONTINUE36]]
-; OPTSIZE: [[PRED_STORE_IF35]]:
-; OPTSIZE-NEXT: [[TMP69:%.*]] = add i64 [[INDEX]], 15
-; OPTSIZE-NEXT: [[TMP70:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP69]]
-; OPTSIZE-NEXT: [[TMP71:%.*]] = extractelement <16 x i8> [[TMP7]], i32 15
-; OPTSIZE-NEXT: store i8 [[TMP71]], ptr [[TMP70]], align 1
-; OPTSIZE-NEXT: br label %[[PRED_STORE_CONTINUE36]]
-; OPTSIZE: [[PRED_STORE_CONTINUE36]]:
-; OPTSIZE-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
-; OPTSIZE-NEXT: [[VEC_IND_NEXT2]] = add <16 x i8> [[VEC_IND1]], splat (i8 16)
-; OPTSIZE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; OPTSIZE-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-; OPTSIZE: [[MIDDLE_BLOCK]]:
-; OPTSIZE-NEXT: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; OPTSIZE: [[SCALAR_PH]]:
-; OPTSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; OPTSIZE-NEXT: [[TMP0:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP0]]
; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP0]], 1
@@ -613,7 +439,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; OPTSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -740,7 +566,7 @@ define void @dont_vectorize_with_minsize() {
; OPTSIZE-NEXT: store <4 x i16> [[TMP9]], ptr [[TMP7]], align 2
; OPTSIZE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; OPTSIZE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
; OPTSIZE: [[SCALAR_PH]]:
@@ -760,7 +586,7 @@ define void @dont_vectorize_with_minsize() {
; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -912,7 +738,7 @@ define void @vectorization_forced() {
; OPTSIZE-NEXT: store <4 x i16> [[TMP9]], ptr [[TMP7]], align 2
; OPTSIZE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; OPTSIZE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
; OPTSIZE: [[SCALAR_PH]]:
@@ -932,7 +758,7 @@ define void @vectorization_forced() {
; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -1009,4 +835,3 @@ for.cond.cleanup:
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.enable", i1 true}
-
diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
index 9e87cc29be4e8..6025108e15715 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
@@ -12,13 +12,13 @@ target datalayout = "E-m:e-p:32:32-i64:32-f64:32:64-a:0:32-n32-S128"
define i32 @foo_optsize() #0 {
; CHECK-LABEL: @foo_optsize(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[INDEX]], i64 0
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[I_08]], 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[I_08]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <64 x i32> [[BROADCAST_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
; CHECK-NEXT: [[VEC_IV:%.*]] = add <64 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ule <64 x i32> [[VEC_IV]], splat (i32 202)
@@ -28,36 +28,36 @@ define i32 @foo_optsize() #0 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <64 x i8> [[WIDE_MASKED_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = select <64 x i1> [[TMP4]], <64 x i8> splat (i8 2), <64 x i8> splat (i8 1)
; CHECK-NEXT: call void @llvm.masked.store.v64i8.p0(<64 x i8> [[TMP5]], ptr [[TMP3]], i32 1, <64 x i1> [[TMP1]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 64
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[INC]] = add nuw i32 [[I_08]], 64
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INC]], 256
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
+; CHECK-NEXT: [[I_8:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_8]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: [[INC1]] = add nsw i32 [[I_8]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_8]], 202
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 0
;
; AUTOVF-LABEL: @foo_optsize(
; AUTOVF-NEXT: entry:
-; AUTOVF-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; AUTOVF-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; AUTOVF: vector.ph:
-; AUTOVF-NEXT: br label [[VECTOR_BODY:%.*]]
+; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
; AUTOVF: vector.body:
-; AUTOVF-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; AUTOVF-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
-; AUTOVF-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[INDEX]], i64 0
+; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; AUTOVF-NEXT: [[TMP0:%.*]] = add i32 [[I_08]], 0
+; AUTOVF-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[I_08]], i64 0
; AUTOVF-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <32 x i32> [[BROADCAST_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
; AUTOVF-NEXT: [[VEC_IV:%.*]] = add <32 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; AUTOVF-NEXT: [[TMP1:%.*]] = icmp ule <32 x i32> [[VEC_IV]], splat (i32 202)
@@ -67,24 +67,24 @@ define i32 @foo_optsize() #0 {
; AUTOVF-NEXT: [[TMP4:%.*]] = icmp eq <32 x i8> [[WIDE_MASKED_LOAD]], zeroinitializer
; AUTOVF-NEXT: [[TMP5:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> splat (i8 2), <32 x i8> splat (i8 1)
; AUTOVF-NEXT: call void @llvm.masked.store.v32i8.p0(<32 x i8> [[TMP5]], ptr [[TMP3]], i32 1, <32 x i1> [[TMP1]])
-; AUTOVF-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32
-; AUTOVF-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 224
-; AUTOVF-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; AUTOVF-NEXT: [[INC]] = add nuw i32 [[I_08]], 32
+; AUTOVF-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INC]], 224
+; AUTOVF-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AUTOVF: middle.block:
; AUTOVF-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; AUTOVF: scalar.ph:
-; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 224, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
+; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 224, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; AUTOVF-NEXT: br label [[FOR_BODY1:%.*]]
; AUTOVF: for.body:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
+; AUTOVF-NEXT: [[I_8:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY1]] ]
+; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_8]]
; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; AUTOVF-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; AUTOVF-NEXT: [[INC1]] = add nsw i32 [[I_8]], 1
+; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_8]], 202
+; AUTOVF-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; AUTOVF: for.end:
; AUTOVF-NEXT: ret i32 0
;
@@ -112,13 +112,13 @@ attributes #0 = { optsize }
define i32 @foo_minsize() #1 {
; CHECK-LABEL: @foo_minsize(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[INDEX]], i64 0
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[I_08]], 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[I_08]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <64 x i32> [[BROADCAST_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
; CHECK-NEXT: [[VEC_IV:%.*]] = add <64 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ule <64 x i32> [[VEC_IV]], splat (i32 202)
@@ -128,36 +128,36 @@ define i32 @foo_minsize() #1 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <64 x i8> [[WIDE_MASKED_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = select <64 x i1> [[TMP4]], <64 x i8> splat (i8 2), <64 x i8> splat (i8 1)
; CHECK-NEXT: call void @llvm.masked.store.v64i8.p0(<64 x i8> [[TMP5]], ptr [[TMP3]], i32 1, <64 x i1> [[TMP1]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 64
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[INC]] = add nuw i32 [[I_08]], 64
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INC]], 256
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
+; CHECK-NEXT: [[I_8:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_8]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[INC1]] = add nsw i32 [[I_8]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_8]], 202
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 0
;
; AUTOVF-LABEL: @foo_minsize(
; AUTOVF-NEXT: entry:
-; AUTOVF-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; AUTOVF-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; AUTOVF: vector.ph:
-; AUTOVF-NEXT: br label [[VECTOR_BODY:%.*]]
+; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
; AUTOVF: vector.body:
-; AUTOVF-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; AUTOVF-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
-; AUTOVF-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[INDEX]], i64 0
+; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; AUTOVF-NEXT: [[TMP0:%.*]] = add i32 [[I_08]], 0
+; AUTOVF-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[I_08]], i64 0
; AUTOVF-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <32 x i32> [[BROADCAST_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
; AUTOVF-NEXT: [[VEC_IV:%.*]] = add <32 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; AUTOVF-NEXT: [[TMP1:%.*]] = icmp ule <32 x i32> [[VEC_IV]], splat (i32 202)
@@ -167,24 +167,24 @@ define i32 @foo_minsize() #1 {
; AUTOVF-NEXT: [[TMP4:%.*]] = icmp eq <32 x i8> [[WIDE_MASKED_LOAD]], zeroinitializer
; AUTOVF-NEXT: [[TMP5:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> splat (i8 2), <32 x i8> splat (i8 1)
; AUTOVF-NEXT: call void @llvm.masked.store.v32i8.p0(<32 x i8> [[TMP5]], ptr [[TMP3]], i32 1, <32 x i1> [[TMP1]])
-; AUTOVF-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32
-; AUTOVF-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 224
-; AUTOVF-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; AUTOVF-NEXT: [[INC]] = add nuw i32 [[I_08]], 32
+; AUTOVF-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INC]], 224
+; AUTOVF-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; AUTOVF: middle.block:
; AUTOVF-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; AUTOVF: scalar.ph:
-; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 224, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
+; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 224, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; AUTOVF-NEXT: br label [[FOR_BODY1:%.*]]
; AUTOVF: for.body:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
+; AUTOVF-NEXT: [[I_8:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY1]] ]
+; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_8]]
; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; AUTOVF-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; AUTOVF-NEXT: [[INC1]] = add nsw i32 [[I_8]], 1
+; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_8]], 202
+; AUTOVF-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]]
; AUTOVF: for.end:
; AUTOVF-NEXT: ret i32 0
;
@@ -370,34 +370,34 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[TMP3]], 1
; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[N_VEC]], -72
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i32 [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START]], i32 [[TMP4]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <64 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <64 x i32> [[BROADCAST_SPLATINSERT3]], <64 x i32> poison, <64 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <64 x i32> <i32 0, i32 -72, i32 -144, i32 -216, i32 -288, i32 -360, i32 -432, i32 -504, i32 -576, i32 -648, i32 -720, i32 -792, i32 -864, i32 -936, i32 -1008, i32 -1080, i32 -1152, i32 -1224, i32 -1296, i32 -1368, i32 -1440, i32 -1512, i32 -1584, i32 -1656, i32 -1728, i32 -1800, i32 -1872, i32 -1944, i32 -2016, i32 -2088, i32 -2160, i32 -2232, i32 -2304, i32 -2376, i32 -2448, i32 -2520, i32 -2592, i32 -2664, i32 -2736, i32 -2808, i32 -2880, i32 -2952, i32 -3024, i32 -3096, i32 -3168, i32 -3240, i32 -3312, i32 -3384, i32 -3456, i32 -3528, i32 -3600, i32 -3672, i32 -3744, i32 -3816, i32 -3888, i32 -3960, i32 -4032, i32 -4104, i32 -4176, i32 -4248, i32 -4320, i32 -4392, i32 -4464, i32 -4536>
+; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <64 x i32> <i32 0, i32 -72, i32 -144, i32 -216, i32 -288, i32 -360, i32 -432, i32 -504, i32 -576, i32 -648, i32 -720, i32 -792, i32 -864, i32 -936, i32 -1008, i32 -1080, i32 -1152, i32 -1224, i32 -1296, i32 -1368, i32 -1440, i32 -1512, i32 -1584, i32 -1656, i32 -1728, i32 -1800, i32 -1872, i32 -1944, i32 -2016, i32 -2088, i32 -2160, i32 -2232, i32 -2304, i32 -2376, i32 -2448, i32 -2520, i32 -2592, i32 -2664, i32 -2736, i32 -2808, i32 -2880, i32 -2952, i32 -3024, i32 -3096, i32 -3168, i32 -3240, i32 -3312, i32 -3384, i32 -3456, i32 -3528, i32 -3600, i32 -3672, i32 -3744, i32 -3816, i32 -3888, i32 -3960, i32 -4032, i32 -4104, i32 -4176, i32 -4248, i32 -4320, i32 -4392, i32 -4464, i32 -4536>
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[INDEX]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <64 x i32> [[BROADCAST_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
; CHECK-NEXT: [[VEC_IV:%.*]] = add <64 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[TMP6:%.*]] = icmp ule <64 x i32> [[VEC_IV]], [[BROADCAST_SPLAT4]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v64p0.v64p0(<64 x ptr> zeroinitializer, <64 x ptr> [[TMP5]], i32 8, <64 x i1> [[TMP6]])
+; CHECK-NEXT: call void @llvm.masked.scatter.v64p0.v64p0(<64 x ptr> zeroinitializer, <64 x ptr> [[VECTOR_GEP]], i32 8, <64 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 64
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 -4608
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP1]] ]
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -416,34 +416,34 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; AUTOVF-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; AUTOVF-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[TMP3]], 1
; AUTOVF-NEXT: [[TMP4:%.*]] = mul i32 [[N_VEC]], -72
-; AUTOVF-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i32 [[TMP4]]
+; AUTOVF-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START]], i32 [[TMP4]]
; AUTOVF-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0
; AUTOVF-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i32> [[BROADCAST_SPLATINSERT3]], <8 x i32> poison, <8 x i32> zeroinitializer
-; AUTOVF-NEXT: br label [[VECTOR_BODY:%.*]]
+; AUTOVF-NEXT: br label [[LOOP:%.*]]
; AUTOVF: vector.body:
-; AUTOVF-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; AUTOVF-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; AUTOVF-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <8 x i32> <i32 0, i32 -72, i32 -144, i32 -216, i32 -288, i32 -360, i32 -432, i32 -504>
+; AUTOVF-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[LOOP]] ]
+; AUTOVF-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; AUTOVF-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <8 x i32> <i32 0, i32 -72, i32 -144, i32 -216, i32 -288, i32 -360, i32 -432, i32 -504>
; AUTOVF-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[INDEX]], i64 0
; AUTOVF-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i32> [[BROADCAST_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
; AUTOVF-NEXT: [[VEC_IV:%.*]] = add <8 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; AUTOVF-NEXT: [[TMP6:%.*]] = icmp ule <8 x i32> [[VEC_IV]], [[BROADCAST_SPLAT4]]
-; AUTOVF-NEXT: call void @llvm.masked.scatter.v8p0.v8p0(<8 x ptr> zeroinitializer, <8 x ptr> [[TMP5]], i32 8, <8 x i1> [[TMP6]])
+; AUTOVF-NEXT: call void @llvm.masked.scatter.v8p0.v8p0(<8 x ptr> zeroinitializer, <8 x ptr> [[VECTOR_GEP]], i32 8, <8 x i1> [[TMP6]])
; AUTOVF-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; AUTOVF-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 -576
; AUTOVF-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; AUTOVF-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; AUTOVF-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; AUTOVF: middle.block:
; AUTOVF-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; AUTOVF: scalar.ph:
-; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ]
-; AUTOVF-NEXT: br label [[LOOP:%.*]]
+; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ]
+; AUTOVF-NEXT: br label [[LOOP1:%.*]]
; AUTOVF: loop:
-; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP1]] ]
; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8
; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
-; AUTOVF-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; AUTOVF-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP9:![0-9]+]]
; AUTOVF: exit:
; AUTOVF-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index 8914edf28372f..dbdee9713b638 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -67,150 +67,44 @@ define void @example1() optsize {
ret void
}
-; Can vectorize in 'optsize' mode by masking the needed tail.
+; Can't vectorize in 'optsize' mode as it would require masking the needed tail.
define void @example2(i32 %n, i32 %x) optsize {
; CHECK-LABEL: @example2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[TMP1]], label [[DOTLR_PH5_PREHEADER:%.*]], label [[DOTPREHEADER:%.*]]
; CHECK: .lr.ph5.preheader:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[N]] to i64
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw nsw i64 [[TMP2]], 3
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N_RND_UP]], 4294967292
-; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = add nsw i64 [[TMP2]], -1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
-; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
-; CHECK-NEXT: store i32 [[X:%.*]], ptr [[TMP5]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
-; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
-; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
-; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP7]]
-; CHECK-NEXT: store i32 [[X]], ptr [[TMP8]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
-; CHECK: pred.store.continue2:
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
-; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
-; CHECK: pred.store.if3:
-; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP10]]
-; CHECK-NEXT: store i32 [[X]], ptr [[TMP11]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
-; CHECK: pred.store.continue4:
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
-; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
-; CHECK: pred.store.if5:
-; CHECK-NEXT: [[TMP13:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP13]]
-; CHECK-NEXT: store i32 [[X]], ptr [[TMP14]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
-; CHECK: pred.store.continue6:
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[DOT_PREHEADER_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[DOTLR_PH5:%.*]]
+; CHECK-NEXT: br label [[DOTLR_PH6:%.*]]
; CHECK: ..preheader_crit_edge:
; CHECK-NEXT: [[PHITMP:%.*]] = zext nneg i32 [[N]] to i64
; CHECK-NEXT: br label [[DOTPREHEADER]]
; CHECK: .preheader:
-; CHECK-NEXT: [[I_0_LCSSA:%.*]] = phi i64 [ [[PHITMP]], [[DOT_PREHEADER_CRIT_EDGE]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT: [[I_0_LCSSA:%.*]] = phi i64 [ [[PHITMP]], [[DOT_PREHEADER_CRIT_EDGE:%.*]] ], [ 0, [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[N]], 0
; CHECK-NEXT: br i1 [[TMP16]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH_PREHEADER:%.*]]
; CHECK: .lr.ph.preheader:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH8:%.*]], label [[VECTOR_PH8:%.*]]
-; CHECK: vector.ph8:
-; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[N]] to i64
-; CHECK-NEXT: [[N_RND_UP10:%.*]] = add nuw nsw i64 [[TMP17]], 3
-; CHECK-NEXT: [[N_VEC12:%.*]] = and i64 [[N_RND_UP10]], 8589934588
-; CHECK-NEXT: [[TRIP_COUNT_MINUS_114:%.*]] = add nsw i64 [[TMP17]], -1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT19:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_114]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT20:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT19]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY13:%.*]]
-; CHECK: vector.body13:
-; CHECK-NEXT: [[INDEX16:%.*]] = phi i64 [ 0, [[VECTOR_PH8]] ], [ [[INDEX_NEXT29:%.*]], [[PRED_STORE_CONTINUE26:%.*]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[I_0_LCSSA]], [[INDEX16]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX16]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT18:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT17]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT18]], <i64 0, i64 1, i64 2, i64 3>
-; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT20]]
-; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP18]], i64 0
-; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]]
-; CHECK: pred.store.if19:
+; CHECK: .lr.ph5:
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ [[TMP27:%.*]], [[DOTLR_PH6]] ], [ 0, [[DOTLR_PH5_PREHEADER]] ]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP25:%.*]] = and i32 [[TMP23]], [[TMP21]]
-; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
-; CHECK: pred.store.continue20:
-; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP18]], i64 1
-; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]]
-; CHECK: pred.store.if21:
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 1
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP27]]
-; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[TMP27]]
-; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP30]], align 4
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP27]]
-; CHECK-NEXT: [[TMP33:%.*]] = and i32 [[TMP31]], [[TMP29]]
-; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]]
-; CHECK: pred.store.continue22:
-; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[TMP18]], i64 2
-; CHECK-NEXT: br i1 [[TMP34]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]]
-; CHECK: pred.store.if23:
-; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: store i32 [[X:%.*]], ptr [[TMP20]], align 4
+; CHECK-NEXT: [[TMP27]] = add i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_PREHEADER_CRIT_EDGE]], label [[DOTLR_PH6]]
+; CHECK: .lr.ph:
+; CHECK-NEXT: [[TMP35:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[VECTOR_BODY13]] ], [ [[I_0_LCSSA]], [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[DOT02:%.*]] = phi i32 [ [[TMP4:%.*]], [[VECTOR_BODY13]] ], [ [[N]], [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[TMP4]] = add nsw i32 [[DOT02]], -1
; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP35]]
; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[TMP35]]
; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4
-; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP35]]
; CHECK-NEXT: [[TMP41:%.*]] = and i32 [[TMP39]], [[TMP37]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP35]]
; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]]
-; CHECK: pred.store.continue24:
-; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[TMP18]], i64 3
-; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26]]
-; CHECK: pred.store.if25:
-; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], 3
-; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP43]]
-; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP44]], align 4
-; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[TMP43]]
-; CHECK-NEXT: [[TMP47:%.*]] = load i32, ptr [[TMP46]], align 4
-; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP43]]
-; CHECK-NEXT: [[TMP49:%.*]] = and i32 [[TMP47]], [[TMP45]]
-; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]]
-; CHECK: pred.store.continue26:
-; CHECK-NEXT: [[INDEX_NEXT29]] = add nuw i64 [[INDEX16]], 4
-; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT29]], [[N_VEC12]]
-; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK28:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP5:![0-9]+]]
-; CHECK: middle.block28:
-; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH8]]
-; CHECK: scalar.ph7:
-; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph5:
-; CHECK-NEXT: br i1 poison, label [[DOT_PREHEADER_CRIT_EDGE]], label [[DOTLR_PH5]], !llvm.loop [[LOOP6:![0-9]+]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[TMP35]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT: br i1 [[TMP11]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[VECTOR_BODY13]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
; CHECK: ._crit_edge:
@@ -257,80 +151,25 @@ define void @example2(i32 %n, i32 %x) optsize {
}
; Loop has no primary induction as its integer IV has step -1 starting at
-; unknown N, but can still be vectorized.
+; unknown N, but can still be vectorized. But that would require tail folding,
+; so we don't.
define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q) optsize {
; CHECK-LABEL: @example3(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH_PREHEADER:%.*]]
; CHECK: .lr.ph.preheader:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw nsw i64 [[TMP2]], 3
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N_RND_UP]], 8589934588
-; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = add nsw i64 [[TMP2]], -1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT11:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT12:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT11]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE16:%.*]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[OFFSET_IDX6:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT12]]
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
-; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; CHECK: pred.store.if:
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[OFFSET_IDX6]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[NEXT_GEP7]], align 16
-; CHECK-NEXT: store i32 [[TMP5]], ptr [[NEXT_GEP]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
-; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
-; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
-; CHECK: pred.store.if11:
-; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[OFFSET_IDX]], 4
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = or disjoint i64 [[OFFSET_IDX6]], 4
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[NEXT_GEP8]], align 16
-; CHECK-NEXT: store i32 [[TMP9]], ptr [[NEXT_GEP3]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
-; CHECK: pred.store.continue12:
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
-; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14:%.*]]
-; CHECK: pred.store.if13:
-; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
-; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[OFFSET_IDX6]], 8
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[NEXT_GEP9]], align 16
-; CHECK-NEXT: store i32 [[TMP13]], ptr [[NEXT_GEP4]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]]
-; CHECK: pred.store.continue14:
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
-; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16]]
-; CHECK: pred.store.if15:
-; CHECK-NEXT: [[TMP15:%.*]] = or disjoint i64 [[OFFSET_IDX]], 12
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP15]]
-; CHECK-NEXT: [[TMP16:%.*]] = or disjoint i64 [[OFFSET_IDX6]], 12
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[NEXT_GEP10]], align 16
-; CHECK-NEXT: store i32 [[TMP17]], ptr [[NEXT_GEP5]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE16]]
-; CHECK: pred.store.continue16:
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE13:%.*]]
; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[DOT05:%.*]] = phi i32 [ [[TMP2:%.*]], [[PRED_STORE_CONTINUE13]] ], [ [[N]], [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = phi ptr [ [[TMP5:%.*]], [[PRED_STORE_CONTINUE13]] ], [ [[P:%.*]], [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[NEXT_GEP7:%.*]] = phi ptr [ [[TMP3:%.*]], [[PRED_STORE_CONTINUE13]] ], [ [[Q:%.*]], [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[TMP2]] = add nsw i32 [[DOT05]], -1
+; CHECK-NEXT: [[TMP3]] = getelementptr inbounds nuw i8, ptr [[NEXT_GEP7]], i64 4
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[NEXT_GEP7]], align 16
+; CHECK-NEXT: [[TMP5]] = getelementptr inbounds nuw i8, ptr [[NEXT_GEP2]], i64 4
+; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP2]], align 16
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[PRED_STORE_CONTINUE13]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
; CHECK: ._crit_edge:
@@ -414,13 +253,13 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[TMP5:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[TMP4:%.*]]
; CHECK: 4:
-; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: 5:
; CHECK-NEXT: ret void
;
@@ -444,80 +283,24 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst)
ret void
}
-; We CAN vectorize this example by folding the tail it entails.
+; We CAN'T vectorize this example as it would require tail folding.
define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst) optsize {
; CHECK-LABEL: @example23c(
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE15:%.*]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[OFFSET_IDX5:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i64> [[VEC_IV]], splat (i64 257)
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
-; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; CHECK: pred.store.if:
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX5]]
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 7
-; CHECK-NEXT: store i32 [[TMP5]], ptr [[NEXT_GEP6]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
-; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
-; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
-; CHECK: pred.store.if9:
-; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[OFFSET_IDX5]], 4
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = or disjoint i64 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[NEXT_GEP2]], align 2
-; CHECK-NEXT: [[TMP10:%.*]] = zext i16 [[TMP9]] to i32
-; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i32 [[TMP10]], 7
-; CHECK-NEXT: store i32 [[TMP11]], ptr [[NEXT_GEP7]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
-; CHECK: pred.store.continue10:
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
-; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
-; CHECK: pred.store.if11:
-; CHECK-NEXT: [[TMP13:%.*]] = or disjoint i64 [[OFFSET_IDX5]], 8
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
-; CHECK-NEXT: [[TMP14:%.*]] = or disjoint i64 [[OFFSET_IDX]], 4
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP14]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i16, ptr [[NEXT_GEP3]], align 2
-; CHECK-NEXT: [[TMP16:%.*]] = zext i16 [[TMP15]] to i32
-; CHECK-NEXT: [[TMP17:%.*]] = shl nuw nsw i32 [[TMP16]], 7
-; CHECK-NEXT: store i32 [[TMP17]], ptr [[NEXT_GEP8]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
-; CHECK: pred.store.continue12:
-; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
-; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE15]]
-; CHECK: pred.store.if13:
-; CHECK-NEXT: [[TMP19:%.*]] = or disjoint i64 [[OFFSET_IDX5]], 12
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP19]]
-; CHECK-NEXT: [[TMP20:%.*]] = or disjoint i64 [[OFFSET_IDX]], 6
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP20]]
-; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP4]], align 2
-; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32
-; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i32 [[TMP22]], 7
-; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP9]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; CHECK: pred.store.continue14:
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
-; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[TMP26:%.*]], label [[SCALAR_PH]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[TMP25:%.*]]
-; CHECK: 25:
-; CHECK-NEXT: br i1 poison, label [[TMP26]], label [[TMP25]], !llvm.loop [[LOOP13:![0-9]+]]
-; CHECK: 26:
+; CHECK: 1:
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = phi ptr [ [[SRC:%.*]], [[TMP0:%.*]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[NEXT_GEP7:%.*]] = phi ptr [ [[DST:%.*]], [[TMP0]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP2]] = getelementptr inbounds nuw i8, ptr [[NEXT_GEP2]], i64 2
+; CHECK-NEXT: [[TMP27:%.*]] = load i16, ptr [[NEXT_GEP2]], align 2
+; CHECK-NEXT: [[TMP28:%.*]] = zext i16 [[TMP27]] to i32
+; CHECK-NEXT: [[TMP29:%.*]] = shl nuw nsw i32 [[TMP28]], 7
+; CHECK-NEXT: [[TMP6]] = getelementptr inbounds nuw i8, ptr [[NEXT_GEP7]], i64 4
+; CHECK-NEXT: store i32 [[TMP29]], ptr [[NEXT_GEP7]], align 4
+; CHECK-NEXT: [[TMP7]] = add nuw nsw i64 [[I_02]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP7]], 257
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[TMP8:%.*]], label [[VECTOR_BODY]]
+; CHECK: 8:
; CHECK-NEXT: ret void
;
br label %1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
index 428ed94dadb89..2c9392c82ac78 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
@@ -223,7 +223,7 @@ for.body:
%sum.1 = add nuw nsw i32 %add, %sum.0
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %N
- br i1 %exitcond, label %for.cond.cleanup, label %for.body
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !6
for.cond.cleanup:
ret i32 %sum.1
@@ -236,6 +236,5 @@ attributes #0 = { nounwind optsize uwtable "target-cpu"="core-avx2" "target-feat
!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!8 = !{!"llvm.loop.vectorize.enable", i1 true}
-!10 = distinct !{!10, !11, !12}
+!10 = distinct !{!10, !11}
!11 = !{!"llvm.loop.vectorize.predicate.enable", i1 false}
-!12 = !{!"llvm.loop.vectorize.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
index 08ba799b6afea..2d2f341081fc3 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mcpu=skx -S -passes=loop-vectorize,instcombine,simplifycfg -simplifycfg-require-and-preserve-domtree=1 -force-vector-width=8 -force-vector-interleave=1 -enable-interleaved-mem-accesses < %s | FileCheck %s -check-prefix=DISABLED_MASKED_STRIDED
-; RUN: opt -mcpu=skx -S -passes=loop-vectorize,instcombine,simplifycfg -simplifycfg-require-and-preserve-domtree=1 -force-vector-width=8 -force-vector-interleave=1 -enable-interleaved-mem-accesses -enable-masked-interleaved-mem-accesses < %s | FileCheck %s -check-prefix=ENABLED_MASKED_STRIDED
+; RUN: opt -mcpu=skx -S -passes=loop-vectorize,instcombine,simplifycfg -simplifycfg-require-and-preserve-domtree=1 -force-vector-width=8 -force-vector-interleave=1 -enable-interleaved-mem-accesses -force-tail-folding-style=data-without-lane-mask < %s | FileCheck %s -check-prefix=DISABLED_MASKED_STRIDED
+; RUN: opt -mcpu=skx -S -passes=loop-vectorize,instcombine,simplifycfg -simplifycfg-require-and-preserve-domtree=1 -force-vector-width=8 -force-vector-interleave=1 -enable-interleaved-mem-accesses -enable-masked-interleaved-mem-accesses -force-tail-folding-style=data-without-lane-mask < %s | FileCheck %s -check-prefix=ENABLED_MASKED_STRIDED
target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
index 53686ee76cbbd..c0058efac6518 100644
--- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=loop-vectorize -force-vector-width=4 -S | FileCheck %s
+; RUN: opt < %s -passes=loop-vectorize -force-vector-width=4 -force-tail-folding-style=data-without-lane-mask -S | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
index 0b2e7fe484390..65a0dcee3e33e 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -force-widen-divrem-via-safe-divisor=0 -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -force-widen-divrem-via-safe-divisor=0 -force-tail-folding-style=data-without-lane-mask -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
index c301ef3c5319a..0a52e16fc6cc8 100644
--- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
@@ -10,7 +10,7 @@ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
; -force-vector-interleave, but is a multiple of the internally computed MaxVF;
; e.g., when all types are i32 lead to MaxVF=1.
-define void @pr45679(ptr %A) optsize {
+define void @pr45679(ptr %A) {
; CHECK-LABEL: @pr45679(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
index 705152662be24..796cdb90cf60a 100644
--- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -enable-masked-interleaved-mem-accesses -enable-interleaved-mem-accesses -S | FileCheck %s
+; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -enable-masked-interleaved-mem-accesses -enable-interleaved-mem-accesses -force-tail-folding-style=data-without-lane-mask -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
index 57bc7b8337249..208fd48dc6924 100644
--- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s
+; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -force-tail-folding-style=data-without-lane-mask -S %s | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Transforms/LoopVectorize/struct-return.ll b/llvm/test/Transforms/LoopVectorize/struct-return.ll
index 1b2a809a552d8..57d0aa3787c45 100644
--- a/llvm/test/Transforms/LoopVectorize/struct-return.ll
+++ b/llvm/test/Transforms/LoopVectorize/struct-return.ll
@@ -211,7 +211,7 @@ exit:
; Test crafted to exercise computePredInstDiscount with struct results
; (mainly it does not crash).
; CHECK-REMARKS: remark: {{.*}} vectorized loop
-define void @scalarized_predicated_struct_return(ptr %a) optsize {
+define void @scalarized_predicated_struct_return(ptr %a) {
; CHECK-LABEL: define void @scalarized_predicated_struct_return
; CHECK: vector.body:
; CHECK: pred.store.if:
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-counting-down.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-counting-down.ll
index a757314ec7a46..ac915aa333973 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-counting-down.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-counting-down.ll
@@ -43,7 +43,7 @@ while.end:
; Make sure a loop is successfully vectorized with fold-tail when the backedge
; taken count is constant and used inside the loop. Issue revealed by D76992.
;
-define void @reuse_const_btc(ptr %A) optsize {
+define void @reuse_const_btc(ptr %A) {
; CHECK-LABEL: @reuse_const_btc
; CHECK: {{%.*}} = icmp ule <4 x i32> {{%.*}}, splat (i32 13)
; CHECK: {{%.*}} = select <4 x i1> {{%.*}}, <4 x i32> splat (i32 12), <4 x i32> splat (i32 13)
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
index aa05bb153966e..a56045e0e7820 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
@@ -1035,12 +1035,12 @@ exit:
; Test case with a dead GEP between the load and store regions. Dead recipes
; need to be removed before merging.
-define void @merge_with_dead_gep_between_regions(i32 %n, ptr noalias %src, ptr noalias %dst) optsize {
+define void @merge_with_dead_gep_between_regions(i32 %n, ptr noalias %src, ptr noalias %dst) {
; CHECK-LABEL: LV: Checking a loop in 'merge_with_dead_gep_between_regions'
; CHECK: VPlan 'Initial VPlan for VF={2},UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF
; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
; CHECK-NEXT: Live-in vp<[[VEC_TC:%.+]]> = vector-trip-count
-; CHECK-NEXT: Live-in vp<[[BTC:%.+]]> = backedge-taken count
; CHECK-NEXT: Live-in ir<%n> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<entry>:
@@ -1052,42 +1052,27 @@ define void @merge_with_dead_gep_between_regions(i32 %n, ptr noalias %src, ptr n
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
-; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
+; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[CAN_IV]]> * ir<-1>
-; CHECK-NEXT: EMIT vp<[[WIDE_IV:%.+]]> = WIDEN-CANONICAL-INDUCTION vp<[[CAN_IV]]>
-; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule vp<[[WIDE_IV]]>, vp<[[BTC]]>
-; CHECK-NEXT: Successor(s): pred.store
-; CHECK-EMPTY:
-; CHECK-NEXT: <xVFxUF> pred.store: {
-; CHECK-NEXT: pred.store.entry:
-; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK]]>
-; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
-; CHECK-EMPTY:
-; CHECK-NEXT: pred.store.if:
-; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>
-; CHECK-NEXT: REPLICATE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[SCALAR_STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%l> = load ir<%gep.src>
-; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[SCALAR_STEPS]]>
-; CHECK-NEXT: REPLICATE store ir<%l>, ir<%gep.dst>
-; CHECK-NEXT: Successor(s): pred.store.continue
-; CHECK-EMPTY:
-; CHECK-NEXT: pred.store.continue:
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: Successor(s): loop.1
-; CHECK-EMPTY:
-; CHECK-NEXT: loop.1:
-; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = add vp<[[CAN_IV]]>, vp<[[VFxUF]]>
-; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VEC_TC]]>
+; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>
+; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[SCALAR_STEPS]]>
+; CHECK-NEXT: vp<[[REVERSE_GEP_SRC:%.+]]> = reverse-vector-pointer inbounds ir<%gep.src>, vp<[[VF]]>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<[[REVERSE_GEP_SRC]]>
+; CHECK-NEXT: CLONE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[SCALAR_STEPS]]>
+; CHECK-NEXT: vp<[[REVERSE_GEP_DST:%.+]]> = reverse-vector-pointer inbounds ir<%gep.dst>, vp<[[VF]]>
+; CHECK-NEXT: WIDEN store vp<[[REVERSE_GEP_DST]]>, ir<%l>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VEC_TC]]>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT branch-on-cond ir<true>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VEC_TC]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
; CHECK-EMPTY:
-; CHECK-NEXT: scalar.ph
+; CHECK-NEXT: scalar.ph:
; CHECK-NEXT: EMIT vp<[[RESUME:%.+]]> = resume-phi vp<[[END]]>, ir<%n>
; CHECK-NEXT: Successor(s): ir-bb<loop>
; CHECK-EMPTY:
@@ -1102,7 +1087,7 @@ define void @merge_with_dead_gep_between_regions(i32 %n, ptr noalias %src, ptr n
; CHECK-NEXT: IR %ec = icmp eq i32 %iv.next, 0
; CHECK-NEXT: No successors
; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<exit>
+; CHECK-NEXT: ir-bb<exit>:
; CHECK-NEXT: No successors
; CHECK-NEXT: }
;
More information about the llvm-commits
mailing list