[llvm] LAA: don't speculate stride when BTC >= 0 (PR #96927)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 27 09:31:19 PDT 2024
https://github.com/artagnon created https://github.com/llvm/llvm-project/pull/96927
Speculating the stride currently inserts a Stride == 1 predicate, which
is equivalent to asserting that the loop executes at least once.
However, when the backedge-taken count is already known to be
non-negative, speculating the stride versions the loop unnecessarily.
Avoid this.
Fixes https://github.com/llvm/llvm-project/issues/96656.
-- 8< --
I'm not 100% sure this is correct. My reasoning tells me that it is, but the test updates seem to tell a different story.
Based on #96925.
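For reviewers skimming the diff: the heart of the change is the extended
early-exit in LoopAccessInfo::collectStridedAccess. A condensed sketch of
the new guard follows (names and SCEV calls are taken from the patch
below; this only illustrates the bail-out logic, it is not a standalone
snippet):

    // CastedStride and CastedBECount are StrideExpr and MaxBTC cast to
    // a common type, as in the existing code.
    const SCEV *StrideMinusBETaken =
        SE->getMinusSCEV(CastedStride, CastedBECount);
    // Old behavior: bail out only when Stride - MaxBTC > 0, i.e.
    // Stride >= TripCount, since TripCount == MaxBTC + 1.
    // New behavior: also bail out when MaxBTC is known non-negative.
    if (SE->isKnownPositive(StrideMinusBETaken) ||
        SE->isKnownNonNegative(MaxBTC))
      return; // the Stride == 1 predicate would not pay for versioning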
From ac1a7ff6c82ab5c25e6a3f358d4afa1d72e7b8d5 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Tue, 25 Jun 2024 17:32:24 +0100
Subject: [PATCH 1/2] LAA, LVer: add pre-commit tests for #96656
The issue is in LoopAccessAnalysis, but the regression was observed in
its user, LoopVersioning. Hence, add pre-commit tests for both, in
preparation for fixing the issue in LoopAccessAnalysis.
---
.../Analysis/LoopAccessAnalysis/pr96656.ll | 49 ++++++++++++
.../test/Transforms/LoopVersioning/pr96656.ll | 78 +++++++++++++++++++
2 files changed, 127 insertions(+)
create mode 100644 llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll
create mode 100644 llvm/test/Transforms/LoopVersioning/pr96656.ll
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll b/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll
new file mode 100644
index 0000000000000..5b9833553fa02
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<access-info>' -disable-output %s 2>&1 | FileCheck %s
+
+define void @false.equal.predicate(ptr %arg, ptr %arg1, i1 %arg2) {
+; CHECK-LABEL: 'false.equal.predicate'
+; CHECK-NEXT: loop.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-NEXT: Equal predicate: %load == 1
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+; CHECK-NEXT: [PSE] %gep10 = getelementptr double, ptr %gep8, i64 %mul:
+; CHECK-NEXT: {(8 + %arg1),+,(8 * (sext i32 %load to i64))<nsw>}<%loop.body>
+; CHECK-NEXT: --> {(8 + %arg1),+,8}<%loop.body>
+;
+entry:
+ %load = load i32, ptr %arg, align 4
+ br i1 %arg2, label %noloop.exit, label %loop.ph
+
+loop.ph: ; preds = %entry
+ %sext7 = sext i32 %load to i64
+ %gep8 = getelementptr i8, ptr %arg1, i64 8
+ br label %loop.body
+
+loop.body: ; preds = %loop.body, %loop.ph
+ %phi = phi i64 [ 0, %loop.ph ], [ %add, %loop.body ]
+ %mul = mul i64 %phi, %sext7
+ %gep10 = getelementptr double, ptr %gep8, i64 %mul
+ %load11 = load double, ptr %gep10, align 8
+ store double %load11, ptr %arg1, align 8
+ %add = add i64 %phi, 1
+ %icmp = icmp eq i64 %phi, 0
+ br i1 %icmp, label %loop.exit, label %loop.body
+
+noloop.exit: ; preds = %entry
+ %sext = sext i32 %load to i64
+ %gep = getelementptr double, ptr %arg1, i64 %sext
+ %load5 = load double, ptr %gep, align 8
+ store double %load5, ptr %arg, align 8
+ ret void
+
+loop.exit: ; preds = %loop.body
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVersioning/pr96656.ll b/llvm/test/Transforms/LoopVersioning/pr96656.ll
new file mode 100644
index 0000000000000..0264fe40a9430
--- /dev/null
+++ b/llvm/test/Transforms/LoopVersioning/pr96656.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=loop-versioning -S %s | FileCheck %s
+
+define void @lver.check.unnecessary(ptr %arg, ptr %arg1, i1 %arg2) {
+; CHECK-LABEL: define void @lver.check.unnecessary(
+; CHECK-SAME: ptr [[ARG:%.*]], ptr [[ARG1:%.*]], i1 [[ARG2:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARG]], align 4
+; CHECK-NEXT: br i1 [[ARG2]], label %[[NOLOOP_EXIT:.*]], label %[[LOOP_BODY_LVER_CHECK:.*]]
+; CHECK: [[LOOP_BODY_LVER_CHECK]]:
+; CHECK-NEXT: [[SEXT7:%.*]] = sext i32 [[LOAD]] to i64
+; CHECK-NEXT: [[GEP8:%.*]] = getelementptr i8, ptr [[ARG1]], i64 8
+; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[LOAD]], 1
+; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[LOOP_BODY_PH_LVER_ORIG:.*]], label %[[LOOP_BODY_PH:.*]]
+; CHECK: [[LOOP_BODY_PH_LVER_ORIG]]:
+; CHECK-NEXT: br label %[[LOOP_BODY_LVER_ORIG:.*]]
+; CHECK: [[LOOP_BODY_LVER_ORIG]]:
+; CHECK-NEXT: [[PHI_LVER_ORIG:%.*]] = phi i64 [ 0, %[[LOOP_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], %[[LOOP_BODY_LVER_ORIG]] ]
+; CHECK-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[PHI_LVER_ORIG]], [[SEXT7]]
+; CHECK-NEXT: [[GEP10_LVER_ORIG:%.*]] = getelementptr double, ptr [[GEP8]], i64 [[MUL_LVER_ORIG]]
+; CHECK-NEXT: [[LOAD11_LVER_ORIG:%.*]] = load double, ptr [[GEP10_LVER_ORIG]], align 8
+; CHECK-NEXT: store double [[LOAD11_LVER_ORIG]], ptr [[ARG1]], align 8
+; CHECK-NEXT: [[ADD_LVER_ORIG]] = add i64 [[PHI_LVER_ORIG]], 1
+; CHECK-NEXT: [[ICMP_LVER_ORIG:%.*]] = icmp eq i64 [[PHI_LVER_ORIG]], 0
+; CHECK-NEXT: br i1 [[ICMP_LVER_ORIG]], label %[[LOOP_EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY_LVER_ORIG]]
+; CHECK: [[LOOP_BODY_PH]]:
+; CHECK-NEXT: br label %[[LOOP_BODY:.*]]
+; CHECK: [[LOOP_BODY]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[LOOP_BODY_PH]] ], [ [[ADD:%.*]], %[[LOOP_BODY]] ]
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[PHI]], [[SEXT7]]
+; CHECK-NEXT: [[GEP10:%.*]] = getelementptr double, ptr [[GEP8]], i64 [[MUL]]
+; CHECK-NEXT: [[LOAD11:%.*]] = load double, ptr [[GEP10]], align 8
+; CHECK-NEXT: store double [[LOAD11]], ptr [[ARG1]], align 8
+; CHECK-NEXT: [[ADD]] = add i64 [[PHI]], 1
+; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[PHI]], 0
+; CHECK-NEXT: br i1 [[ICMP]], label %[[LOOP_EXIT_LOOPEXIT1:.*]], label %[[LOOP_BODY]]
+; CHECK: [[NOLOOP_EXIT]]:
+; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[ARG1]], i64 [[SEXT]]
+; CHECK-NEXT: [[LOAD5:%.*]] = load double, ptr [[GEP]], align 8
+; CHECK-NEXT: store double [[LOAD5]], ptr [[ARG]], align 8
+; CHECK-NEXT: ret void
+; CHECK: [[LOOP_EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[LOOP_EXIT:.*]]
+; CHECK: [[LOOP_EXIT_LOOPEXIT1]]:
+; CHECK-NEXT: br label %[[LOOP_EXIT]]
+; CHECK: [[LOOP_EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %load = load i32, ptr %arg, align 4
+ br i1 %arg2, label %noloop.exit, label %loop.ph
+
+loop.ph: ; preds = %entry
+ %sext7 = sext i32 %load to i64
+ %gep8 = getelementptr i8, ptr %arg1, i64 8
+ br label %loop.body
+
+loop.body: ; preds = %loop.body, %loop.ph
+ %phi = phi i64 [ 0, %loop.ph ], [ %add, %loop.body ]
+ %mul = mul i64 %phi, %sext7
+ %gep10 = getelementptr double, ptr %gep8, i64 %mul
+ %load11 = load double, ptr %gep10, align 8
+ store double %load11, ptr %arg1, align 8
+ %add = add i64 %phi, 1
+ %icmp = icmp eq i64 %phi, 0
+ br i1 %icmp, label %loop.exit, label %loop.body
+
+noloop.exit: ; preds = %entry
+ %sext = sext i32 %load to i64
+ %gep = getelementptr double, ptr %arg1, i64 %sext
+ %load5 = load double, ptr %gep, align 8
+ store double %load5, ptr %arg, align 8
+ ret void
+
+loop.exit: ; preds = %loop.body
+ ret void
+}
From e8976b9ec3663f69c5428c5fa4de53c605a32513 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Thu, 27 Jun 2024 17:12:33 +0100
Subject: [PATCH 2/2] LAA: don't speculate stride when BTC >= 0
Speculating the stride currently inserts a Stride == 1 predicate, which
is equivalent to asserting that the loop executes at least once.
However, when the backedge-taken count is already known to be
non-negative, speculating the stride versions the loop unnecessarily.
Avoid this.
Fixes #96656.
---
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 20 +-
.../Analysis/LoopAccessAnalysis/pr96656.ll | 4 -
.../LoopAccessAnalysis/symbolic-stride.ll | 8 +-
.../Transforms/LoopDistribute/debug-print.ll | 10 +-
.../LoopDistribute/symbolic-stride.ll | 62 +--
.../invalidate-laa-after-versioning.ll | 62 +--
.../LoopVectorize/RISCV/strided-accesses.ll | 459 +++++-------------
.../version-stride-with-integer-casts.ll | 220 ++++++---
.../invalidate-laa-after-versioning.ll | 59 +--
.../test/Transforms/LoopVersioning/pr96656.ll | 26 +-
10 files changed, 347 insertions(+), 583 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 1f2bdf83651d6..59224ab74eff2 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2930,7 +2930,8 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
// computation of an interesting IV - but we chose not to as we
// don't have a cost model here, and broadening the scope exposes
// far too many unprofitable cases.
- const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
+ ScalarEvolution *SE = PSE->getSE();
+ const SCEV *StrideExpr = getStrideFromPointer(Ptr, SE, TheLoop);
if (!StrideExpr)
return;
@@ -2944,8 +2945,8 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
}
// Avoid adding the "Stride == 1" predicate when we know that
- // Stride >= Trip-Count. Such a predicate will effectively optimize a single
- // or zero iteration loop, as Trip-Count <= Stride == 1.
+ // Backedge-Taken-Count is non-negative, or when Stride >
+ // Backedge-Taken-Count. Trip-Count = Backedge-Taken-Count + 1.
//
// TODO: We are currently not making a very informed decision on when it is
// beneficial to apply stride versioning. It might make more sense that the
@@ -2966,20 +2967,17 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
const SCEV *CastedStride = StrideExpr;
const SCEV *CastedBECount = MaxBTC;
- ScalarEvolution *SE = PSE->getSE();
if (BETypeSizeBits >= StrideTypeSizeBits)
CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
else
CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
- // Since TripCount == BackEdgeTakenCount + 1, checking:
- // "Stride >= TripCount" is equivalent to checking:
- // Stride - MaxBTC> 0
- if (SE->isKnownPositive(StrideMinusBETaken)) {
+ if (SE->isKnownPositive(StrideMinusBETaken) ||
+ SE->isKnownNonNegative(MaxBTC)) {
LLVM_DEBUG(
- dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
- "Stride==1 predicate will imply that the loop executes "
- "at most once.\n");
+ dbgs() << "LAA: Stride > Backedge-Taken-Count or Backedge-Taken-Count "
+ ">= 0; No point in versioning as the Stride==1 predicate "
+ "will imply that the loop executes at most once.\n");
return;
}
LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll b/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll
index 5b9833553fa02..cc7aca6f4f8f6 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pr96656.ll
@@ -11,12 +11,8 @@ define void @false.equal.predicate(ptr %arg, ptr %arg1, i1 %arg2) {
; CHECK-EMPTY:
; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT: SCEV assumptions:
-; CHECK-NEXT: Equal predicate: %load == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
-; CHECK-NEXT: [PSE] %gep10 = getelementptr double, ptr %gep8, i64 %mul:
-; CHECK-NEXT: {(8 + %arg1),+,(8 * (sext i32 %load to i64))<nsw>}<%loop.body>
-; CHECK-NEXT: --> {(8 + %arg1),+,8}<%loop.body>
;
entry:
%load = load i32, ptr %arg, align 4
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
index 7c1b11e22aef2..9d17b7dad6a84 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
@@ -108,20 +108,16 @@ define void @single_stride_castexpr(i32 %offset, ptr %src, ptr %dst, i1 %cond) {
; CHECK-NEXT: %gep.src = getelementptr inbounds i32, ptr %src, i32 %iv.3
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group [[GRP1]]:
-; CHECK-NEXT: (Low: ((4 * %iv.1) + %dst) High: (804 + (4 * %iv.1) + %dst))
-; CHECK-NEXT: Member: {((4 * %iv.1) + %dst),+,4}<%inner.loop>
+; CHECK-NEXT: (Low: (((4 * %iv.1) + %dst) umin ((4 * %iv.1) + (800 * (sext i32 %offset to i64))<nsw> + %dst)) High: (4 + (((4 * %iv.1) + %dst) umax ((4 * %iv.1) + (800 * (sext i32 %offset to i64))<nsw> + %dst))))
+; CHECK-NEXT: Member: {((4 * %iv.1) + %dst),+,(4 * (sext i32 %offset to i64))<nsw>}<%inner.loop>
; CHECK-NEXT: Group [[GRP2]]:
; CHECK-NEXT: (Low: %src High: (804 + %src))
; CHECK-NEXT: Member: {%src,+,4}<nuw><%inner.loop>
; CHECK-EMPTY:
; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT: SCEV assumptions:
-; CHECK-NEXT: Equal predicate: %offset == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
-; CHECK-NEXT: [PSE] %gep.dst = getelementptr i32, ptr %dst, i64 %iv.2:
-; CHECK-NEXT: {((4 * %iv.1) + %dst),+,(4 * (sext i32 %offset to i64))<nsw>}<%inner.loop>
-; CHECK-NEXT: --> {((4 * %iv.1) + %dst),+,4}<%inner.loop>
; CHECK-NEXT: outer.header:
; CHECK-NEXT: Report: loop is not the innermost loop
; CHECK-NEXT: Dependences:
diff --git a/llvm/test/Transforms/LoopDistribute/debug-print.ll b/llvm/test/Transforms/LoopDistribute/debug-print.ll
index 733c33483ecc4..55ab35d36633a 100644
--- a/llvm/test/Transforms/LoopDistribute/debug-print.ll
+++ b/llvm/test/Transforms/LoopDistribute/debug-print.ll
@@ -6,15 +6,13 @@ define void @f(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, i
; CHECK-LABEL: 'f'
; CHECK: LDist: Found a candidate loop: for.body
; CHECK: Backward dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: store i32 %mul.a, ptr %gep.a.plus4, align 4 ->
+; CHECK-NEXT: %load.strided.a = load i32, ptr %gep.strided.a, align 4
; CHECK-NEXT: Backward:
; CHECK-NEXT: %load.a = load i32, ptr %gep.a, align 4 ->
; CHECK-NEXT: store i32 %mul.a, ptr %gep.a.plus4, align 4
-; CHECK: Seeded partitions:
-; CHECK: Partition 0
-; CHECK: Partition 1
-; CHECK: Partition 2
-; CHECK: Partition 3
-; CHECK: Distributing loop
+; CHECK: Skipping; cannot isolate unsafe dependencies
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll b/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
index 07c50b5b61162..0b3b0d8e8afa0 100644
--- a/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
+++ b/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
@@ -22,65 +22,27 @@ define void @f(ptr noalias %a,
;
; DEFAULT-LABEL: @f(
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
-; DEFAULT: for.body.lver.check:
-; DEFAULT-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
-; DEFAULT-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
-; DEFAULT: for.body.ph.lver.orig:
-; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
-; DEFAULT: for.body.lver.orig:
-; DEFAULT-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
-; DEFAULT-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXA_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXB_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
-; DEFAULT-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
-; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LVER_ORIG]]
-; DEFAULT-NEXT: store i32 [[MULA_LVER_ORIG]], ptr [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXD_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[IND_LVER_ORIG]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADSTRIDEDA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADSTRIDEDA_LVER_ORIG]]
-; DEFAULT-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: store i32 [[MULC_LVER_ORIG]], ptr [[ARRAYIDXC_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], 20
-; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
-; DEFAULT: for.body.ph.ldist1:
-; DEFAULT-NEXT: br label [[FOR_BODY_LDIST1:%.*]]
-; DEFAULT: for.body.ldist1:
-; DEFAULT-NEXT: [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
-; DEFAULT-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IND_LDIST1]]
-; DEFAULT-NEXT: [[LOADA_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXA_LDIST1]], align 4
-; DEFAULT-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IND_LDIST1]]
-; DEFAULT-NEXT: [[LOADB_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXB_LDIST1]], align 4
-; DEFAULT-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
-; DEFAULT-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
-; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LDIST1]]
-; DEFAULT-NEXT: store i32 [[MULA_LDIST1]], ptr [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
-; DEFAULT-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], 20
-; DEFAULT-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
-; DEFAULT: for.body.ph:
; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
; DEFAULT: for.body:
-; DEFAULT-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND]]
+; DEFAULT-NEXT: [[LOADA:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND]]
+; DEFAULT-NEXT: [[LOADB:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4
+; DEFAULT-NEXT: [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; DEFAULT-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
-; DEFAULT-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[IND]]
+; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD]]
+; DEFAULT-NEXT: store i32 [[MULA]], ptr [[ARRAYIDXA_PLUS_4]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND]]
; DEFAULT-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
-; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE]]
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE:%.*]]
; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
; DEFAULT-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA]], align 4
; DEFAULT-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
-; DEFAULT-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IND]]
+; DEFAULT-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND]]
; DEFAULT-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
-; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
-; DEFAULT: for.end.loopexit:
-; DEFAULT-NEXT: br label [[FOR_END:%.*]]
-; DEFAULT: for.end.loopexit1:
-; DEFAULT-NEXT: br label [[FOR_END]]
+; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; DEFAULT: for.end:
; DEFAULT-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopLoadElim/invalidate-laa-after-versioning.ll b/llvm/test/Transforms/LoopLoadElim/invalidate-laa-after-versioning.ll
index 10e10653a431d..8cd5e65083ba4 100644
--- a/llvm/test/Transforms/LoopLoadElim/invalidate-laa-after-versioning.ll
+++ b/llvm/test/Transforms/LoopLoadElim/invalidate-laa-after-versioning.ll
@@ -9,36 +9,15 @@
define void @test(ptr %arg, i64 %arg1) {
; CHECK-LABEL: @test(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br label [[INNER_1_LVER_CHECK:%.*]]
-; CHECK: inner.1.lver.check:
+; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
+; CHECK: outer.header:
; CHECK-NEXT: [[PTR_PHI:%.*]] = phi ptr [ [[ARG:%.*]], [[BB:%.*]] ], [ @glob.1, [[OUTER_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 3
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[ARG1:%.*]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[INNER_1_PH_LVER_ORIG:%.*]], label [[INNER_1_PH:%.*]]
-; CHECK: inner.1.ph.lver.orig:
-; CHECK-NEXT: br label [[INNER_1_LVER_ORIG:%.*]]
-; CHECK: inner.1.lver.orig:
-; CHECK-NEXT: [[IV_1_LVER_ORIG:%.*]] = phi i64 [ 0, [[INNER_1_PH_LVER_ORIG]] ], [ [[IV_NEXT_LVER_ORIG:%.*]], [[INNER_1_LVER_ORIG]] ]
-; CHECK-NEXT: [[PTR_IV_1_LVER_ORIG:%.*]] = phi ptr [ @glob.2, [[INNER_1_PH_LVER_ORIG]] ], [ [[PTR_IV_1_NEXT_LVER_ORIG:%.*]], [[INNER_1_LVER_ORIG]] ]
-; CHECK-NEXT: [[TMP25_LVER_ORIG:%.*]] = mul nuw nsw i64 [[IV_1_LVER_ORIG]], [[ARG1]]
-; CHECK-NEXT: [[GEP_2_LVER_ORIG:%.*]] = getelementptr inbounds double, ptr [[GEP_1]], i64 [[TMP25_LVER_ORIG]]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_2_LVER_ORIG]], align 8
-; CHECK-NEXT: [[GEP_3_LVER_ORIG:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 [[TMP25_LVER_ORIG]]
-; CHECK-NEXT: [[GEP_4_LVER_ORIG:%.*]] = getelementptr double, ptr [[GEP_3_LVER_ORIG]], i64 2
-; CHECK-NEXT: [[TMP29_LVER_ORIG:%.*]] = load double, ptr [[GEP_4_LVER_ORIG]], align 8
-; CHECK-NEXT: [[PTR_IV_1_NEXT_LVER_ORIG]] = getelementptr inbounds double, ptr [[PTR_IV_1_LVER_ORIG]], i64 1
-; CHECK-NEXT: [[IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[IV_1_LVER_ORIG]], 1
-; CHECK-NEXT: [[C_1_LVER_ORIG:%.*]] = icmp eq i64 [[IV_1_LVER_ORIG]], 1
-; CHECK-NEXT: br i1 [[C_1_LVER_ORIG]], label [[INNER_1_EXIT_LOOPEXIT:%.*]], label [[INNER_1_LVER_ORIG]]
-; CHECK: inner.1.ph:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[PTR_PHI]], i64 16
-; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds double, ptr [[PTR_PHI]], i64 3
; CHECK-NEXT: br label [[INNER_1:%.*]]
; CHECK: inner.1:
-; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi double [ [[LOAD_INITIAL]], [[INNER_1_PH]] ], [ 0.000000e+00, [[INNER_1]] ]
-; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, [[INNER_1_PH]] ], [ [[IV_NEXT:%.*]], [[INNER_1]] ]
-; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ @glob.2, [[INNER_1_PH]] ], [ [[PTR_IV_1_NEXT:%.*]], [[INNER_1]] ]
-; CHECK-NEXT: [[TMP25:%.*]] = mul nuw nsw i64 [[IV_1]], [[ARG1]]
+; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, [[OUTER_HEADER]] ], [ [[IV_NEXT:%.*]], [[INNER_1]] ]
+; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ @glob.2, [[OUTER_HEADER]] ], [ [[PTR_IV_1_NEXT:%.*]], [[INNER_1]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = mul nuw nsw i64 [[IV_1]], [[ARG1:%.*]]
; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds double, ptr [[GEP_1]], i64 [[TMP25]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_2]], align 8
; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 [[TMP25]]
@@ -47,15 +26,10 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr inbounds double, ptr [[PTR_IV_1]], i64 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i64 [[IV_1]], 1
-; CHECK-NEXT: br i1 [[C_1]], label [[INNER_1_EXIT_LOOPEXIT1:%.*]], label [[INNER_1]]
-; CHECK: inner.1.exit.loopexit:
-; CHECK-NEXT: [[LCSSA_PTR_IV_1_PH:%.*]] = phi ptr [ [[PTR_IV_1_LVER_ORIG]], [[INNER_1_LVER_ORIG]] ]
-; CHECK-NEXT: br label [[INNER_1_EXIT:%.*]]
-; CHECK: inner.1.exit.loopexit1:
-; CHECK-NEXT: [[LCSSA_PTR_IV_1_PH2:%.*]] = phi ptr [ [[PTR_IV_1]], [[INNER_1]] ]
-; CHECK-NEXT: br label [[INNER_1_EXIT]]
+; CHECK-NEXT: br i1 [[C_1]], label [[INNER_1_EXIT:%.*]], label [[INNER_1]]
; CHECK: inner.1.exit:
-; CHECK-NEXT: [[LCSSA_PTR_IV_1:%.*]] = phi ptr [ [[LCSSA_PTR_IV_1_PH]], [[INNER_1_EXIT_LOOPEXIT]] ], [ [[LCSSA_PTR_IV_1_PH2]], [[INNER_1_EXIT_LOOPEXIT1]] ]
+; CHECK-NEXT: [[IV_1_LCSSA:%.*]] = phi i64 [ [[IV_1]], [[INNER_1]] ]
+; CHECK-NEXT: [[LCSSA_PTR_IV_1:%.*]] = phi ptr [ [[PTR_IV_1]], [[INNER_1]] ]
; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds double, ptr [[LCSSA_PTR_IV_1]], i64 1
; CHECK-NEXT: br label [[INNER_2:%.*]]
; CHECK: inner.2:
@@ -69,11 +43,13 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK-NEXT: [[LCSSA_PTR_IV_2:%.*]] = phi ptr [ [[PTR_IV_2]], [[INNER_2]] ]
; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds double, ptr [[PTR_PHI]], i64 1
; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds double, ptr [[LCSSA_PTR_IV_2]], i64 1
-; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[INDVAR_LCSSA]], 3
+; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[IV_1_LCSSA]], 3
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 24
-; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[LCSSA_PTR_IV_1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDVAR_LCSSA]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr @glob.2, i64 [[TMP3]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_7]], [[GEP_1]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[PTR_PHI]], [[SCEVGEP3]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[PTR_PHI]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[INNER_3_PH_LVER_ORIG:%.*]], label [[INNER_3_PH:%.*]]
; CHECK: inner.3.ph.lver.orig:
@@ -89,10 +65,10 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK-NEXT: [[C_2_LVER_ORIG:%.*]] = icmp eq i64 [[IV_2_LVER_ORIG]], 1
; CHECK-NEXT: br i1 [[C_2_LVER_ORIG]], label [[OUTER_LATCH_LOOPEXIT:%.*]], label [[INNER_3_LVER_ORIG]]
; CHECK: inner.3.ph:
-; CHECK-NEXT: [[LOAD_INITIAL5:%.*]] = load double, ptr [[PTR_PHI]], align 8
+; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load double, ptr [[PTR_PHI]], align 8
; CHECK-NEXT: br label [[INNER_3:%.*]]
; CHECK: inner.3:
-; CHECK-NEXT: [[STORE_FORWARDED6:%.*]] = phi double [ [[LOAD_INITIAL5]], [[INNER_3_PH]] ], [ 0.000000e+00, [[INNER_3]] ]
+; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi double [ [[LOAD_INITIAL]], [[INNER_3_PH]] ], [ 0.000000e+00, [[INNER_3]] ]
; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ 0, [[INNER_3_PH]] ], [ [[IV_2_NEXT:%.*]], [[INNER_3]] ]
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds double, ptr [[GEP_6]], i64 [[IV_2]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_7]], align 8
@@ -101,13 +77,13 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK-NEXT: [[TMP18:%.*]] = load double, ptr [[GEP_9]], align 8
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 1
; CHECK-NEXT: [[C_2:%.*]] = icmp eq i64 [[IV_2]], 1
-; CHECK-NEXT: br i1 [[C_2]], label [[OUTER_LATCH_LOOPEXIT4:%.*]], label [[INNER_3]]
+; CHECK-NEXT: br i1 [[C_2]], label [[OUTER_LATCH_LOOPEXIT1:%.*]], label [[INNER_3]]
; CHECK: outer.latch.loopexit:
; CHECK-NEXT: br label [[OUTER_LATCH]]
-; CHECK: outer.latch.loopexit4:
+; CHECK: outer.latch.loopexit1:
; CHECK-NEXT: br label [[OUTER_LATCH]]
; CHECK: outer.latch:
-; CHECK-NEXT: br label [[INNER_1_LVER_CHECK]]
+; CHECK-NEXT: br label [[OUTER_HEADER]]
;
bb:
br label %outer.header
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 6936887cd166c..815c26dbba886 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -237,69 +237,21 @@ exit:
define void @single_stride_int_scaled(ptr %p, i64 %stride) {
-; NOSTRIDED-LABEL: @single_stride_int_scaled(
-; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; NOSTRIDED: vector.scevcheck:
-; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
-; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
-; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
-; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
-; NOSTRIDED: vector.body:
-; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
-; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
-; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP8]], i32 0
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
-; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP9]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
-; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
-; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
-; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
-; NOSTRIDED: exit:
-; NOSTRIDED-NEXT: ret void
-;
-; STRIDED-LABEL: @single_stride_int_scaled(
-; STRIDED-NEXT: entry:
-; STRIDED-NEXT: br label [[LOOP:%.*]]
-; STRIDED: loop:
-; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE:%.*]]
-; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
-; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
-; STRIDED: exit:
-; STRIDED-NEXT: ret void
+; CHECK-LABEL: @single_stride_int_scaled(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE:%.*]]
+; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
+; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
+; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
+; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
+; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
+; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -320,73 +272,22 @@ exit:
}
define void @single_stride_int_iv(ptr %p, i64 %stride) {
-; NOSTRIDED-LABEL: @single_stride_int_iv(
-; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; NOSTRIDED: vector.scevcheck:
-; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
-; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
-; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NOSTRIDED-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
-; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
-; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
-; NOSTRIDED: vector.body:
-; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
-; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
-; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP8]], i32 0
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
-; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP9]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
-; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
-; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
-; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
-; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
-; NOSTRIDED: exit:
-; NOSTRIDED-NEXT: ret void
-;
-; STRIDED-LABEL: @single_stride_int_iv(
-; STRIDED-NEXT: entry:
-; STRIDED-NEXT: br label [[LOOP:%.*]]
-; STRIDED: loop:
-; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
-; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; STRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
-; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
-; STRIDED: exit:
-; STRIDED-NEXT: ret void
+; CHECK-LABEL: @single_stride_int_iv(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
+; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
+; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
+; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
+; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
+; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
+; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -444,142 +345,82 @@ exit:
}
define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
-; NOSTRIDED-LABEL: @double_stride_int_scaled(
-; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[P3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
-; NOSTRIDED-NEXT: [[P21:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; NOSTRIDED: vector.scevcheck:
-; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
-; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
-; NOSTRIDED: vector.memcheck:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; NOSTRIDED-NEXT: [[TMP6:%.*]] = sub i64 [[P21]], [[P3]]
-; NOSTRIDED-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
-; NOSTRIDED-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
-; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP8]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
-; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
-; NOSTRIDED: vector.body:
-; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 0
-; NOSTRIDED-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP11]]
-; NOSTRIDED-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i32 0
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
-; NOSTRIDED-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; NOSTRIDED-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P2]], i64 [[TMP11]]
-; NOSTRIDED-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i32 0
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP16]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
-; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
-; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
-; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; NOSTRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
-; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
-; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
-; NOSTRIDED: exit:
-; NOSTRIDED-NEXT: ret void
-;
-; STRIDED-LABEL: @double_stride_int_scaled(
-; STRIDED-NEXT: entry:
-; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 24, i64 [[TMP1]])
-; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
-; STRIDED: vector.memcheck:
-; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 4092
-; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
-; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
-; STRIDED-NEXT: [[UMIN:%.*]] = select i1 [[TMP4]], ptr [[P2]], ptr [[SCEVGEP]]
-; STRIDED-NEXT: [[TMP5:%.*]] = icmp ugt ptr [[P2]], [[SCEVGEP]]
-; STRIDED-NEXT: [[UMAX:%.*]] = select i1 [[TMP5]], ptr [[P2]], ptr [[SCEVGEP]]
-; STRIDED-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[UMAX]], i64 4
-; STRIDED-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP3]]
-; STRIDED-NEXT: [[TMP6:%.*]] = icmp ult ptr [[P]], [[SCEVGEP2]]
-; STRIDED-NEXT: [[UMIN3:%.*]] = select i1 [[TMP6]], ptr [[P]], ptr [[SCEVGEP2]]
-; STRIDED-NEXT: [[TMP7:%.*]] = icmp ugt ptr [[P]], [[SCEVGEP2]]
-; STRIDED-NEXT: [[UMAX4:%.*]] = select i1 [[TMP7]], ptr [[P]], ptr [[SCEVGEP2]]
-; STRIDED-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[UMAX4]], i64 4
-; STRIDED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[UMIN]], [[SCEVGEP5]]
-; STRIDED-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[UMIN3]], [[SCEVGEP1]]
-; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
-; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
-; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
-; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; STRIDED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
-; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-; STRIDED-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i64> [[TMP12]], zeroinitializer
-; STRIDED-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP13]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
-; STRIDED-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
-; STRIDED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP16]]
-; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
-; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
-; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
-; STRIDED: vector.body:
-; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
-; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)), !alias.scope [[META11:![0-9]+]], !noalias [[META8]]
-; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; STRIDED-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; STRIDED-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
-; STRIDED: middle.block:
-; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
-; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: br label [[LOOP:%.*]]
-; STRIDED: loop:
-; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
-; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; STRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
-; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
-; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
-; STRIDED: exit:
-; STRIDED-NEXT: ret void
+; CHECK-LABEL: @double_stride_int_scaled(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 24, i64 [[TMP1]])
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK: vector.memcheck:
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 4092
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
+; CHECK-NEXT: [[UMIN:%.*]] = select i1 [[TMP4]], ptr [[P2]], ptr [[SCEVGEP]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt ptr [[P2]], [[SCEVGEP]]
+; CHECK-NEXT: [[UMAX:%.*]] = select i1 [[TMP5]], ptr [[P2]], ptr [[SCEVGEP]]
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[UMAX]], i64 4
+; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult ptr [[P]], [[SCEVGEP2]]
+; CHECK-NEXT: [[UMIN3:%.*]] = select i1 [[TMP6]], ptr [[P]], ptr [[SCEVGEP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt ptr [[P]], [[SCEVGEP2]]
+; CHECK-NEXT: [[UMAX4:%.*]] = select i1 [[TMP7]], ptr [[P]], ptr [[SCEVGEP2]]
+; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[UMAX4]], i64 4
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[UMIN]], [[SCEVGEP5]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[UMIN3]], [[SCEVGEP1]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i64> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP13]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
+; CHECK-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP16]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
+; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)), !alias.scope [[META11:![0-9]+]], !noalias [[META8]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
+; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
+; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
+; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
+; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
+; CHECK-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
+; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
+; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -601,75 +442,23 @@ exit:
}
define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
-; NOSTRIDED-LABEL: @double_stride_int_iv(
-; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; NOSTRIDED: vector.scevcheck:
-; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
-; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
-; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NOSTRIDED-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
-; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
-; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
-; NOSTRIDED: vector.body:
-; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
-; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
-; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP8]], i32 0
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
-; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP9]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
-; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
-; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
-; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; NOSTRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
-; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
-; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
-; NOSTRIDED: exit:
-; NOSTRIDED-NEXT: ret void
-;
-; STRIDED-LABEL: @double_stride_int_iv(
-; STRIDED-NEXT: entry:
-; STRIDED-NEXT: br label [[LOOP:%.*]]
-; STRIDED: loop:
-; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
-; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; STRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
-; STRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
-; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
-; STRIDED: exit:
-; STRIDED-NEXT: ret void
+; CHECK-LABEL: @double_stride_int_iv(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
+; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
+; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
+; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
+; CHECK-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
+; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
+; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
+; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
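
The NOSTRIDED and STRIDED prefixes collapse into a single CHECK prefix here because both configurations now produce the same output: without versioning on the stride, the loop stays scalar. As a rough source-level picture of @double_stride_int_iv, reconstructed from the IR for illustration (names and types are mine, not part of the patch):

  void double_stride_int_iv(int *p, int *p2, long stride) {
    (void)p2; // a parameter of the test, unused in this loop body
    long offset = 0;
    for (long i = 0; i < 1024; ++i) {
      p[offset] += 1;   // the Q0 load and Q1 store hit the same address
      offset += stride; // second IV, stepping by the symbolic stride
    }
  }

The trip count here is the constant 1024, i.e. the backedge-taken count is a known non-negative constant, which is exactly the case this patch stops versioning for.
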
diff --git a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
index 45596169da3cc..b181ed1bd9ac7 100644
--- a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
+++ b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll
@@ -18,31 +18,42 @@ define void @test_versioned_with_sext_use(i32 %offset, ptr %dst) {
; CHECK-NEXT: br i1 [[C]], label [[INNER_LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: inner.loop.preheader:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]]
; CHECK-NEXT: [[IND_END]] = add i64 [[IV_1]], [[TMP0]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_1]], [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 0, [[OFFSET_EXT]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], [[TMP10]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 2, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[OFFSET_IDX]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 3, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], [[TMP8]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP4]], i32 0
-; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP3]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP9]]
+; CHECK-NEXT: store i32 0, ptr [[TMP4]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP11]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP12]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP3]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP5]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP18]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[TMP9]], [[OFFSET_EXT]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[OUTER_HEADER_LOOPEXIT]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[IV_1]], [[INNER_LOOP_PREHEADER]] ], [ [[IV_1]], [[VECTOR_SCEVCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 200, [[MIDDLE_BLOCK]] ], [ 0, [[INNER_LOOP_PREHEADER]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[IV_1]], [[INNER_LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 200, [[MIDDLE_BLOCK]] ], [ 0, [[INNER_LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[INNER_LOOP]]
; CHECK: inner.loop:
; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[IV_2_NEXT]], [[INNER_LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
@@ -94,31 +105,42 @@ define void @test_versioned_with_zext_use(i32 %offset, ptr %dst) {
; CHECK-NEXT: br i1 [[C]], label [[INNER_LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: inner.loop.preheader:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]]
; CHECK-NEXT: [[IND_END]] = add i64 [[IV_1]], [[TMP0]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_1]], [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 0, [[OFFSET_EXT]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], [[TMP10]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 2, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[OFFSET_IDX]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 3, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], [[TMP8]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP4]], i32 0
-; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP3]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP9]]
+; CHECK-NEXT: store i32 0, ptr [[TMP4]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP11]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP12]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP3]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP5]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP18]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[TMP9]], [[OFFSET_EXT]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[OUTER_HEADER_LOOPEXIT]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[IV_1]], [[INNER_LOOP_PREHEADER]] ], [ [[IV_1]], [[VECTOR_SCEVCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 200, [[MIDDLE_BLOCK]] ], [ 0, [[INNER_LOOP_PREHEADER]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[IV_1]], [[INNER_LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 200, [[MIDDLE_BLOCK]] ], [ 0, [[INNER_LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[INNER_LOOP]]
; CHECK: inner.loop:
; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[IV_2_NEXT]], [[INNER_LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
@@ -161,33 +183,45 @@ define void @versioned_sext_use_in_gep(i32 %scale, ptr %dst, i64 %scale.2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SCALE_EXT:%.*]] = sext i32 [[SCALE]] to i64
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[SCALE]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[SCALE_EXT]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[SCALE_2]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_SCEVCHECK]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[SCALE_2]]
+; CHECK-NEXT: [[TMP9:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i64> [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP9]], i32 1
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP9]], i32 2
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP9]], i32 3
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP24]]
; CHECK-NEXT: store ptr [[TMP8]], ptr [[TMP11]], align 8
-; CHECK-NEXT: store ptr [[TMP8]], ptr [[TMP13]], align 8
-; CHECK-NEXT: store ptr [[TMP8]], ptr [[TMP15]], align 8
-; CHECK-NEXT: store ptr [[TMP8]], ptr [[TMP17]], align 8
+; CHECK-NEXT: store ptr [[TMP21]], ptr [[TMP13]], align 8
+; CHECK-NEXT: store ptr [[TMP23]], ptr [[TMP15]], align 8
+; CHECK-NEXT: store ptr [[TMP25]], ptr [[TMP17]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -228,6 +262,7 @@ define void @test_versioned_with_different_uses(i32 %offset, ptr noalias %dst.1,
; CHECK-SAME: i32 [[OFFSET:%.*]], ptr noalias [[DST_1:%.*]], ptr [[DST_2:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[OFFSET_EXT:%.*]] = zext i32 [[OFFSET]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = sub i32 0, [[OFFSET]]
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header.loopexit:
; CHECK-NEXT: [[IV_2_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_2_NEXT:%.*]], [[INNER_LOOP:%.*]] ], [ [[IND_END:%.*]], [[MIDDLE_BLOCK:%.*]] ]
@@ -239,36 +274,63 @@ define void @test_versioned_with_different_uses(i32 %offset, ptr noalias %dst.1,
; CHECK: inner.loop.preheader:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = icmp slt i32 [[OFFSET]], 0
+; CHECK-NEXT: [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP19]], i32 [[OFFSET]]
+; CHECK-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP21]], i32 200)
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP22:%.*]] = sub i32 0, [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp slt i32 [[MUL_RESULT]], 0
+; CHECK-NEXT: [[TMP24:%.*]] = icmp sgt i32 [[TMP22]], 0
+; CHECK-NEXT: [[TMP25:%.*]] = select i1 [[TMP20]], i1 [[TMP24]], i1 [[TMP23]]
+; CHECK-NEXT: [[IDENT_CHECK:%.*]] = or i1 [[TMP25]], [[MUL_OVERFLOW]]
; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]]
; CHECK-NEXT: [[IND_END]] = add i64 [[IV_1]], [[TMP0]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[OFFSET]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_1]], [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 0, [[OFFSET_EXT]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], [[TMP2]]
-; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = trunc i64 [[INDEX]] to i32
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX2]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX2]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[OFFSET_IDX2]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[OFFSET_IDX2]], 3
+; CHECK-NEXT: [[TMP26:%.*]] = mul i64 1, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], [[TMP26]]
+; CHECK-NEXT: [[TMP14:%.*]] = mul i64 2, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], [[TMP16]]
+; CHECK-NEXT: [[TMP18:%.*]] = mul <4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP18]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST_1]], i32 [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP18]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[DST_1]], i32 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[TMP18]], i32 2
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[DST_1]], i32 [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP18]], i32 3
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST_1]], i32 [[TMP7]]
; CHECK-NEXT: store i32 0, ptr [[TMP8]], align 8
; CHECK-NEXT: store i32 0, ptr [[TMP9]], align 8
; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 8
; CHECK-NEXT: store i32 0, ptr [[TMP11]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i32 0
-; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP13]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP3]], 1
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP27]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP17]]
+; CHECK-NEXT: store i32 0, ptr [[TMP12]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP28]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP29]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP30]], align 8
+; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP3]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP32:%.*]] = add i64 [[TMP13]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP33:%.*]] = add i64 [[TMP27]], [[OFFSET_EXT]]
+; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[TMP17]], [[OFFSET_EXT]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
@@ -336,18 +398,18 @@ define void @test_versioned_with_non_ex_use(i32 %offset, ptr noalias %dst.1, ptr
; CHECK-NEXT: [[TMP4:%.*]] = icmp slt i32 [[MUL_RESULT]], 0
; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP1]], i1 [[TMP5]], i1 [[TMP4]]
-; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1
-; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[IDENT_CHECK]]
+; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[ADD]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i64> poison, i64 [[OFFSET_EXT]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT4]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND1:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT1:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP10:%.*]] = mul <4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP10]], i32 0
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[DST_1]], i32 [[TMP11]]
@@ -361,10 +423,21 @@ define void @test_versioned_with_non_ex_use(i32 %offset, ptr noalias %dst.1, ptr
; CHECK-NEXT: store i32 0, ptr [[TMP14]], align 8
; CHECK-NEXT: store i32 0, ptr [[TMP16]], align 8
; CHECK-NEXT: store i32 0, ptr [[TMP18]], align 8
+; CHECK-NEXT: [[TMP19:%.*]] = mul <4 x i64> [[VEC_IND1]], [[BROADCAST_SPLAT5]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP19]], i32 0
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP20]], i32 0
-; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP21]], align 8
+; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i64> [[TMP19]], i32 1
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP26]]
+; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i64> [[TMP19]], i32 2
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP27]]
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP19]], i32 3
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[DST_2]], i64 [[TMP24]]
+; CHECK-NEXT: store i32 0, ptr [[TMP20]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP21]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP23]], align 8
+; CHECK-NEXT: store i32 0, ptr [[TMP25]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT1]] = add <4 x i64> [[VEC_IND1]], <i64 4, i64 4, i64 4, i64 4>
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -424,10 +497,7 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 15, [[G_64]]
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
@@ -438,9 +508,20 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[G_64]]
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 0, [[G_64]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[G_64]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = mul i64 2, [[G_64]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], [[TMP14]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 3, [[G_64]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], [[TMP8]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP4]], i32 0
-; CHECK-NEXT: store <4 x i16> <i16 1, i16 1, i16 1, i16 1>, ptr [[TMP5]], align 2
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP9]]
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP4]], align 2
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP11]], align 2
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP12]], align 2
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP13]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -448,7 +529,7 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -490,10 +571,7 @@ define void @sext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], [[G_64]]
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
@@ -504,17 +582,27 @@ define void @sext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[G_64]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 0, [[G_64]]
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], [[TMP3]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[G_64]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], [[TMP11]]
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 2, [[G_64]]
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 3, [[G_64]]
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], [[TMP9]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP5]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 -3
-; CHECK-NEXT: store <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, ptr [[TMP7]], align 2
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP10]]
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP5]], align 2
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP12]], align 2
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP13]], align 2
+; CHECK-NEXT: store i16 [[G_16]], ptr [[TMP14]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -548,17 +636,17 @@ exit:
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
-; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
;.
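
Every vector.scevcheck block deleted in this file was the same identity check (icmp ne stride, 1) guarding entry to the vector loop. A minimal sketch of the LAA-side early exit as I read it — not the verbatim diff — assuming the PSE (PredicatedScalarEvolution) that LoopAccessInfo::collectStridedAccess already has in scope:

  // Sketch: skip stride speculation when the predicate is redundant.
  const SCEV *BTC = PSE->getBackedgeTakenCount();
  if (PSE->getSE()->isKnownNonNegative(BTC))
    return; // BTC >= 0 already known; a Stride == 1 check adds nothing

getBackedgeTakenCount, getSE, and isKnownNonNegative are existing ScalarEvolution/PredicatedScalarEvolution APIs; where exactly the bailout sits inside collectStridedAccess is my assumption.
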
diff --git a/llvm/test/Transforms/LoopVersioning/invalidate-laa-after-versioning.ll b/llvm/test/Transforms/LoopVersioning/invalidate-laa-after-versioning.ll
index 8075314a65b49..22cfd81dec52d 100644
--- a/llvm/test/Transforms/LoopVersioning/invalidate-laa-after-versioning.ll
+++ b/llvm/test/Transforms/LoopVersioning/invalidate-laa-after-versioning.ll
@@ -9,33 +9,15 @@
define void @test(ptr %arg, i64 %arg1) {
; CHECK-LABEL: @test(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br label [[INNER_1_LVER_CHECK:%.*]]
-; CHECK: inner.1.lver.check:
+; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
+; CHECK: outer.header:
; CHECK-NEXT: [[PTR_PHI:%.*]] = phi ptr [ [[ARG:%.*]], [[BB:%.*]] ], [ @glob.1, [[OUTER_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 3
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[ARG1:%.*]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[INNER_1_PH_LVER_ORIG:%.*]], label [[INNER_1_PH:%.*]]
-; CHECK: inner.1.ph.lver.orig:
-; CHECK-NEXT: br label [[INNER_1_LVER_ORIG:%.*]]
-; CHECK: inner.1.lver.orig:
-; CHECK-NEXT: [[IV_1_LVER_ORIG:%.*]] = phi i64 [ 0, [[INNER_1_PH_LVER_ORIG]] ], [ [[IV_NEXT_LVER_ORIG:%.*]], [[INNER_1_LVER_ORIG]] ]
-; CHECK-NEXT: [[PTR_IV_1_LVER_ORIG:%.*]] = phi ptr [ @glob.2, [[INNER_1_PH_LVER_ORIG]] ], [ [[PTR_IV_1_NEXT_LVER_ORIG:%.*]], [[INNER_1_LVER_ORIG]] ]
-; CHECK-NEXT: [[TMP25_LVER_ORIG:%.*]] = mul nuw nsw i64 [[IV_1_LVER_ORIG]], [[ARG1]]
-; CHECK-NEXT: [[GEP_2_LVER_ORIG:%.*]] = getelementptr inbounds double, ptr [[GEP_1]], i64 [[TMP25_LVER_ORIG]]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_2_LVER_ORIG]], align 8
-; CHECK-NEXT: [[GEP_3_LVER_ORIG:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 [[TMP25_LVER_ORIG]]
-; CHECK-NEXT: [[GEP_4_LVER_ORIG:%.*]] = getelementptr double, ptr [[GEP_3_LVER_ORIG]], i64 2
-; CHECK-NEXT: [[TMP29_LVER_ORIG:%.*]] = load double, ptr [[GEP_4_LVER_ORIG]], align 8
-; CHECK-NEXT: [[PTR_IV_1_NEXT_LVER_ORIG]] = getelementptr inbounds double, ptr [[PTR_IV_1_LVER_ORIG]], i64 1
-; CHECK-NEXT: [[IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[IV_1_LVER_ORIG]], 1
-; CHECK-NEXT: [[C_1_LVER_ORIG:%.*]] = icmp eq i64 [[IV_1_LVER_ORIG]], 1
-; CHECK-NEXT: br i1 [[C_1_LVER_ORIG]], label [[INNER_1_EXIT_LOOPEXIT:%.*]], label [[INNER_1_LVER_ORIG]]
-; CHECK: inner.1.ph:
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds double, ptr [[PTR_PHI]], i64 3
; CHECK-NEXT: br label [[INNER_1:%.*]]
; CHECK: inner.1:
-; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, [[INNER_1_PH]] ], [ [[IV_NEXT:%.*]], [[INNER_1]] ]
-; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ @glob.2, [[INNER_1_PH]] ], [ [[PTR_IV_1_NEXT:%.*]], [[INNER_1]] ]
-; CHECK-NEXT: [[TMP25:%.*]] = mul nuw nsw i64 [[IV_1]], [[ARG1]]
+; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, [[OUTER_HEADER]] ], [ [[IV_NEXT:%.*]], [[INNER_1]] ]
+; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ @glob.2, [[OUTER_HEADER]] ], [ [[PTR_IV_1_NEXT:%.*]], [[INNER_1]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = mul nuw nsw i64 [[IV_1]], [[ARG1:%.*]]
; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds double, ptr [[GEP_1]], i64 [[TMP25]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_2]], align 8
; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 [[TMP25]]
@@ -44,15 +26,10 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr inbounds double, ptr [[PTR_IV_1]], i64 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i64 [[IV_1]], 1
-; CHECK-NEXT: br i1 [[C_1]], label [[INNER_1_EXIT_LOOPEXIT1:%.*]], label [[INNER_1]]
-; CHECK: inner.1.exit.loopexit:
-; CHECK-NEXT: [[LCSSA_PTR_IV_1_PH:%.*]] = phi ptr [ [[PTR_IV_1_LVER_ORIG]], [[INNER_1_LVER_ORIG]] ]
-; CHECK-NEXT: br label [[INNER_1_EXIT:%.*]]
-; CHECK: inner.1.exit.loopexit1:
-; CHECK-NEXT: [[LCSSA_PTR_IV_1_PH2:%.*]] = phi ptr [ [[PTR_IV_1]], [[INNER_1]] ]
-; CHECK-NEXT: br label [[INNER_1_EXIT]]
+; CHECK-NEXT: br i1 [[C_1]], label [[INNER_1_EXIT:%.*]], label [[INNER_1]]
; CHECK: inner.1.exit:
-; CHECK-NEXT: [[LCSSA_PTR_IV_1:%.*]] = phi ptr [ [[LCSSA_PTR_IV_1_PH]], [[INNER_1_EXIT_LOOPEXIT]] ], [ [[LCSSA_PTR_IV_1_PH2]], [[INNER_1_EXIT_LOOPEXIT1]] ]
+; CHECK-NEXT: [[IV_1_LCSSA:%.*]] = phi i64 [ [[IV_1]], [[INNER_1]] ]
+; CHECK-NEXT: [[LCSSA_PTR_IV_1:%.*]] = phi ptr [ [[PTR_IV_1]], [[INNER_1]] ]
; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds double, ptr [[LCSSA_PTR_IV_1]], i64 1
; CHECK-NEXT: br label [[INNER_2:%.*]]
; CHECK: inner.2:
@@ -66,9 +43,11 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK-NEXT: [[LCSSA_PTR_IV_2:%.*]] = phi ptr [ [[PTR_IV_2]], [[INNER_2]] ]
; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds double, ptr [[PTR_PHI]], i64 1
; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds double, ptr [[LCSSA_PTR_IV_2]], i64 1
-; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[INDVAR_LCSSA]], 3
+; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[IV_1_LCSSA]], 3
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 24
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[LCSSA_PTR_IV_1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDVAR_LCSSA]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr @glob.2, i64 [[TMP3]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_7]], [[GEP_1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[PTR_PHI]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
@@ -90,19 +69,19 @@ define void @test(ptr %arg, i64 %arg1) {
; CHECK: inner.3:
; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ 0, [[INNER_3_PH]] ], [ [[IV_2_NEXT:%.*]], [[INNER_3]] ]
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds double, ptr [[GEP_6]], i64 [[IV_2]]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_7]], align 8, !alias.scope !0, !noalias !3
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_8]], align 8, !alias.scope !3
+; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_7]], align 8, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_8]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr double, ptr [[PTR_PHI]], i64 [[IV_2]]
-; CHECK-NEXT: [[TMP18:%.*]] = load double, ptr [[GEP_9]], align 8, !alias.scope !3
+; CHECK-NEXT: [[TMP18:%.*]] = load double, ptr [[GEP_9]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 1
; CHECK-NEXT: [[C_2:%.*]] = icmp eq i64 [[IV_2]], 1
-; CHECK-NEXT: br i1 [[C_2]], label [[OUTER_LATCH_LOOPEXIT3:%.*]], label [[INNER_3]]
+; CHECK-NEXT: br i1 [[C_2]], label [[OUTER_LATCH_LOOPEXIT1:%.*]], label [[INNER_3]]
; CHECK: outer.latch.loopexit:
; CHECK-NEXT: br label [[OUTER_LATCH]]
-; CHECK: outer.latch.loopexit3:
+; CHECK: outer.latch.loopexit1:
; CHECK-NEXT: br label [[OUTER_LATCH]]
; CHECK: outer.latch:
-; CHECK-NEXT: br label [[INNER_1_LVER_CHECK]]
+; CHECK-NEXT: br label [[OUTER_HEADER]]
;
bb:
br label %outer.header
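
In the LoopVersioning tests the visible effect is that the .lver.orig clone and its loop-exit plumbing disappear entirely. Schematically, stride versioning expands a loop as below (illustration only; the transform clones IR basic blocks, not source), so dropping the predicate means only the original arm survives:

  void versioned_shape(long stride, long n, int *p) {
    if (stride == 1) {
      for (long i = 0; i < n; ++i) // fast path specialized to unit stride
        p[i] += 1;
    } else {
      for (long i = 0; i < n; ++i) // .lver.orig: the unmodified loop
        p[i * stride] += 1;
    }
  }
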
diff --git a/llvm/test/Transforms/LoopVersioning/pr96656.ll b/llvm/test/Transforms/LoopVersioning/pr96656.ll
index 0264fe40a9430..2ef8ccbb8f9d1 100644
--- a/llvm/test/Transforms/LoopVersioning/pr96656.ll
+++ b/llvm/test/Transforms/LoopVersioning/pr96656.ll
@@ -6,44 +6,26 @@ define void @lver.check.unnecessary(ptr %arg, ptr %arg1, i1 %arg2) {
; CHECK-SAME: ptr [[ARG:%.*]], ptr [[ARG1:%.*]], i1 [[ARG2:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARG]], align 4
-; CHECK-NEXT: br i1 [[ARG2]], label %[[NOLOOP_EXIT:.*]], label %[[LOOP_BODY_LVER_CHECK:.*]]
-; CHECK: [[LOOP_BODY_LVER_CHECK]]:
+; CHECK-NEXT: br i1 [[ARG2]], label %[[NOLOOP_EXIT:.*]], label %[[LOOP_PH:.*]]
+; CHECK: [[LOOP_PH]]:
; CHECK-NEXT: [[SEXT7:%.*]] = sext i32 [[LOAD]] to i64
; CHECK-NEXT: [[GEP8:%.*]] = getelementptr i8, ptr [[ARG1]], i64 8
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[LOAD]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[LOOP_BODY_PH_LVER_ORIG:.*]], label %[[LOOP_BODY_PH:.*]]
-; CHECK: [[LOOP_BODY_PH_LVER_ORIG]]:
-; CHECK-NEXT: br label %[[LOOP_BODY_LVER_ORIG:.*]]
-; CHECK: [[LOOP_BODY_LVER_ORIG]]:
-; CHECK-NEXT: [[PHI_LVER_ORIG:%.*]] = phi i64 [ 0, %[[LOOP_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], %[[LOOP_BODY_LVER_ORIG]] ]
-; CHECK-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[PHI_LVER_ORIG]], [[SEXT7]]
-; CHECK-NEXT: [[GEP10_LVER_ORIG:%.*]] = getelementptr double, ptr [[GEP8]], i64 [[MUL_LVER_ORIG]]
-; CHECK-NEXT: [[LOAD11_LVER_ORIG:%.*]] = load double, ptr [[GEP10_LVER_ORIG]], align 8
-; CHECK-NEXT: store double [[LOAD11_LVER_ORIG]], ptr [[ARG1]], align 8
-; CHECK-NEXT: [[ADD_LVER_ORIG]] = add i64 [[PHI_LVER_ORIG]], 1
-; CHECK-NEXT: [[ICMP_LVER_ORIG:%.*]] = icmp eq i64 [[PHI_LVER_ORIG]], 0
-; CHECK-NEXT: br i1 [[ICMP_LVER_ORIG]], label %[[LOOP_EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY_LVER_ORIG]]
-; CHECK: [[LOOP_BODY_PH]]:
; CHECK-NEXT: br label %[[LOOP_BODY:.*]]
; CHECK: [[LOOP_BODY]]:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[LOOP_BODY_PH]] ], [ [[ADD:%.*]], %[[LOOP_BODY]] ]
+; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[LOOP_PH]] ], [ [[ADD:%.*]], %[[LOOP_BODY]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[PHI]], [[SEXT7]]
; CHECK-NEXT: [[GEP10:%.*]] = getelementptr double, ptr [[GEP8]], i64 [[MUL]]
; CHECK-NEXT: [[LOAD11:%.*]] = load double, ptr [[GEP10]], align 8
; CHECK-NEXT: store double [[LOAD11]], ptr [[ARG1]], align 8
; CHECK-NEXT: [[ADD]] = add i64 [[PHI]], 1
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[PHI]], 0
-; CHECK-NEXT: br i1 [[ICMP]], label %[[LOOP_EXIT_LOOPEXIT1:.*]], label %[[LOOP_BODY]]
+; CHECK-NEXT: br i1 [[ICMP]], label %[[LOOP_EXIT:.*]], label %[[LOOP_BODY]]
; CHECK: [[NOLOOP_EXIT]]:
; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD]] to i64
; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[ARG1]], i64 [[SEXT]]
; CHECK-NEXT: [[LOAD5:%.*]] = load double, ptr [[GEP]], align 8
; CHECK-NEXT: store double [[LOAD5]], ptr [[ARG]], align 8
; CHECK-NEXT: ret void
-; CHECK: [[LOOP_EXIT_LOOPEXIT]]:
-; CHECK-NEXT: br label %[[LOOP_EXIT:.*]]
-; CHECK: [[LOOP_EXIT_LOOPEXIT1]]:
-; CHECK-NEXT: br label %[[LOOP_EXIT]]
; CHECK: [[LOOP_EXIT]]:
; CHECK-NEXT: ret void
;