[clang] [llvm] [InstCombine] Split GEPs with multiple non-zero offsets (PR #151333)
Nikita Popov via cfe-commits
cfe-commits at lists.llvm.org
Wed Jul 30 06:25:34 PDT 2025
https://github.com/nikic created https://github.com/llvm/llvm-project/pull/151333
Split GEPs that have more than one non-zero offset into two GEPs. This is in preparation for the ptradd migration, which can only represent GEPs with a single offset.
This also enables CSE and LICM of the common base.
>From 04b2c59fc958a2c45ef87bd5f45952917d673925 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 30 Jul 2025 14:38:15 +0200
Subject: [PATCH] [InstCombine] Split GEPs with multiple non-zero offsets
Split GEPs that have more than one non-zero offset into two GEPs.
This is in preparation for the ptradd migration, which can only
represent GEPs with a single offset.
This also enables CSE and LICM of the common base.
---
clang/test/CodeGen/union-tbaa1.c | 8 +-
.../InstCombine/InstructionCombining.cpp | 149 +++---------
.../InstCombine/2010-11-21-SizeZeroTypeGEP.ll | 2 +-
.../InstCombine/gep-merge-constant-indices.ll | 14 +-
.../Transforms/InstCombine/gepofconstgepi8.ll | 3 +-
llvm/test/Transforms/InstCombine/gepphigep.ll | 18 +-
.../Transforms/InstCombine/getelementptr.ll | 6 +-
llvm/test/Transforms/InstCombine/load-cmp.ll | 43 +++-
.../test/Transforms/InstCombine/opaque-ptr.ll | 13 +-
.../InstCombine/ptrtoint-nullgep.ll | 24 +-
.../test/Transforms/InstCombine/select-gep.ll | 18 +-
llvm/test/Transforms/InstCombine/strcmp-3.ll | 2 +-
llvm/test/Transforms/InstCombine/strlen-7.ll | 7 +-
llvm/test/Transforms/InstCombine/strlen-8.ll | 8 +-
.../Transforms/InstCombine/vectorgep-crash.ll | 3 +-
.../AArch64/sve-interleaved-accesses.ll | 33 ++-
.../LoopVectorize/AArch64/sve2-histcnt.ll | 3 +-
.../Transforms/LoopVectorize/induction.ll | 223 ++++++++++--------
.../interleaved-accesses-pred-stores.ll | 34 ++-
.../LoopVectorize/interleaved-accesses.ll | 82 ++++---
.../LoopVectorize/reduction-inloop.ll | 12 +-
21 files changed, 370 insertions(+), 335 deletions(-)
diff --git a/clang/test/CodeGen/union-tbaa1.c b/clang/test/CodeGen/union-tbaa1.c
index 1e2f384c29a5b..20b5abee2bf92 100644
--- a/clang/test/CodeGen/union-tbaa1.c
+++ b/clang/test/CodeGen/union-tbaa1.c
@@ -18,16 +18,16 @@ void bar(vect32 p[][2]);
// CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP1]], [[NUM]]
// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]]
// CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]], i32 1
+// CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX]], i32 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[MUL6:%.*]] = mul i32 [[TMP2]], [[NUM]]
-// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]], i32 1
+// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX2]], i32 4
// CHECK-NEXT: store i32 [[MUL6]], ptr [[ARRAYIDX8]], align 4, !tbaa [[TBAA6]]
// CHECK-NEXT: [[TMP3:%.*]] = lshr i32 [[MUL]], 16
// CHECK-NEXT: store i32 [[TMP3]], ptr [[VEC]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP4]], i32 1
-// CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX14]], i32 2
+// CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP4]]
+// CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX13]], i32 6
// CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2, !tbaa [[TBAA6]]
// CHECK-NEXT: [[CONV16:%.*]] = zext i16 [[TMP5]] to i32
// CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds nuw i8, ptr [[VEC]], i32 4
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 873b104bc0944..c56d2ffdaff59 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2671,125 +2671,53 @@ Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
return I;
- // For constant GEPs, use a more general offset-based folding approach.
- Type *PtrTy = Src->getType()->getScalarType();
- if (GEP.hasAllConstantIndices() &&
- (Src->hasOneUse() || Src->hasAllConstantIndices())) {
- // Split Src into a variable part and a constant suffix.
- gep_type_iterator GTI = gep_type_begin(*Src);
- Type *BaseType = GTI.getIndexedType();
- bool IsFirstType = true;
- unsigned NumVarIndices = 0;
- for (auto Pair : enumerate(Src->indices())) {
- if (!isa<ConstantInt>(Pair.value())) {
- BaseType = GTI.getIndexedType();
- IsFirstType = false;
- NumVarIndices = Pair.index() + 1;
- }
- ++GTI;
- }
-
- // Determine the offset for the constant suffix of Src.
- APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
- if (NumVarIndices != Src->getNumIndices()) {
- // FIXME: getIndexedOffsetInType() does not handled scalable vectors.
- if (BaseType->isScalableTy())
- return nullptr;
-
- SmallVector<Value *> ConstantIndices;
- if (!IsFirstType)
- ConstantIndices.push_back(
- Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
- append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
- Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
- }
-
- // Add the offset for GEP (which is fully constant).
- if (!GEP.accumulateConstantOffset(DL, Offset))
- return nullptr;
-
- // Convert the total offset back into indices.
- SmallVector<APInt> ConstIndices =
- DL.getGEPIndicesForOffset(BaseType, Offset);
- if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
- return nullptr;
-
- GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
- SmallVector<Value *> Indices(
- drop_end(Src->indices(), Src->getNumIndices() - NumVarIndices));
- for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
- Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
- // Even if the total offset is inbounds, we may end up representing it
- // by first performing a larger negative offset, and then a smaller
- // positive one. The large negative offset might go out of bounds. Only
- // preserve inbounds if all signs are the same.
- if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
- NW = NW.withoutNoUnsignedSignedWrap();
- if (!Idx.isNonNegative())
- NW = NW.withoutNoUnsignedWrap();
- }
-
- return replaceInstUsesWith(
- GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
- Indices, "", NW));
- }
-
if (Src->getResultElementType() != GEP.getSourceElementType())
return nullptr;
- SmallVector<Value*, 8> Indices;
-
// Find out whether the last index in the source GEP is a sequential idx.
bool EndsWithSequential = false;
for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
I != E; ++I)
EndsWithSequential = I.isSequential();
+ if (!EndsWithSequential)
+ return nullptr;
- // Can we combine the two pointer arithmetics offsets?
- if (EndsWithSequential) {
- // Replace: gep (gep %P, long B), long A, ...
- // With: T = long A+B; gep %P, T, ...
- Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
- Value *GO1 = GEP.getOperand(1);
-
- // If they aren't the same type, then the input hasn't been processed
- // by the loop above yet (which canonicalizes sequential index types to
- // intptr_t). Just avoid transforming this until the input has been
- // normalized.
- if (SO1->getType() != GO1->getType())
- return nullptr;
+ // Replace: gep (gep %P, long B), long A, ...
+ // With: T = long A+B; gep %P, T, ...
+ Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
+ Value *GO1 = GEP.getOperand(1);
- Value *Sum =
- simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
- // Only do the combine when we are sure the cost after the
- // merge is never more than that before the merge.
- if (Sum == nullptr)
- return nullptr;
+ // If they aren't the same type, then the input hasn't been processed
+ // by the loop above yet (which canonicalizes sequential index types to
+ // intptr_t). Just avoid transforming this until the input has been
+ // normalized.
+ if (SO1->getType() != GO1->getType())
+ return nullptr;
- Indices.append(Src->op_begin()+1, Src->op_end()-1);
- Indices.push_back(Sum);
- Indices.append(GEP.op_begin()+2, GEP.op_end());
- } else if (isa<Constant>(*GEP.idx_begin()) &&
- cast<Constant>(*GEP.idx_begin())->isNullValue() &&
- Src->getNumOperands() != 1) {
- // Otherwise we can do the fold if the first index of the GEP is a zero
- Indices.append(Src->op_begin()+1, Src->op_end());
- Indices.append(GEP.idx_begin()+1, GEP.idx_end());
- }
-
- // Don't create GEPs with more than one variable index.
- unsigned NumVarIndices =
- count_if(Indices, [](Value *Idx) { return !isa<Constant>(Idx); });
- if (NumVarIndices > 1)
+ Value *Sum =
+ simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
+ // Only do the combine when we are sure the cost after the
+ // merge is never more than that before the merge.
+ if (Sum == nullptr)
return nullptr;
- if (!Indices.empty())
- return replaceInstUsesWith(
- GEP, Builder.CreateGEP(
- Src->getSourceElementType(), Src->getOperand(0), Indices, "",
- getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
+ SmallVector<Value *, 8> Indices;
+ Indices.append(Src->op_begin()+1, Src->op_end()-1);
+ Indices.push_back(Sum);
+ Indices.append(GEP.op_begin()+2, GEP.op_end());
- return nullptr;
+ // Don't create GEPs with more than one non-zero index.
+ unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
+ auto *C = dyn_cast<Constant>(Idx);
+ return !C || !C->isNullValue();
+ });
+ if (NumNonZeroIndices > 1)
+ return nullptr;
+
+ return replaceInstUsesWith(
+ GEP, Builder.CreateGEP(
+ Src->getSourceElementType(), Src->getOperand(0), Indices, "",
+ getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
}
Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
@@ -3231,17 +3159,18 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return replaceInstUsesWith(GEP, Res);
}
- bool SeenVarIndex = false;
+ bool SeenNonZeroIndex = false;
for (auto [IdxNum, Idx] : enumerate(Indices)) {
- if (isa<Constant>(Idx))
+ auto *C = dyn_cast<Constant>(Idx);
+ if (C && C->isNullValue())
continue;
- if (!SeenVarIndex) {
- SeenVarIndex = true;
+ if (!SeenNonZeroIndex) {
+ SeenNonZeroIndex = true;
continue;
}
- // GEP has multiple variable indices: Split it.
+ // GEP has multiple non-zero indices: Split it.
ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
Value *FrontGEP =
Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
diff --git a/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll b/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
index ba36005244589..dc798356a56ec 100644
--- a/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
+++ b/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
@@ -15,7 +15,7 @@ define ptr @foo(ptr %x, i32 %n) {
define ptr @bar(i64 %n, ptr %p) {
; CHECK-LABEL: define ptr @bar(
; CHECK-SAME: i64 [[N:%.*]], ptr [[P:%.*]]) {
-; CHECK-NEXT: [[G:%.*]] = getelementptr { {}, [0 x { [0 x i8] }] }, ptr [[P]], i64 0, i32 1, i64 0, i32 0, i64 [[N]]
+; CHECK-NEXT: [[G:%.*]] = getelementptr [0 x i8], ptr [[P]], i64 0, i64 [[N]]
; CHECK-NEXT: ret ptr [[G]]
;
%g = getelementptr {{}, [0 x {[0 x i8]}]}, ptr %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
diff --git a/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll b/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll
index acea9f8f555c9..ecebaa7d9ae5d 100644
--- a/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll
+++ b/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll
@@ -118,7 +118,8 @@ define ptr @structStruct(ptr %p) {
; result = (ptr) &((struct.B*) p)[i].member1 + 2
define ptr @appendIndex(ptr %p, i64 %i) {
; CHECK-LABEL: @appendIndex(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[P:%.*]], i64 [[I:%.*]], i32 1, i64 2
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[P:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 6
; CHECK-NEXT: ret ptr [[TMP1]]
;
%1 = getelementptr inbounds %struct.B, ptr %p, i64 %i, i32 1
@@ -173,7 +174,8 @@ define ptr @partialConstant3(ptr %p) {
; result = &((struct.C*) p + a).member2
define ptr @partialConstantMemberAliasing1(ptr %p, i64 %a) {
; CHECK-LABEL: @partialConstantMemberAliasing1(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]], i32 2
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
; CHECK-NEXT: ret ptr [[TMP1]]
;
%1 = getelementptr inbounds %struct.C, ptr %p, i64 %a, i32 1
@@ -185,8 +187,8 @@ define ptr @partialConstantMemberAliasing1(ptr %p, i64 %a) {
; address of another member.
define ptr @partialConstantMemberAliasing2(ptr %p, i64 %a) {
; CHECK-LABEL: @partialConstantMemberAliasing2(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]], i32 1
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 5
; CHECK-NEXT: ret ptr [[TMP2]]
;
%1 = getelementptr inbounds %struct.C, ptr %p, i64 %a, i32 1
@@ -198,8 +200,8 @@ define ptr @partialConstantMemberAliasing2(ptr %p, i64 %a) {
; range of the object currently pointed by the non-constant GEP.
define ptr @partialConstantMemberAliasing3(ptr %p, i64 %a) {
; CHECK-LABEL: @partialConstantMemberAliasing3(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]], i32 2
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 12
; CHECK-NEXT: ret ptr [[TMP2]]
;
%1 = getelementptr inbounds %struct.C, ptr %p, i64 %a, i32 2
diff --git a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
index a92e0c263d357..380236c40d30d 100644
--- a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
+++ b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
@@ -150,7 +150,8 @@ define ptr @test_too_many_indices(ptr %base, i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[B]]
; CHECK-NEXT: [[INDEX:%.*]] = add i64 [[A]], 1
-; CHECK-NEXT: [[P2:%.*]] = getelementptr [8 x i32], ptr [[P1]], i64 1, i64 [[INDEX]]
+; CHECK-NEXT: [[P2_SPLIT:%.*]] = getelementptr i8, ptr [[P1]], i64 32
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [8 x i32], ptr [[P2_SPLIT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/gepphigep.ll b/llvm/test/Transforms/InstCombine/gepphigep.ll
index cd1e38bbb2dee..c085dc593e3ef 100644
--- a/llvm/test/Transforms/InstCombine/gepphigep.ll
+++ b/llvm/test/Transforms/InstCombine/gepphigep.ll
@@ -21,7 +21,8 @@ define i32 @test1(ptr %dm, i1 %c, i64 %idx1, i64 %idx2) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[IDX1]], [[BB1]] ], [ [[IDX2]], [[BB2]] ]
-; CHECK-NEXT: [[INST24:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[TMP0]]
+; CHECK-NEXT: [[INST24:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST24]], align 4
; CHECK-NEXT: ret i32 [[INST25]]
;
@@ -71,20 +72,22 @@ bb:
; Check that instcombine doesn't insert GEPs before landingpad.
-define i32 @test3(ptr %dm, i1 %c, i64 %idx1, i64 %idx2, i64 %idx3) personality ptr @__gxx_personality_v0 {
+define i32 @test3(ptr %dm, i1 %c, i64 %idx1, i64 %idx2, i64 %idx3) "instcombine-no-verify-fixpoint" personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test3(
; CHECK-NEXT: bb:
; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[INST1:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], ptr [[DM:%.*]], i64 [[IDX1:%.*]], i32 1
+; CHECK-NEXT: [[INST1_SPLIT:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], ptr [[DM:%.*]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[INST1:%.*]] = getelementptr inbounds nuw i8, ptr [[INST1_SPLIT]], i64 4
; CHECK-NEXT: store i32 0, ptr [[INST1]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[INST12:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[IDX2:%.*]], i32 1, i32 0, i32 1
+; CHECK-NEXT: [[INST2_SPLIT:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[INST12:%.*]] = getelementptr inbounds nuw i8, ptr [[INST2_SPLIT]], i64 8
; CHECK-NEXT: store i32 0, ptr [[INST12]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[IDX1]], [[BB1]] ], [ [[IDX2]], [[BB2]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi ptr [ [[INST1_SPLIT]], [[BB1]] ], [ [[INST2_SPLIT]], [[BB2]] ]
; CHECK-NEXT: [[INST22:%.*]] = invoke i32 @foo1(i32 11)
; CHECK-NEXT: to label [[BB4:%.*]] unwind label [[BB5:%.*]]
; CHECK: bb4:
@@ -92,8 +95,9 @@ define i32 @test3(ptr %dm, i1 %c, i64 %idx1, i64 %idx2, i64 %idx3) personality p
; CHECK: bb5:
; CHECK-NEXT: [[INST27:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: catch ptr @_ZTIi
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[TMP0]], i32 1
-; CHECK-NEXT: [[INST35:%.*]] = getelementptr inbounds [[STRUCT4:%.*]], ptr [[TMP1]], i64 [[IDX3:%.*]], i32 1, i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
+; CHECK-NEXT: [[INST34_SPLIT:%.*]] = getelementptr inbounds [[STRUCT4:%.*]], ptr [[TMP1]], i64 [[IDX3:%.*]]
+; CHECK-NEXT: [[INST35:%.*]] = getelementptr inbounds nuw i8, ptr [[INST34_SPLIT]], i64 12
; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST35]], align 4
; CHECK-NEXT: ret i32 [[INST25]]
;
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 7c1342a7db84b..b9c78f6e870e9 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -1947,7 +1947,8 @@ define ptr @gep_merge_nusw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
define ptr @gep_merge_nuw_const(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nuw_const(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]], i64 1
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[GEP1]], i64 4
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep1 = getelementptr nuw [2 x i32], ptr %p, i64 %idx
@@ -1970,7 +1971,8 @@ define ptr @gep_merge_nuw_const_neg(ptr %p, i64 %idx, i64 %idx2) {
; does not overflow.
define ptr @gep_merge_nusw_const(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nusw_const(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]], i64 1
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[GEP1]], i64 4
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep1 = getelementptr nusw [2 x i32], ptr %p, i64 %idx
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index 12be81b8f815d..7d8d4749e1d03 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -193,8 +193,10 @@ define i1 @test8(i32 %X) {
define i1 @test9(i32 %X) {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[TMP1]], 2
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr inbounds [4 x { i32, i32 }], ptr @GA, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds nuw i8, ptr [[P_SPLIT]], i32 4
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 1
; CHECK-NEXT: ret i1 [[R]]
;
%P = getelementptr inbounds [4 x { i32, i32 } ], ptr @GA, i32 0, i32 %X, i32 1
@@ -266,7 +268,10 @@ define i1 @test10_struct_noinbounds_i16(i16 %x) {
define i1 @test10_struct_arr(i32 %x) {
; CHECK-LABEL: @test10_struct_arr(
-; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[X:%.*]], 1
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds nuw i8, ptr [[P_SPLIT]], i32 8
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i32 0, i32 %x, i32 2
@@ -277,8 +282,10 @@ define i1 @test10_struct_arr(i32 %x) {
define i1 @test10_struct_arr_noinbounds(i32 %x) {
; CHECK-LABEL: @test10_struct_arr_noinbounds(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 268435455
-; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], 1
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr i8, ptr [[P_SPLIT]], i32 8
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i32 %x, i32 2
@@ -289,7 +296,11 @@ define i1 @test10_struct_arr_noinbounds(i32 %x) {
define i1 @test10_struct_arr_i16(i16 %x) {
; CHECK-LABEL: @test10_struct_arr_i16(
-; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[X:%.*]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds nuw i8, ptr [[P_SPLIT]], i32 8
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i16 0, i16 %x, i32 2
@@ -300,8 +311,11 @@ define i1 @test10_struct_arr_i16(i16 %x) {
define i1 @test10_struct_arr_i64(i64 %x) {
; CHECK-LABEL: @test10_struct_arr_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 4294967295
-; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds nuw i8, ptr [[P_SPLIT]], i32 8
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i64 0, i64 %x, i32 2
@@ -312,7 +326,11 @@ define i1 @test10_struct_arr_i64(i64 %x) {
define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
; CHECK-LABEL: @test10_struct_arr_noinbounds_i16(
-; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[X:%.*]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr i8, ptr [[P_SPLIT]], i32 8
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i16 %x, i32 2
@@ -323,8 +341,11 @@ define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
; CHECK-LABEL: @test10_struct_arr_noinbounds_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 268435455
-; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT: [[P_SPLIT:%.*]] = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr i8, ptr [[P_SPLIT]], i32 8
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i64 %x, i32 2
diff --git a/llvm/test/Transforms/InstCombine/opaque-ptr.ll b/llvm/test/Transforms/InstCombine/opaque-ptr.ll
index 99d1fa032db15..872ec2c881bda 100644
--- a/llvm/test/Transforms/InstCombine/opaque-ptr.ll
+++ b/llvm/test/Transforms/InstCombine/opaque-ptr.ll
@@ -223,7 +223,8 @@ define ptr @geps_combinable_different_elem_type5(ptr %a) {
define ptr @geps_combinable_different_elem_type6(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type6(
-; CHECK-NEXT: [[A3:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]], i32 1
+; CHECK-NEXT: [[A2:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A3:%.*]] = getelementptr i8, ptr [[A2]], i64 4
; CHECK-NEXT: ret ptr [[A3]]
;
%a2 = getelementptr { i32, i32 }, ptr %a, i64 %idx
@@ -233,8 +234,8 @@ define ptr @geps_combinable_different_elem_type6(ptr %a, i64 %idx) {
define ptr @geps_combinable_different_elem_type7(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type7(
-; CHECK-NEXT: [[A2:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]], i32 1
-; CHECK-NEXT: [[A3:%.*]] = getelementptr i8, ptr [[A2]], i64 4
+; CHECK-NEXT: [[A2_SPLIT:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A3:%.*]] = getelementptr i8, ptr [[A2_SPLIT]], i64 8
; CHECK-NEXT: ret ptr [[A3]]
;
%a2 = getelementptr { i32, i32 }, ptr %a, i64 %idx, i32 1
@@ -244,8 +245,8 @@ define ptr @geps_combinable_different_elem_type7(ptr %a, i64 %idx) {
define ptr @geps_combinable_different_elem_type8(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type8(
-; CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds { { i32, i32 } }, ptr [[A:%.*]], i64 [[IDX:%.*]], i32 0, i32 1
-; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds nuw i8, ptr [[A2]], i64 4
+; CHECK-NEXT: [[A2_SPLIT:%.*]] = getelementptr inbounds { { i32, i32 } }, ptr [[A:%.*]], i64 [[IDX:%.*]], i32 0
+; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds nuw i8, ptr [[A2_SPLIT]], i64 8
; CHECK-NEXT: ret ptr [[A3]]
;
%a2 = getelementptr inbounds { { i32, i32 } }, ptr %a, i64 %idx, i32 0, i32 1
@@ -255,7 +256,7 @@ define ptr @geps_combinable_different_elem_type8(ptr %a, i64 %idx) {
define ptr @geps_combinable_different_elem_type9(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type9(
-; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds { { i32, i32 } }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds { { i32, i32 } }, ptr [[A:%.*]], i64 [[IDX:%.*]], i32 0
; CHECK-NEXT: ret ptr [[A3]]
;
%a2 = getelementptr inbounds { { i32, i32 } }, ptr %a, i64 %idx, i32 0, i32 1
diff --git a/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll b/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
index 17a9d540983ca..ad832f26f7284 100644
--- a/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
+++ b/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
@@ -307,8 +307,8 @@ define i64 @fold_ptrtoint_nullgep_variable_known_nonzero_inbounds_multiple_indic
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nullgep_variable_known_nonzero_inbounds_multiple_indices
; INSTCOMBINE-SAME: (i64 [[VAL:%.*]]) {
; INSTCOMBINE-NEXT: [[NON_ZERO_OFFSET:%.*]] = shl i64 [[VAL]], 1
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = or i64 [[NON_ZERO_OFFSET]], 3
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[RET:%.*]] = or i64 [[NON_ZERO_OFFSET]], 3
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%non_zero_offset = or i64 %val, 1
%ptr = getelementptr inbounds [2 x i8], ptr addrspace(1) null, i64 %non_zero_offset, i32 1
@@ -455,8 +455,8 @@ define i64 @fold_complex_index_multiple_nonzero(i64 %x) local_unnamed_addr #0 {
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_complex_index_multiple_nonzero
; INSTCOMBINE-SAME: (i64 [[X:%.*]]) local_unnamed_addr {
; INSTCOMBINE-NEXT: entry:
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = add nsw i64 [[X]], 96
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[X]], 96
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
entry:
%ptr = getelementptr inbounds %struct.S, ptr addrspace(1) null, i64 1, i32 0, i64 1, i32 0, i64 %x
@@ -500,9 +500,9 @@ define i64 @fold_ptrtoint_nullgep_array_one_var_1(i64 %x) {
;
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nullgep_array_one_var_1
; INSTCOMBINE-SAME: (i64 [[X:%.*]]) {
-; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[X]], 2
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = add i64 [[PTR_IDX]], 6
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[PTR_SPLIT_IDX:%.*]] = shl i64 [[X]], 2
+; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[PTR_SPLIT_IDX]], 6
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x i16], ptr addrspace(1) null, i64 %x, i64 3
%ret = ptrtoint ptr addrspace(1) %ptr to i64
@@ -525,8 +525,8 @@ define i64 @fold_ptrtoint_nullgep_array_one_var_2(i64 %x) {
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nullgep_array_one_var_2
; INSTCOMBINE-SAME: (i64 [[X:%.*]]) {
; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[X]], 1
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = add i64 [[PTR_IDX]], 28
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[PTR_IDX]], 28
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x i16], ptr addrspace(1) null, i64 7, i64 %x
%ret = ptrtoint ptr addrspace(1) %ptr to i64
@@ -600,9 +600,9 @@ define i64 @fold_ptrtoint_nested_array_two_vars_plus_const(i64 %x, i64 %y) {
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_array_two_vars_plus_const
; INSTCOMBINE-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
; INSTCOMBINE-NEXT: [[PTR_SPLIT_IDX:%.*]] = shl i64 [[X]], 3
-; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[Y]], 2
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = or disjoint i64 [[PTR_IDX]], 2
-; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[PTR_SPLIT_IDX]], [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[PTR_SPLIT1_IDX:%.*]] = shl i64 [[Y]], 2
+; INSTCOMBINE-NEXT: [[TMP1:%.*]] = add i64 [[PTR_SPLIT_IDX]], [[PTR_SPLIT1_IDX]]
+; INSTCOMBINE-NEXT: [[RET:%.*]] = or disjoint i64 [[TMP1]], 2
; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x [2 x i16]], ptr addrspace(1) null, i64 %x, i64 %y, i64 1
diff --git a/llvm/test/Transforms/InstCombine/select-gep.ll b/llvm/test/Transforms/InstCombine/select-gep.ll
index d3c357ea9751b..cd91b6b89b61f 100644
--- a/llvm/test/Transforms/InstCombine/select-gep.ll
+++ b/llvm/test/Transforms/InstCombine/select-gep.ll
@@ -161,8 +161,10 @@ define ptr @test2d(ptr %p, i64 %x, i64 %y) {
define ptr @test3a(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test3a(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 2, i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[P]], i64 2, i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 32
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[GEP1_SPLIT]], i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2_SPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 32
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[GEP2_SPLIT]], i64 0, i64 [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
; CHECK-NEXT: ret ptr [[SELECT]]
@@ -176,8 +178,10 @@ define ptr @test3a(ptr %p, i64 %x, i64 %y) {
define ptr @test3b(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3b(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 2, i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[Q:%.*]], i64 2, i64 [[X]]
+; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 32
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[GEP1_SPLIT]], i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2_SPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[Q:%.*]], i64 32
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[GEP2_SPLIT]], i64 0, i64 [[X]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
; CHECK-NEXT: ret ptr [[SELECT]]
@@ -191,7 +195,8 @@ define ptr @test3b(ptr %p, ptr %q, i64 %x, i64 %y) {
define ptr @test3c(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3c(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 [[X:%.*]], i64 2
+; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP1_SPLIT]], i64 8
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[X]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
@@ -207,7 +212,8 @@ define ptr @test3c(ptr %p, ptr %q, i64 %x, i64 %y) {
define ptr @test3d(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3d(
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[Q:%.*]], i64 [[X]], i64 2
+; CHECK-NEXT: [[GEP2_SPLIT:%.*]] = getelementptr inbounds [4 x i32], ptr [[Q:%.*]], i64 [[X]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP2_SPLIT]], i64 8
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
; CHECK-NEXT: ret ptr [[SELECT]]
diff --git a/llvm/test/Transforms/InstCombine/strcmp-3.ll b/llvm/test/Transforms/InstCombine/strcmp-3.ll
index 66910b410f4cf..b5f1e70053474 100644
--- a/llvm/test/Transforms/InstCombine/strcmp-3.ll
+++ b/llvm/test/Transforms/InstCombine/strcmp-3.ll
@@ -68,7 +68,7 @@ define i32 @fold_strcmp_a5i0_a5i1_p1_to_0() {
define i32 @call_strcmp_a5i0_a5i1_pI(i64 %I) {
; CHECK-LABEL: @call_strcmp_a5i0_a5i1_pI(
-; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 1, i64 [[I:%.*]]
+; CHECK-NEXT: [[Q:%.*]] = getelementptr [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @a5, i64 4), i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(4) @a5, ptr noundef nonnull dereferenceable(1) [[Q]])
; CHECK-NEXT: ret i32 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/strlen-7.ll b/llvm/test/Transforms/InstCombine/strlen-7.ll
index 6eb7885abd041..b6e3a4568b458 100644
--- a/llvm/test/Transforms/InstCombine/strlen-7.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-7.ll
@@ -170,15 +170,16 @@ define void @fold_strlen_A_pI(ptr %plen, i64 %I) {
; CHECK-NEXT: [[PA0A:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 [[I:%.*]]
; CHECK-NEXT: [[LENA0A:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA0A]])
; CHECK-NEXT: store i64 [[LENA0A]], ptr [[PLEN:%.*]], align 4
-; CHECK-NEXT: [[PA0B:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 [[I]]
+; CHECK-NEXT: [[PA0B:%.*]] = getelementptr [5 x i8], ptr getelementptr inbounds nuw (i8, ptr @a, i64 4), i64 0, i64 [[I]]
; CHECK-NEXT: [[LENA0B:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA0B]])
; CHECK-NEXT: [[PLEN1:%.*]] = getelementptr i8, ptr [[PLEN]], i64 8
; CHECK-NEXT: store i64 [[LENA0B]], ptr [[PLEN1]], align 4
-; CHECK-NEXT: [[PA1A:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 [[I]]
+; CHECK-NEXT: [[PA1A:%.*]] = getelementptr [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @a, i64 9), i64 0, i64 [[I]]
; CHECK-NEXT: [[LENA1A:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA1A]])
; CHECK-NEXT: [[PLEN2:%.*]] = getelementptr i8, ptr [[PLEN]], i64 16
; CHECK-NEXT: store i64 [[LENA1A]], ptr [[PLEN2]], align 4
-; CHECK-NEXT: [[PA1B:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 [[I]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr getelementptr inbounds nuw (i8, ptr @a, i64 9), i64 [[I]]
+; CHECK-NEXT: [[PA1B:%.*]] = getelementptr i8, ptr [[TMP1]], i64 4
; CHECK-NEXT: [[LENA1B:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA1B]])
; CHECK-NEXT: [[PLEN3:%.*]] = getelementptr i8, ptr [[PLEN]], i64 24
; CHECK-NEXT: store i64 [[LENA1B]], ptr [[PLEN3]], align 4
diff --git a/llvm/test/Transforms/InstCombine/strlen-8.ll b/llvm/test/Transforms/InstCombine/strlen-8.ll
index b4334ddd8f1ac..bd7ca832581ab 100644
--- a/llvm/test/Transforms/InstCombine/strlen-8.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-8.ll
@@ -30,7 +30,7 @@ define i64 @fold_a5_4_i0_pI(i64 %I) {
define i64 @fold_a5_4_i1_pI(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i1_pI(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 [[I:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @a5_4, i64 4), i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
@@ -44,7 +44,7 @@ define i64 @fold_a5_4_i1_pI(i64 %I) {
define i64 @fold_a5_4_i2_pI(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i2_pI(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 [[I:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @a5_4, i64 8), i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
@@ -58,7 +58,7 @@ define i64 @fold_a5_4_i2_pI(i64 %I) {
define i64 @fold_a5_4_i3_pI_to_0(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i3_pI_to_0(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 [[I:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @a5_4, i64 12), i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
@@ -72,7 +72,7 @@ define i64 @fold_a5_4_i3_pI_to_0(i64 %I) {
define i64 @fold_a5_4_i4_pI_to_0(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i4_pI_to_0(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 [[I:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @a5_4, i64 16), i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
diff --git a/llvm/test/Transforms/InstCombine/vectorgep-crash.ll b/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
index 3904c30a86718..d33a2d3417bfd 100644
--- a/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
+++ b/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
@@ -14,7 +14,8 @@ define <8 x ptr> @test_vector_gep(ptr %arg1, <8 x i64> %arg2) {
; CHECK-LABEL: define <8 x ptr> @test_vector_gep(
; CHECK-SAME: ptr [[ARG1:%.*]], <8 x i64> [[ARG2:%.*]]) {
; CHECK-NEXT: [[TOP:.*:]]
-; CHECK-NEXT: [[VECTORGEP14:%.*]] = getelementptr inbounds [[DUAL:%.*]], ptr [[ARG1]], <8 x i64> [[ARG2]], i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
+; CHECK-NEXT: [[VECTORGEP14_SPLIT:%.*]] = getelementptr inbounds [[DUAL:%.*]], ptr [[ARG1]], <8 x i64> [[ARG2]]
+; CHECK-NEXT: [[VECTORGEP14:%.*]] = getelementptr inbounds i8, <8 x ptr> [[VECTORGEP14_SPLIT]], i64 32
; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP14]]
;
top:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index b349c55d3e09a..11c4559cc9b66 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -269,15 +269,20 @@ define i32 @test_struct_load6(ptr %S) #1 {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[S:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP5]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 2
+; CHECK-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT6]], i64 8
; CHECK-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 3
+; CHECK-NEXT: [[DOTSPLIT7:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT7]], i64 12
; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 4
+; CHECK-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT8]], i64 16
; CHECK-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP9]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 5
+; CHECK-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT9]], i64 20
; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
@@ -588,7 +593,8 @@ define void @load_gap_reverse(ptr noalias nocapture readonly %P1, ptr noalias no
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <vscale x 4 x i64> [[BROADCAST_SPLAT1]], [[VEC_IND]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 8
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 8, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i64> poison)
; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> [[TMP4]], <vscale x 4 x ptr> [[TMP5]], i32 8, <vscale x 4 x i1> splat (i1 true))
@@ -815,7 +821,8 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x ptr> [[TMP12]], i64 0
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP14]], align 4
@@ -886,7 +893,8 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP12]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
@@ -965,7 +973,8 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
@@ -1037,8 +1046,10 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[TMP12]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT3]], i64 4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index 9257e45c809e9..0f5df0116b72c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -511,7 +511,8 @@ define void @histogram_array_4op_gep_nonzero_const_idx(i64 noundef %N, ptr reado
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = sext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[SOMESTRUCT:%.*]], ptr [[DATA_STRUCT]], i64 1, i32 0, <vscale x 4 x i64> [[TMP6]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[DATA_STRUCT]], i64 8388608
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1048576 x i32], ptr [[DOTSPLIT]], i64 0, <vscale x 4 x i64> [[TMP6]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP7]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 77b91ccb913cf..898c21a61329f 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -1283,9 +1283,10 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; IND-NEXT: br label [[VECTOR_BODY:%.*]]
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
-; IND-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP0]], i32 1
+; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; IND-NEXT: [[TMP13:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP13]], i64 12
; IND-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 8
; IND-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 8
; IND-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> poison, i32 [[TMP3]], i64 0
@@ -1306,7 +1307,8 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; IND-NEXT: br label [[FOR_BODY:%.*]]
; IND: for.body:
; IND-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; IND-NEXT: [[F:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; IND-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; IND-NEXT: [[F:%.*]] = getelementptr inbounds nuw i8, ptr [[F_SPLIT]], i64 4
; IND-NEXT: [[TMP11:%.*]] = load i32, ptr [[F]], align 8
; IND-NEXT: [[TMP12:%.*]] = xor i32 [[TMP11]], [[Y]]
; IND-NEXT: store i32 [[TMP12]], ptr [[F]], align 8
@@ -1328,13 +1330,14 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; UNROLL-NEXT: [[TMP1:%.*]] = or disjoint i64 [[INDEX]], 2
-; UNROLL-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 3
-; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
-; UNROLL-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP0]], i32 1
-; UNROLL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP1]], i32 1
-; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
+; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP1]], i64 12
+; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP24]], i64 20
+; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP25]], i64 28
; UNROLL-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP3]], align 8
; UNROLL-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP4]], align 8
; UNROLL-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i64 0
@@ -1364,7 +1367,8 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; UNROLL-NEXT: br label [[FOR_BODY:%.*]]
; UNROLL: for.body:
; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NEXT: [[F:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; UNROLL-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; UNROLL-NEXT: [[F:%.*]] = getelementptr inbounds nuw i8, ptr [[F_SPLIT]], i64 4
; UNROLL-NEXT: [[TMP22:%.*]] = load i32, ptr [[F]], align 8
; UNROLL-NEXT: [[TMP23:%.*]] = xor i32 [[TMP22]], [[Y]]
; UNROLL-NEXT: store i32 [[TMP23]], ptr [[F]], align 8
@@ -1448,21 +1452,22 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
-; INTERLEAVE-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
-; INTERLEAVE-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 4
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 5
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = or disjoint i64 [[INDEX]], 6
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = or disjoint i64 [[INDEX]], 7
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
-; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
-; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 1
-; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 1
-; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP5]], i32 1
-; INTERLEAVE-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP6]], i32 1
-; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP7]], i32 1
-; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP8]], i32 1
+; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP3]], i64 12
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP5]], i64 20
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP7]], i64 28
+; INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP30]], i64 36
+; INTERLEAVE-NEXT: [[TMP31:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP31]], i64 44
+; INTERLEAVE-NEXT: [[TMP32:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP32]], i64 52
+; INTERLEAVE-NEXT: [[TMP33:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP33]], i64 60
; INTERLEAVE-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP9]], align 8
; INTERLEAVE-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; INTERLEAVE-NEXT: [[WIDE_VEC1:%.*]] = load <8 x i32>, ptr [[TMP13]], align 8
@@ -1495,7 +1500,8 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; INTERLEAVE-NEXT: br label [[FOR_BODY:%.*]]
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; INTERLEAVE-NEXT: [[F:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; INTERLEAVE-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; INTERLEAVE-NEXT: [[F:%.*]] = getelementptr inbounds nuw i8, ptr [[F_SPLIT]], i64 4
; INTERLEAVE-NEXT: [[TMP28:%.*]] = load i32, ptr [[F]], align 8
; INTERLEAVE-NEXT: [[TMP29:%.*]] = xor i32 [[TMP28]], [[Y]]
; INTERLEAVE-NEXT: store i32 [[TMP29]], ptr [[F]], align 8
@@ -1622,18 +1628,19 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 1
; IND-NEXT: [[TMP10:%.*]] = shl nsw <2 x i64> [[VEC_IND]], splat (i64 2)
; IND-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[TMP10]], i64 0
; IND-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
; IND-NEXT: [[TMP13:%.*]] = extractelement <2 x i64> [[TMP10]], i64 1
; IND-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP13]]
-; IND-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP12]], align 1, !alias.scope [[META17:![0-9]+]]
-; IND-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP14]], align 1, !alias.scope [[META17]]
-; IND-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]], i32 1
-; IND-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP9]], i32 1
-; IND-NEXT: store i32 [[TMP15]], ptr [[TMP17]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
-; IND-NEXT: store i32 [[TMP16]], ptr [[TMP18]], align 1, !alias.scope [[META20]], !noalias [[META17]]
+; IND-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP12]], align 1, !alias.scope [[META17:![0-9]+]]
+; IND-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 1, !alias.scope [[META17]]
+; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; IND-NEXT: [[TMP17:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP17]], i64 12
+; IND-NEXT: store i32 [[TMP24]], ptr [[TMP16]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
+; IND-NEXT: store i32 [[TMP15]], ptr [[TMP18]], align 1, !alias.scope [[META20]], !noalias [[META17]]
; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; IND-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1649,7 +1656,8 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; IND-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[I]], 4
; IND-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[DOTIDX]]
; IND-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 1
-; IND-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; IND-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; IND-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 4
; IND-NEXT: store i32 [[TMP21]], ptr [[TMP22]], align 1
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; IND-NEXT: [[TMP23:%.*]] = trunc i64 [[I_NEXT]] to i32
@@ -1685,9 +1693,6 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL: vector.body:
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 1
-; UNROLL-NEXT: [[TMP10:%.*]] = or disjoint i64 [[INDEX]], 2
-; UNROLL-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 3
; UNROLL-NEXT: [[TMP12:%.*]] = shl nsw <2 x i64> [[VEC_IND]], splat (i64 2)
; UNROLL-NEXT: [[STEP_ADD:%.*]] = shl <2 x i64> [[VEC_IND]], splat (i64 2)
; UNROLL-NEXT: [[TMP13:%.*]] = add <2 x i64> [[STEP_ADD]], splat (i64 8)
@@ -1699,18 +1704,22 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
; UNROLL-NEXT: [[TMP20:%.*]] = extractelement <2 x i64> [[TMP13]], i64 1
; UNROLL-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
-; UNROLL-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP15]], align 1, !alias.scope [[META17:![0-9]+]]
-; UNROLL-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP17]], align 1, !alias.scope [[META17]]
-; UNROLL-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP19]], align 1, !alias.scope [[META17]]
-; UNROLL-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP21]], align 1, !alias.scope [[META17]]
-; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]], i32 1
-; UNROLL-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP9]], i32 1
-; UNROLL-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP10]], i32 1
-; UNROLL-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP11]], i32 1
-; UNROLL-NEXT: store i32 [[TMP22]], ptr [[TMP26]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
-; UNROLL-NEXT: store i32 [[TMP23]], ptr [[TMP27]], align 1, !alias.scope [[META20]], !noalias [[META17]]
-; UNROLL-NEXT: store i32 [[TMP24]], ptr [[TMP28]], align 1, !alias.scope [[META20]], !noalias [[META17]]
-; UNROLL-NEXT: store i32 [[TMP25]], ptr [[TMP29]], align 1, !alias.scope [[META20]], !noalias [[META17]]
+; UNROLL-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP15]], align 1, !alias.scope [[META17:![0-9]+]]
+; UNROLL-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP17]], align 1, !alias.scope [[META17]]
+; UNROLL-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP19]], align 1, !alias.scope [[META17]]
+; UNROLL-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 1, !alias.scope [[META17]]
+; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP24]], i64 12
+; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i64 20
+; UNROLL-NEXT: [[TMP28:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[TMP28]], i64 28
+; UNROLL-NEXT: store i32 [[TMP35]], ptr [[TMP23]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
+; UNROLL-NEXT: store i32 [[TMP36]], ptr [[TMP25]], align 1, !alias.scope [[META20]], !noalias [[META17]]
+; UNROLL-NEXT: store i32 [[TMP37]], ptr [[TMP27]], align 1, !alias.scope [[META20]], !noalias [[META17]]
+; UNROLL-NEXT: store i32 [[TMP22]], ptr [[TMP29]], align 1, !alias.scope [[META20]], !noalias [[META17]]
; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; UNROLL-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; UNROLL-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1726,7 +1735,8 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[I]], 4
; UNROLL-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[DOTIDX]]
; UNROLL-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 1
-; UNROLL-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; UNROLL-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; UNROLL-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT6]], i64 4
; UNROLL-NEXT: store i32 [[TMP32]], ptr [[TMP33]], align 1
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; UNROLL-NEXT: [[TMP34:%.*]] = trunc i64 [[I_NEXT]] to i32
@@ -1843,41 +1853,43 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; INTERLEAVE-NEXT: [[TMP12:%.*]] = or disjoint i64 [[INDEX]], 2
-; INTERLEAVE-NEXT: [[TMP13:%.*]] = or disjoint i64 [[INDEX]], 3
; INTERLEAVE-NEXT: [[TMP14:%.*]] = or disjoint i64 [[INDEX]], 4
-; INTERLEAVE-NEXT: [[TMP15:%.*]] = or disjoint i64 [[INDEX]], 5
-; INTERLEAVE-NEXT: [[TMP16:%.*]] = or disjoint i64 [[INDEX]], 6
-; INTERLEAVE-NEXT: [[TMP17:%.*]] = or disjoint i64 [[INDEX]], 7
; INTERLEAVE-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[INDEX]], 4
; INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[DOTIDX]]
; INTERLEAVE-NEXT: [[DOTIDX5:%.*]] = shl nsw i64 [[TMP14]], 4
; INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[DOTIDX5]]
; INTERLEAVE-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP18]], align 1
; INTERLEAVE-NEXT: [[WIDE_VEC3:%.*]] = load <16 x i32>, ptr [[TMP19]], align 1
-; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]], i32 1
-; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP11]], i32 1
-; INTERLEAVE-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP12]], i32 1
-; INTERLEAVE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP13]], i32 1
-; INTERLEAVE-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP14]], i32 1
-; INTERLEAVE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP15]], i32 1
-; INTERLEAVE-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP16]], i32 1
-; INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP17]], i32 1
+; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP15]], i64 12
+; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[TMP17]], i64 20
+; INTERLEAVE-NEXT: [[TMP43:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP43]], i64 28
+; INTERLEAVE-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP14]]
+; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT9]], i64 4
+; INTERLEAVE-NEXT: [[TMP22:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[TMP22]], i64 44
+; INTERLEAVE-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP24]], i64 52
+; INTERLEAVE-NEXT: [[TMP26:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i64 60
; INTERLEAVE-NEXT: [[TMP28:%.*]] = extractelement <16 x i32> [[WIDE_VEC]], i64 0
-; INTERLEAVE-NEXT: store i32 [[TMP28]], ptr [[TMP20]], align 1, !alias.scope [[META17:![0-9]+]], !noalias [[META20:![0-9]+]]
+; INTERLEAVE-NEXT: store i32 [[TMP28]], ptr [[TMP41]], align 1, !alias.scope [[META17:![0-9]+]], !noalias [[META20:![0-9]+]]
; INTERLEAVE-NEXT: [[TMP29:%.*]] = extractelement <16 x i32> [[WIDE_VEC]], i64 4
-; INTERLEAVE-NEXT: store i32 [[TMP29]], ptr [[TMP21]], align 1, !alias.scope [[META17]], !noalias [[META20]]
+; INTERLEAVE-NEXT: store i32 [[TMP29]], ptr [[TMP16]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[TMP30:%.*]] = extractelement <16 x i32> [[WIDE_VEC]], i64 8
-; INTERLEAVE-NEXT: store i32 [[TMP30]], ptr [[TMP22]], align 1, !alias.scope [[META17]], !noalias [[META20]]
+; INTERLEAVE-NEXT: store i32 [[TMP30]], ptr [[TMP42]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[TMP31:%.*]] = extractelement <16 x i32> [[WIDE_VEC]], i64 12
-; INTERLEAVE-NEXT: store i32 [[TMP31]], ptr [[TMP23]], align 1, !alias.scope [[META17]], !noalias [[META20]]
+; INTERLEAVE-NEXT: store i32 [[TMP31]], ptr [[TMP20]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[TMP32:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 0
-; INTERLEAVE-NEXT: store i32 [[TMP32]], ptr [[TMP24]], align 1, !alias.scope [[META17]], !noalias [[META20]]
+; INTERLEAVE-NEXT: store i32 [[TMP32]], ptr [[TMP21]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[TMP33:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 4
-; INTERLEAVE-NEXT: store i32 [[TMP33]], ptr [[TMP25]], align 1, !alias.scope [[META17]], !noalias [[META20]]
+; INTERLEAVE-NEXT: store i32 [[TMP33]], ptr [[TMP23]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[TMP34:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 8
-; INTERLEAVE-NEXT: store i32 [[TMP34]], ptr [[TMP26]], align 1, !alias.scope [[META17]], !noalias [[META20]]
+; INTERLEAVE-NEXT: store i32 [[TMP34]], ptr [[TMP25]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[TMP35:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 12
; INTERLEAVE-NEXT: store i32 [[TMP35]], ptr [[TMP27]], align 1, !alias.scope [[META17]], !noalias [[META20]]
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
@@ -1893,7 +1905,8 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; INTERLEAVE-NEXT: [[DOTIDX6:%.*]] = shl nsw i64 [[I]], 4
; INTERLEAVE-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[DOTIDX6]]
; INTERLEAVE-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP37]], align 1
-; INTERLEAVE-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; INTERLEAVE-NEXT: [[DOTSPLIT14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; INTERLEAVE-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT14]], i64 4
; INTERLEAVE-NEXT: store i32 [[TMP38]], ptr [[TMP39]], align 1
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; INTERLEAVE-NEXT: [[TMP40:%.*]] = trunc i64 [[I_NEXT]] to i32
@@ -2477,11 +2490,12 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1
; IND-NEXT: [[TMP4:%.*]] = add <2 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
; IND-NEXT: [[TMP5:%.*]] = trunc <2 x i32> [[TMP4]] to <2 x i16>
-; IND-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
-; IND-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP3]], i32 1
+; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 2
+; IND-NEXT: [[TMP16:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP16]], i64 6
; IND-NEXT: [[TMP8:%.*]] = extractelement <2 x i16> [[TMP5]], i64 0
; IND-NEXT: store i16 [[TMP8]], ptr [[TMP6]], align 2
; IND-NEXT: [[TMP9:%.*]] = extractelement <2 x i16> [[TMP5]], i64 1
@@ -2501,7 +2515,8 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; IND-NEXT: [[TMP11:%.*]] = trunc i64 [[I]] to i32
; IND-NEXT: [[TMP12:%.*]] = add i32 [[A]], [[TMP11]]
; IND-NEXT: [[TMP13:%.*]] = trunc i32 [[TMP12]] to i16
-; IND-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1
+; IND-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]]
+; IND-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 2
; IND-NEXT: store i16 [[TMP13]], ptr [[TMP14]], align 2
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; IND-NEXT: [[TMP15:%.*]] = trunc i64 [[I_NEXT]] to i32
@@ -2526,17 +2541,18 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
-; UNROLL-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1
-; UNROLL-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 2
-; UNROLL-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 3
; UNROLL-NEXT: [[TMP6:%.*]] = add <2 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
; UNROLL-NEXT: [[TMP7:%.*]] = add <2 x i32> [[BROADCAST_SPLAT]], [[STEP_ADD]]
; UNROLL-NEXT: [[TMP8:%.*]] = trunc <2 x i32> [[TMP6]] to <2 x i16>
; UNROLL-NEXT: [[TMP9:%.*]] = trunc <2 x i32> [[TMP7]] to <2 x i16>
-; UNROLL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
-; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP3]], i32 1
-; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP4]], i32 1
-; UNROLL-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP5]], i32 1
+; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 2
+; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP24]], i64 6
+; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP25]], i64 10
+; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP26]], i64 14
; UNROLL-NEXT: [[TMP14:%.*]] = extractelement <2 x i16> [[TMP8]], i64 0
; UNROLL-NEXT: store i16 [[TMP14]], ptr [[TMP10]], align 2
; UNROLL-NEXT: [[TMP15:%.*]] = extractelement <2 x i16> [[TMP8]], i64 1
@@ -2560,7 +2576,8 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; UNROLL-NEXT: [[TMP19:%.*]] = trunc i64 [[I]] to i32
; UNROLL-NEXT: [[TMP20:%.*]] = add i32 [[A]], [[TMP19]]
; UNROLL-NEXT: [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
-; UNROLL-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1
+; UNROLL-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]]
+; UNROLL-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 2
; UNROLL-NEXT: store i16 [[TMP21]], ptr [[TMP22]], align 2
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; UNROLL-NEXT: [[TMP23:%.*]] = trunc i64 [[I_NEXT]] to i32
@@ -2646,25 +2663,26 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; INTERLEAVE-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1
-; INTERLEAVE-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 2
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 3
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 4
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = or disjoint i64 [[INDEX]], 5
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = or disjoint i64 [[INDEX]], 6
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 7
; INTERLEAVE-NEXT: [[TMP10:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
; INTERLEAVE-NEXT: [[TMP11:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], [[STEP_ADD]]
; INTERLEAVE-NEXT: [[TMP12:%.*]] = trunc <4 x i32> [[TMP10]] to <4 x i16>
; INTERLEAVE-NEXT: [[TMP13:%.*]] = trunc <4 x i32> [[TMP11]] to <4 x i16>
-; INTERLEAVE-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
-; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP3]], i32 1
-; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP4]], i32 1
-; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP5]], i32 1
-; INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP6]], i32 1
-; INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP7]], i32 1
-; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP8]], i32 1
-; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[TMP9]], i32 1
+; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 2
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP8]], i64 6
+; INTERLEAVE-NEXT: [[TMP36:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP36]], i64 10
+; INTERLEAVE-NEXT: [[TMP37:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP37]], i64 14
+; INTERLEAVE-NEXT: [[TMP38:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP38]], i64 18
+; INTERLEAVE-NEXT: [[TMP39:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP39]], i64 22
+; INTERLEAVE-NEXT: [[TMP40:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP40]], i64 26
+; INTERLEAVE-NEXT: [[TMP41:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP41]], i64 30
; INTERLEAVE-NEXT: [[TMP22:%.*]] = extractelement <4 x i16> [[TMP12]], i64 0
; INTERLEAVE-NEXT: store i16 [[TMP22]], ptr [[TMP14]], align 2
; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <4 x i16> [[TMP12]], i64 1
@@ -2696,7 +2714,8 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; INTERLEAVE-NEXT: [[TMP31:%.*]] = trunc i64 [[I]] to i32
; INTERLEAVE-NEXT: [[TMP32:%.*]] = add i32 [[A]], [[TMP31]]
; INTERLEAVE-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
-; INTERLEAVE-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1
+; INTERLEAVE-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]]
+; INTERLEAVE-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT8]], i64 2
; INTERLEAVE-NEXT: store i16 [[TMP33]], ptr [[TMP34]], align 2
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; INTERLEAVE-NEXT: [[TMP35:%.*]] = trunc i64 [[I_NEXT]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
index fb4545cb715bb..b8899b2f70b40 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
@@ -27,14 +27,16 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 1
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i64 [[TMP4]], ptr [[TMP3]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -42,8 +44,8 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP6]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i64 24
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 2
; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP7]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -58,7 +60,8 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 1
+; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P_1_SPLIT]], i64 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], [[X]]
; CHECK-NEXT: br i1 [[TMP11]], label [[IF_THEN:%.*]], label [[IF_MERGE]]
@@ -121,8 +124,10 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 1
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
+; CHECK-NEXT: [[DOTSPLIT5:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT5]], i64 8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <2 x i64> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
@@ -158,7 +163,8 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 0
-; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 1
+; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P_1_SPLIT]], i64 8
; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[TMP14]], [[X]]
; CHECK-NEXT: br i1 [[TMP15]], label [[IF_THEN:%.*]], label [[IF_MERGE]]
@@ -226,7 +232,8 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: store i64 [[X]], ptr [[TMP1]], align 8
@@ -235,7 +242,8 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i64 0
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 1
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 8
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP6]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -243,7 +251,8 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP4]], i64 1
; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]], i32 1
+; CHECK-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 8
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 2
; CHECK-NEXT: store i64 [[TMP10]], ptr [[TMP9]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -259,7 +268,8 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 0
-; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 1
+; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P_1_SPLIT]], i64 8
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: store i64 [[X]], ptr [[P_0]], align 8
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[TMP12]], [[X]]
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 0afd2e15e4480..dbb8b21302e8f 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -582,10 +582,14 @@ define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture %
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP0]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP1]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP2]], i32 0
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], i64 [[OFFSET_IDX]], i32 1
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP1]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP2]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
+; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT1]], i64 8
+; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 8
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 8
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8
@@ -909,10 +913,14 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) {
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT1]], i64 4
+; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 4
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 4
; CHECK-NEXT: store i32 [[Z:%.*]], ptr [[TMP5]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[TMP6]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[TMP7]], align 4
@@ -937,7 +945,8 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
-; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_Y]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -990,14 +999,15 @@ define i32 @PR27626_1(ptr %p, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 1
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP4]], i64 12
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP19]], i64 20
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP20]], i64 28
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <8 x i32> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i32 [[TMP10]], ptr [[TMP6]], align 4
@@ -1024,7 +1034,8 @@ define i32 @PR27626_1(ptr %p, i64 %n) {
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP18:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
-; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[TMP17]], ptr [[P_I_Y]], align 4
; CHECK-NEXT: [[TMP18]] = add nsw i32 [[TMP17]], [[S]]
@@ -1087,10 +1098,14 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 1
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
+; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT1]], i64 4
+; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 4
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 4
; CHECK-NEXT: store i32 [[Z:%.*]], ptr [[TMP5]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[TMP6]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[TMP7]], align 4
@@ -1116,7 +1131,8 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
; CHECK-NEXT: [[P_I_MINUS_1_X:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
-; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[P_I_MINUS_1_X]], align 4
; CHECK-NEXT: store i32 [[TMP19]], ptr [[P_I_Y]], align 4
@@ -1174,15 +1190,20 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP2]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP5]], i32 1
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 4
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP2]], i64 1
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP7]], i32 1
+; CHECK-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 4
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP2]], i64 2
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP9]], i32 1
+; CHECK-NEXT: [[DOTSPLIT5:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT5]], i64 4
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i64> [[TMP2]], i64 3
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP11]], i32 1
+; CHECK-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT6]], i64 4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <8 x i32> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i32 [[TMP13]], ptr [[TMP6]], align 4
@@ -1209,10 +1230,11 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP22:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I_PLUS_1:%.*]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
-; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
-; CHECK-NEXT: [[P_I_PLUS_1_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I_PLUS_1]], i32 1
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_PLUS_1_Y:%.*]] = getelementptr i8, ptr [[TMP23]], i64 12
; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[TMP20]], ptr [[P_I_PLUS_1_Y]], align 4
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[P_I_Y]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index b302868535078..5a1a5fcf85bdd 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -1378,13 +1378,17 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i32 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i32 [[INDEX]], 3
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B:%.*]], i64 0, i64 [[TMP3]], i32 1
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B:%.*]], i64 0, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i64 0, i64 [[TMP5]], i32 1
+; CHECK-NEXT: [[DOTSPLIT7:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i64 0, i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT7]], i64 4
; CHECK-NEXT: [[TMP7:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i64 0, i64 [[TMP7]], i32 1
+; CHECK-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i64 0, i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT8]], i64 4
; CHECK-NEXT: [[TMP9:%.*]] = sext i32 [[TMP2]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i64 0, i64 [[TMP9]], i32 1
+; CHECK-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i64 0, i64 [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT9]], i64 4
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP8]], align 4
More information about the cfe-commits
mailing list