[clang] [flang] [llvm] [InstCombine] Canonicalize GEP source element types (PR #180745)
Nikita Popov via cfe-commits
cfe-commits at lists.llvm.org
Fri Feb 13 01:05:07 PST 2026
https://github.com/nikic updated https://github.com/llvm/llvm-project/pull/180745
From 786512e2bba4714051b71d0636f4a242a029bfcb Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Tue, 10 Feb 2026 15:12:59 +0100
Subject: [PATCH] [InstCombine] Canonicalize GEP source element types
Canonicalize GEP source element types from `%T` to `[sizeof(%T) x i8]`.
This is intended to flush out any remaining places that rely on
GEP element types, as part of the `ptradd` migration.
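For example, for a 4-byte element type the rewrite looks like this
(an illustrative sketch mirroring the test updates below; %p and
%idx are hypothetical values):

  ; Before: the index is scaled by sizeof(i32) = 4.
  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idx
  ; After: [4 x i8] strides by the same 4 bytes, so the computed
  ; address (%p + 4 * %idx) is unchanged.
  %arrayidx = getelementptr inbounds [4 x i8], ptr %p, i64 %idx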
---
clang/test/CodeGen/allow-ubsan-check.c | 6 +-
.../CodeGen/attr-counted-by-for-pointers.c | 24 +-
clang/test/CodeGen/attr-counted-by.c | 154 ++++-----
clang/test/CodeGen/union-tbaa1.c | 8 +-
.../taskloop_strictmodifier_codegen.cpp | 4 +-
flang/test/Integration/unroll-loops.f90 | 4 +-
flang/test/Lower/HLFIR/unroll-loops.fir | 4 +-
.../InstCombine/InstructionCombining.cpp | 18 +
llvm/test/Analysis/BasicAA/featuretest.ll | 10 +-
.../CodeGen/AArch64/sme-aarch64-svcount-O3.ll | 2 +-
.../amdgpu-uniform-intrinsic-combine.ll | 12 +-
.../AMDGPU/vector-alloca-addrspacecast.ll | 2 +-
.../Hexagon/autohvx/vector-align-tbaa.ll | 18 +-
.../test/DebugInfo/Generic/instcombine-phi.ll | 2 +-
.../InstCombine/2006-12-15-Range-Test.ll | 4 +-
.../AArch64/sve-intrinsic-gatherscatter.ll | 8 +-
.../AMDGPU/memcpy-from-constant.ll | 4 +-
.../test/Transforms/InstCombine/align-addr.ll | 4 +-
.../Transforms/InstCombine/and-or-icmps.ll | 4 +-
llvm/test/Transforms/InstCombine/array.ll | 58 ++--
.../InstCombine/assume-loop-align.ll | 4 +-
.../InstCombine/canonicalize-gep-constglob.ll | 8 +-
.../InstCombine/canonicalize-gep-mul.ll | 2 +-
llvm/test/Transforms/InstCombine/cast.ll | 2 +-
llvm/test/Transforms/InstCombine/cast_phi.ll | 8 +-
llvm/test/Transforms/InstCombine/cast_ptr.ll | 14 +-
.../constant-fold-address-space-pointer.ll | 2 +-
.../Transforms/InstCombine/dependent-ivs.ll | 2 +-
llvm/test/Transforms/InstCombine/fmul.ll | 2 +-
.../Transforms/InstCombine/gep-addrspace.ll | 4 +-
llvm/test/Transforms/InstCombine/gep-alias.ll | 2 +-
.../gep-canonicalize-constant-indices.ll | 40 +--
.../InstCombine/gep-combine-loop-invariant.ll | 16 +-
.../Transforms/InstCombine/gep-custom-dl.ll | 8 +-
.../gep-fold-chained-const-select.ll | 2 +-
.../InstCombine/gep-merge-constant-indices.ll | 8 +-
llvm/test/Transforms/InstCombine/gep-sext.ll | 8 +-
.../InstCombine/gep-srem-to-and-deref.ll | 2 +-
.../InstCombine/gep-vector-indices.ll | 16 +-
.../test/Transforms/InstCombine/gep-vector.ll | 32 +-
.../Transforms/InstCombine/gepofconstgepi8.ll | 58 ++--
llvm/test/Transforms/InstCombine/gepphigep.ll | 20 +-
.../Transforms/InstCombine/getelementptr.ll | 86 ++---
.../Transforms/InstCombine/icmp-custom-dl.ll | 4 +-
llvm/test/Transforms/InstCombine/icmp-gep.ll | 36 +-
.../InstCombine/indexed-gep-compares.ll | 6 +-
.../InstCombine/known-phi-recurse.ll | 2 +-
.../InstCombine/load-bitcast-select.ll | 4 +-
llvm/test/Transforms/InstCombine/load-cmp.ll | 20 +-
llvm/test/Transforms/InstCombine/load.ll | 8 +-
.../InstCombine/loadstore-alignment.ll | 12 +-
.../InstCombine/loadstore-metadata.ll | 4 +-
.../masked_intrinsics-inseltpoison.ll | 10 +-
.../InstCombine/masked_intrinsics.ll | 10 +-
.../Transforms/InstCombine/mem-gep-zidx.ll | 2 +-
.../InstCombine/memcpy-addrspace.ll | 32 +-
.../InstCombine/memcpy-from-global.ll | 10 +-
.../multi-size-address-space-pointer.ll | 4 +-
llvm/test/Transforms/InstCombine/narrow.ll | 4 +-
.../test/Transforms/InstCombine/opaque-ptr.ll | 18 +-
llvm/test/Transforms/InstCombine/phi.ll | 2 +-
.../InstCombine/pr38984-inseltpoison.ll | 4 +-
llvm/test/Transforms/InstCombine/pr38984.ll | 4 +-
llvm/test/Transforms/InstCombine/pr58901.ll | 6 +-
.../InstCombine/ptrtoint-nullgep.ll | 4 +-
.../remove-loop-phi-multiply-by-zero.ll | 20 +-
.../Transforms/InstCombine/scalable-select.ll | 4 +-
.../test/Transforms/InstCombine/select-gep.ll | 42 +--
llvm/test/Transforms/InstCombine/shift.ll | 12 +-
.../InstCombine/sink_instruction.ll | 6 +-
llvm/test/Transforms/InstCombine/store.ll | 8 +-
llvm/test/Transforms/InstCombine/sub-gep.ll | 20 +-
.../vec_demanded_elts-inseltpoison.ll | 14 +-
.../InstCombine/vec_demanded_elts.ll | 14 +-
.../vec_phi_extract-inseltpoison.ll | 6 +-
.../Transforms/InstCombine/vec_phi_extract.ll | 6 +-
.../InstCombine/vector_gep1-inseltpoison.ll | 2 +-
.../Transforms/InstCombine/vector_gep1.ll | 2 +-
.../Transforms/InstCombine/vector_gep2.ll | 4 +-
.../Transforms/InstCombine/vectorgep-crash.ll | 6 +-
llvm/test/Transforms/InstCombine/wcslen-1.ll | 12 +-
llvm/test/Transforms/InstCombine/wcslen-3.ll | 4 +-
llvm/test/Transforms/InstCombine/wcslen-5.ll | 16 +-
.../Transforms/LoopSimplify/merge-exits.ll | 8 +-
.../AArch64/runtime-unroll-generic.ll | 48 +--
.../LoopUnroll/WebAssembly/basic-unrolling.ll | 40 +--
.../LoopUnroll/runtime-multiexit-heuristic.ll | 42 +--
.../LoopUnroll/runtime-unroll-remainder.ll | 28 +-
.../AArch64/aarch64-predication.ll | 4 +-
.../AArch64/sve-cond-inv-loads.ll | 14 +-
.../AArch64/sve-gather-scatter.ll | 20 +-
.../LoopVectorize/AArch64/sve-inductions.ll | 4 +-
.../AArch64/sve-interleaved-accesses.ll | 98 +++---
.../AArch64/sve-large-strides.ll | 6 +-
.../AArch64/sve-masked-loadstore.ll | 4 +-
.../sve-tail-folding-overflow-checks.ll | 8 +-
.../AArch64/sve-vector-reverse.ll | 32 +-
.../LoopVectorize/AArch64/sve-vfabi.ll | 10 +-
.../LoopVectorize/AArch64/sve-widen-phi.ll | 16 +-
.../AArch64/sve2-histcnt-epilogue.ll | 12 +-
.../sve2-histcnt-no-scalar-interleave.ll | 4 +-
.../AArch64/sve2-histcnt-too-many-deps.ll | 20 +-
.../LoopVectorize/AArch64/sve2-histcnt.ll | 121 ++++---
.../AArch64/uniform-args-call-variants.ll | 36 +-
.../AArch64/vector-reverse-mask4.ll | 8 +-
.../LoopVectorize/AMDGPU/packed-math.ll | 6 +-
.../ARM/mve-reduction-predselect.ll | 40 +--
.../ARM/mve-reductions-interleave.ll | 12 +-
.../LoopVectorize/ARM/mve-reductions.ll | 94 ++---
.../ARM/tail-fold-multiple-icmps.ll | 4 +-
.../LoopVectorize/PowerPC/vsx-tsvc-s173.ll | 2 +-
.../PowerPC/widened-massv-call.ll | 2 +-
.../PowerPC/widened-massv-vfabi-attr.ll | 2 +-
.../LoopVectorize/SystemZ/addressing.ll | 6 +-
.../LoopVectorize/X86/interleaving.ll | 12 +-
.../LoopVectorize/X86/metadata-enable.ll | 52 +--
.../LoopVectorize/X86/parallel-loops.ll | 58 ++--
.../LoopVectorize/X86/small-size.ll | 38 +-
...86-interleaved-store-accesses-with-gaps.ll | 68 ++--
.../Transforms/LoopVectorize/bsd_regex.ll | 8 +-
.../LoopVectorize/extract-last-veclane.ll | 16 +-
.../LoopVectorize/float-induction.ll | 182 +++++-----
.../LoopVectorize/forked-pointers.ll | 24 +-
.../LoopVectorize/gep_with_bitcast.ll | 2 +-
.../Transforms/LoopVectorize/histograms.ll | 10 +-
.../LoopVectorize/if-conversion-nest.ll | 16 +-
.../Transforms/LoopVectorize/if-conversion.ll | 12 +-
.../Transforms/LoopVectorize/induction.ll | 326 +++++++++---------
.../LoopVectorize/interleaved-accesses-2.ll | 4 +-
.../interleaved-accesses-pred-stores.ll | 36 +-
.../LoopVectorize/interleaved-accesses.ll | 172 ++++-----
.../invariant-store-vectorization-2.ll | 14 +-
.../invariant-store-vectorization.ll | 32 +-
.../Transforms/LoopVectorize/loop-scalars.ll | 36 +-
.../Transforms/LoopVectorize/non-const-n.ll | 6 +-
.../test/Transforms/LoopVectorize/phi-cost.ll | 16 +-
.../LoopVectorize/reduction-inloop-cond.ll | 168 ++++-----
.../LoopVectorize/reduction-inloop-min-max.ll | 12 +-
.../LoopVectorize/reduction-inloop-pred.ll | 180 +++++-----
.../LoopVectorize/reduction-inloop-uf4.ll | 70 ++--
.../LoopVectorize/reduction-predselect.ll | 136 ++++----
.../Transforms/LoopVectorize/reduction.ll | 100 +++---
.../Transforms/LoopVectorize/runtime-check.ll | 62 ++--
.../LoopVectorize/scalable-inductions.ll | 32 +-
.../scalar_after_vectorization.ll | 2 +-
.../LoopVectorize/trunc-reductions.ll | 12 +-
.../uniform-args-call-variants.ll | 16 +-
.../Transforms/LoopVectorize/vector-geps.ll | 12 +-
.../LoopVersioningLICM/loopversioningLICM1.ll | 6 +-
.../multiply-fused-loops.ll | 24 +-
.../AArch64/block_scaling_decompr_8bit.ll | 4 +-
.../constraint-elimination-placement.ll | 2 +-
.../AArch64/extra-unroll-simplifications.ll | 28 +-
.../AArch64/hoist-load-from-vector-loop.ll | 14 +-
.../AArch64/hoist-runtime-checks.ll | 10 +-
...ting-sinking-required-for-vectorization.ll | 20 +-
.../AArch64/indvars-vectorization.ll | 8 +-
.../PhaseOrdering/AArch64/interleave_vec.ll | 88 ++---
.../AArch64/interleavevectorization.ll | 30 +-
.../PhaseOrdering/AArch64/loopflatten.ll | 2 +-
.../AArch64/matrix-extract-insert.ll | 46 +--
...ple-unreachable-exits-for-vectorization.ll | 22 +-
.../AArch64/predicated-reduction.ll | 8 +-
.../PhaseOrdering/AArch64/reduce_submuladd.ll | 24 +-
.../AArch64/sinking-vs-if-conversion.ll | 8 +-
.../PhaseOrdering/SystemZ/sub-xor.ll | 48 +--
...-simplifycfg-two-entry-phi-node-folding.ll | 2 +-
.../PhaseOrdering/X86/excessive-unrolling.ll | 12 +-
.../X86/hoist-load-of-baseptr.ll | 10 +-
.../PhaseOrdering/X86/merge-functions2.ll | 2 +-
.../PhaseOrdering/X86/merge-functions3.ll | 2 +-
.../PhaseOrdering/X86/pixel-splat.ll | 4 +-
.../Transforms/PhaseOrdering/X86/pr88239.ll | 4 +-
.../X86/preserve-access-group.ll | 24 +-
.../PhaseOrdering/X86/simplifycfg-late.ll | 2 +-
.../PhaseOrdering/X86/speculation-vs-tbaa.ll | 6 +-
.../PhaseOrdering/X86/spurious-peeling.ll | 16 +-
.../PhaseOrdering/X86/vdiv-nounroll.ll | 2 +-
.../test/Transforms/PhaseOrdering/X86/vdiv.ll | 61 ++--
.../PhaseOrdering/gvn-replacement-vs-hoist.ll | 4 +-
.../PhaseOrdering/loop-access-checks.ll | 6 +-
.../PhaseOrdering/loop-vectorize-bfi.ll | 2 +-
.../PhaseOrdering/lto-argpromotion-ipsccp.ll | 2 +-
.../test/Transforms/PhaseOrdering/lto-licm.ll | 2 +-
.../PhaseOrdering/scev-custom-dl.ll | 14 +-
.../PhaseOrdering/simplifycfg-options.ll | 4 +-
.../AMDGPU/uniform-unswitch.ll | 2 +-
.../Hexagon/switch-to-lookup-table.ll | 2 +-
188 files changed, 2215 insertions(+), 2155 deletions(-)
diff --git a/clang/test/CodeGen/allow-ubsan-check.c b/clang/test/CodeGen/allow-ubsan-check.c
index 1e128854d6a75..da3bf3d4d72f5 100644
--- a/clang/test/CodeGen/allow-ubsan-check.c
+++ b/clang/test/CodeGen/allow-ubsan-check.c
@@ -204,7 +204,7 @@ void use(double*);
// CHECK-NEXT: [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]], !nosanitize [[META6]]
// CHECK-NEXT: br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
// CHECK: [[BB4]]:
-// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[VLA]], i64 [[IDXPROM]]
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[VLA]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[DOUBLE_TBAA9:![0-9]+]]
// CHECK-NEXT: ret double [[TMP5]]
// CHECK: [[TRAP]]:
@@ -223,7 +223,7 @@ void use(double*);
// TR-NEXT: [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]], !nosanitize [[META6]]
// TR-NEXT: br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
// TR: [[BB4]]:
-// TR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[VLA]], i64 [[IDXPROM]]
+// TR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[VLA]], i64 [[IDXPROM]]
// TR-NEXT: [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[DOUBLE_TBAA9:![0-9]+]]
// TR-NEXT: ret double [[TMP5]]
// TR: [[TRAP]]:
@@ -242,7 +242,7 @@ void use(double*);
// REC-NEXT: [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]], !nosanitize [[META6]]
// REC-NEXT: br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
// REC: [[BB4]]:
-// REC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[VLA]], i64 [[IDXPROM]]
+// REC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[VLA]], i64 [[IDXPROM]]
// REC-NEXT: [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[DOUBLE_TBAA9:![0-9]+]]
// REC-NEXT: ret double [[TMP5]]
// REC: [[TRAP]]:
diff --git a/clang/test/CodeGen/attr-counted-by-for-pointers.c b/clang/test/CodeGen/attr-counted-by-for-pointers.c
index 7b0be04b51a30..e667be2afedc6 100644
--- a/clang/test/CodeGen/attr-counted-by-for-pointers.c
+++ b/clang/test/CodeGen/attr-counted-by-for-pointers.c
@@ -44,7 +44,7 @@ struct annotated_ptr {
// SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP4]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA14:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -54,7 +54,7 @@ struct annotated_ptr {
// NO-SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -64,7 +64,7 @@ struct annotated_ptr {
// SANITIZE-WITHOUT-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -74,7 +74,7 @@ struct annotated_ptr {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -99,7 +99,7 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8]]
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP4]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA14]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -109,7 +109,7 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -119,7 +119,7 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITHOUT-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -129,7 +129,7 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -154,7 +154,7 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8]]
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP4]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA14]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -164,7 +164,7 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -174,7 +174,7 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITHOUT-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -184,7 +184,7 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA6]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA12]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index a51681a9c00fc..58d06f411b2c5 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -74,7 +74,7 @@ struct anon_struct {
// SANITIZE-WITH-ATTR: [[CONT3]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -83,7 +83,7 @@ struct anon_struct {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -92,7 +92,7 @@ struct anon_struct {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -101,7 +101,7 @@ struct anon_struct {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -124,7 +124,7 @@ void test1(struct annotated *p, int index, int val) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT6]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[COUNTED_BY_LOAD]], 2
// SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret void
@@ -137,7 +137,7 @@ void test1(struct annotated *p, int index, int val) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[TMP0]], 2
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -145,7 +145,7 @@ void test1(struct annotated *p, int index, int val) {
// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -153,7 +153,7 @@ void test1(struct annotated *p, int index, int val) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -248,7 +248,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT3]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -256,7 +256,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -264,7 +264,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -272,7 +272,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -352,7 +352,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR: [[CONT12]]:
// SANITIZE-WITH-ATTR-NEXT: [[RESULT:%.*]] = add i32 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], 244
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = and i32 [[RESULT]], 252
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP2]], ptr [[ARRAYIDX10]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT79:%.*]] = icmp eq i32 [[DOTCOUNTED_BY_LOAD]], 3
// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT79]], label %[[HANDLER_OUT_OF_BOUNDS18:.*]], label %[[CONT19:.*]], !prof [[PROF8:![0-9]+]], !nosanitize [[META6]]
@@ -370,7 +370,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR: [[CONT38]]:
// SANITIZE-WITH-ATTR-NEXT: [[RESULT25:%.*]] = add i32 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], 240
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = and i32 [[RESULT25]], 252
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM31]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM31]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX36]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT:%.*]] = icmp ugt i32 [[FAM_IDX]], [[DOTCOUNTED_BY_LOAD]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT]], label %[[HANDLER_OUT_OF_BOUNDS45:.*]], label %[[CONT46:.*]], !prof [[PROF8]], !nosanitize [[META6]]
@@ -387,7 +387,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB10:[0-9]+]], i64 [[IDXPROM60]]) #[[ATTR8]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT67]]:
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM60]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM60]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTTR:%.*]] = sub nsw i32 [[DOTCOUNTED_BY_LOAD]], [[FAM_IDX]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP7:%.*]] = shl i32 [[DOTTR]], 2
// SANITIZE-WITH-ATTR-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 252
@@ -406,7 +406,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = and i32 [[RESULT]], 252
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV1:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 0
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV1]], ptr [[ARRAYIDX3]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD7:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
// NO-SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE9:%.*]] = shl i32 [[COUNTED_BY_LOAD7]], 2
@@ -436,7 +436,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX5]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX18:%.*]] = getelementptr i8, ptr [[ARRAYIDX5]], i64 4
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX18]], align 4, !tbaa [[INT_TBAA2]]
@@ -449,7 +449,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX3]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr i8, ptr [[ARRAYIDX3]], i64 4
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX10]], align 4, !tbaa [[INT_TBAA2]]
@@ -616,7 +616,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT3]]:
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -625,7 +625,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -634,7 +634,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -643,7 +643,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -688,7 +688,7 @@ size_t test5_bdos(struct anon_struct *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT6]]:
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE:%.*]] = shl nuw i64 [[COUNTED_BY_LOAD]], 2
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.smax.i64(i64 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], i64 0)
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = trunc i64 [[TMP2]] to i32
@@ -705,7 +705,7 @@ size_t test5_bdos(struct anon_struct *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.smax.i64(i64 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], i64 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -714,7 +714,7 @@ size_t test5_bdos(struct anon_struct *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -723,7 +723,7 @@ size_t test5_bdos(struct anon_struct *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1098,7 +1098,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// SANITIZE-WITH-ATTR: [[CONT6]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE:%.*]] = shl i32 [[COUNTED_BY_LOAD]], 2
// SANITIZE-WITH-ATTR-NEXT: [[RESULT:%.*]] = add i32 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], 8
// SANITIZE-WITH-ATTR-NEXT: store i32 [[RESULT]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
@@ -1115,7 +1115,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = select i1 [[TMP0]], i32 [[RESULT]], i32 0
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -1124,7 +1124,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1133,7 +1133,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1207,7 +1207,7 @@ int test12_a, test12_b;
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB22:[0-9]+]], i64 [[TMP0]]) #[[ATTR8]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT]]:
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[BAZ]], i64 [[TMP0]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BAZ]], i64 [[TMP0]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP2]], ptr @test12_b, align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr @test12_foo, align 4
@@ -1227,7 +1227,7 @@ int test12_a, test12_b;
// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR12:[0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BAZ]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[BAZ]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[INT_TBAA2]]
@@ -1249,7 +1249,7 @@ int test12_a, test12_b;
// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB2:[0-9]+]], i64 [[TMP0]]) #[[ATTR8:[0-9]+]], !nosanitize [[META8]]
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META8]]
// SANITIZE-WITHOUT-ATTR: [[CONT]]:
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[BAZ]], i64 [[TMP0]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BAZ]], i64 [[TMP0]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP2]], ptr @test12_b, align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr @test12_foo, align 4
@@ -1269,7 +1269,7 @@ int test12_a, test12_b;
// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR11:[0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BAZ]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[BAZ]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[INT_TBAA2]]
@@ -1313,7 +1313,7 @@ struct test13_bar {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT5]]:
// SANITIZE-WITH-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[REVMAP]], i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[REVMAP]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST13_FOOPTR_TBAA15:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret i32 0
//
@@ -1322,7 +1322,7 @@ struct test13_bar {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr @test13_f, align 8, !tbaa [[_ZTS10TEST13_BARPTR_TBAA8:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[REVMAP]], i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[REVMAP]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST13_FOOPTR_TBAA12:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 0
//
@@ -1342,7 +1342,7 @@ struct test13_bar {
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META8]]
// SANITIZE-WITHOUT-ATTR: [[CONT5]]:
// SANITIZE-WITHOUT-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[REVMAP]], i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[REVMAP]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST13_FOOPTR_TBAA14:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret i32 0
//
@@ -1351,7 +1351,7 @@ struct test13_bar {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr @test13_f, align 8, !tbaa [[_ZTS10TEST13_BARPTR_TBAA8:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[REVMAP]], i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[REVMAP]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST13_FOOPTR_TBAA12:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 0
//
@@ -1393,7 +1393,7 @@ struct test14_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 2, ptr [[Y]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[BLAH:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTCOMPOUNDLITERAL]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BLAH]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[BLAH]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1425,7 +1425,7 @@ struct test14_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 2, ptr [[Y]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BLAH:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTCOMPOUNDLITERAL]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BLAH]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[BLAH]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1455,7 +1455,7 @@ int test14(int idx) {
// NO-SANITIZE-WITH-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR3]] {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1481,7 +1481,7 @@ int test14(int idx) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR1]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1739,7 +1739,7 @@ struct test26_foo {
// SANITIZE-WITH-ATTR: [[CONT5]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP4]]
//
@@ -1748,7 +1748,7 @@ struct test26_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1757,7 +1757,7 @@ struct test26_foo {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1766,7 +1766,7 @@ struct test26_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1810,10 +1810,10 @@ struct test27_foo {
// SANITIZE-WITH-ATTR: [[CONT3]]:
// SANITIZE-WITH-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ENTRIES]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST27_BARPTR_TBAA19:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM4:%.*]] = sext i32 [[J]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP2]], i64 [[IDXPROM4]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [6 x i8], ptr [[TMP2]], i64 [[IDXPROM4]]
// SANITIZE-WITH-ATTR-NEXT: ret ptr [[ARRAYIDX5]]
//
// NO-SANITIZE-WITH-ATTR-LABEL: define dso_local ptr @test27(
@@ -1821,10 +1821,10 @@ struct test27_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ENTRIES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST27_BARPTR_TBAA16:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[J]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP0]], i64 [[IDXPROM1]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [6 x i8], ptr [[TMP0]], i64 [[IDXPROM1]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret ptr [[ARRAYIDX2]]
//
// SANITIZE-WITHOUT-ATTR-LABEL: define dso_local ptr @test27(
@@ -1832,10 +1832,10 @@ struct test27_foo {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ENTRIES]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST27_BARPTR_TBAA18:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM3:%.*]] = sext i32 [[J]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP0]], i64 [[IDXPROM3]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [6 x i8], ptr [[TMP0]], i64 [[IDXPROM3]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret ptr [[ARRAYIDX4]]
//
// NO-SANITIZE-WITHOUT-ATTR-LABEL: define dso_local ptr @test27(
@@ -1843,10 +1843,10 @@ struct test27_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ENTRIES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS10TEST27_BARPTR_TBAA16:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[J]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP0]], i64 [[IDXPROM1]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [6 x i8], ptr [[TMP0]], i64 [[IDXPROM1]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret ptr [[ARRAYIDX2]]
//
struct test27_bar *test27(struct test27_foo *p, int i, int j) {
@@ -1878,7 +1878,7 @@ struct test28_foo {
// SANITIZE-WITH-ATTR: [[CONT17]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP7]]
//
@@ -1890,7 +1890,7 @@ struct test28_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !tbaa [[_ZTS10TEST28_FOOPTR_TBAA18]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP3]]
//
@@ -1902,7 +1902,7 @@ struct test28_foo {
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !tbaa [[_ZTS10TEST28_FOOPTR_TBAA20]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP3]]
//
@@ -1914,7 +1914,7 @@ struct test28_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !tbaa [[_ZTS10TEST28_FOOPTR_TBAA18]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP3]]
//
@@ -1939,7 +1939,7 @@ struct annotated_struct_array {
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB43:[0-9]+]], i64 [[TMP0]]) #[[ATTR8]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT3]]:
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[ANN]], i64 [[TMP0]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ANN]], i64 [[TMP0]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS9ANNOTATEDPTR_TBAA23:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 8
// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
@@ -1954,7 +1954,7 @@ struct annotated_struct_array {
// SANITIZE-WITH-ATTR: [[CONT32]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM27:%.*]] = sext i32 [[IDX2]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM27]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM27]]
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[COUNTED_BY_LOAD]], 2
// SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX30]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret void
@@ -1963,7 +1963,7 @@ struct annotated_struct_array {
// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef readonly captures(none) [[ANN:%.*]], i32 noundef [[IDX1:%.*]], i32 noundef [[IDX2:%.*]]) local_unnamed_addr #[[ATTR9:[0-9]+]] {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ANN]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ANN]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS9ANNOTATEDPTR_TBAA20:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 8
@@ -1971,7 +1971,7 @@ struct annotated_struct_array {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[TMP1]], 2
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM8:%.*]] = sext i32 [[IDX2]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM8]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM8]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX9]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -1985,11 +1985,11 @@ struct annotated_struct_array {
// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB15:[0-9]+]], i64 [[TMP0]]) #[[ATTR8]], !nosanitize [[META8]]
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META8]]
// SANITIZE-WITHOUT-ATTR: [[CONT21]]:
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[ANN]], i64 [[TMP0]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ANN]], i64 [[TMP0]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS9ANNOTATEDPTR_TBAA22:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM18:%.*]] = sext i32 [[IDX2]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM18]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM18]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX19]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1997,11 +1997,11 @@ struct annotated_struct_array {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef readonly captures(none) [[ANN:%.*]], i32 noundef [[IDX1:%.*]], i32 noundef [[IDX2:%.*]]) local_unnamed_addr #[[ATTR8:[0-9]+]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ANN]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ANN]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS9ANNOTATEDPTR_TBAA20:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM5:%.*]] = sext i32 [[IDX2]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM5]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM5]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX6]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2117,7 +2117,7 @@ struct annotated_with_array {
// SANITIZE-WITH-ATTR: [[CONT9]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM4:%.*]] = sext i32 [[IDX1]] to i64
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM4]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [8 x i8], ptr [[ARRAY]], i64 [[IDXPROM4]]
// SANITIZE-WITH-ATTR-NEXT: [[COUNT:%.*]] = zext nneg i32 [[COUNTED_BY_LOAD]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[TMP6:%.*]] = sub nsw i64 [[COUNT]], [[TMP0]]
// SANITIZE-WITH-ATTR-NEXT: [[REASS_SUB:%.*]] = shl nsw i64 [[TMP6]], 3
@@ -2141,7 +2141,7 @@ struct annotated_with_array {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i64 [[RESULT]], i64 0
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM1]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [8 x i8], ptr [[ARRAY]], i64 [[IDXPROM1]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i64 [[TMP4]], ptr [[ARRAYIDX2]], align 8, !tbaa [[LONG_TBAA22:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2157,7 +2157,7 @@ struct annotated_with_array {
// SANITIZE-WITHOUT-ATTR: [[CONT7]]:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM4:%.*]] = sext i32 [[IDX1]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM4]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [8 x i8], ptr [[ARRAY]], i64 [[IDXPROM4]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i64 -1, ptr [[ARRAYIDX5]], align 8, !tbaa [[LONG_TBAA24:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2166,7 +2166,7 @@ struct annotated_with_array {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM1]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [8 x i8], ptr [[ARRAY]], i64 [[IDXPROM1]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i64 -1, ptr [[ARRAYIDX2]], align 8, !tbaa [[LONG_TBAA22:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2377,7 +2377,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT3]]:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2385,7 +2385,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef writeonly captures(none) [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2393,7 +2393,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2401,7 +2401,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef writeonly captures(none) [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRY:.*:]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2555,7 +2555,7 @@ size_t test38(struct annotated *ptr) {
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 -42, ptr [[COUNT]], align 8, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// NO-SANITIZE-WITH-ATTR-NEXT: ret i64 [[CONV]]
@@ -2567,7 +2567,7 @@ size_t test38(struct annotated *ptr) {
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -42, ptr [[COUNT]], align 8, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// SANITIZE-WITHOUT-ATTR-NEXT: ret i64 [[CONV]]
@@ -2579,7 +2579,7 @@ size_t test38(struct annotated *ptr) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -42, ptr [[COUNT]], align 8, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i64 [[CONV]]
diff --git a/clang/test/CodeGen/union-tbaa1.c b/clang/test/CodeGen/union-tbaa1.c
index 3f6ada5023f27..bcc165e91de38 100644
--- a/clang/test/CodeGen/union-tbaa1.c
+++ b/clang/test/CodeGen/union-tbaa1.c
@@ -11,13 +11,13 @@ void bar(vect32 p[][2]);
// CHECK-LABEL: define dso_local void @fred(
// CHECK-SAME: i32 noundef [[NUM:%.*]], ptr noundef writeonly captures(none) initializes((0, 8)) [[VEC:%.*]], ptr noundef readonly captures(none) [[INDEX:%.*]], ptr noundef readonly captures(none) [[ARR:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP:%.*]] = alloca [4 x [2 x %union.vect32]], align 8
+// CHECK-NEXT: [[TMP:%.*]] = alloca [4 x [2 x [[UNION_VECT32:%.*]]]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2:![0-9]+]]
-// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]]
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[ARR]], i32 [[TMP0]]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP1]], [[NUM]]
-// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %union.vect32], ptr [[TMP]], i32 [[TMP0]]
+// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP]], i32 [[TMP0]]
// CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX]], i32 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4, !tbaa [[TBAA2]]
@@ -27,7 +27,7 @@ void bar(vect32 p[][2]);
// CHECK-NEXT: [[TMP3:%.*]] = lshr i32 [[MUL]], 16
// CHECK-NEXT: store i32 [[TMP3]], ptr [[VEC]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [2 x %union.vect32], ptr [[TMP]], i32 [[TMP4]]
+// CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP]], i32 [[TMP4]]
// CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX13]], i32 6
// CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2, !tbaa [[TBAA6]]
// CHECK-NEXT: [[CONV16:%.*]] = zext i16 [[TMP5]] to i32
diff --git a/clang/test/OpenMP/taskloop_strictmodifier_codegen.cpp b/clang/test/OpenMP/taskloop_strictmodifier_codegen.cpp
index cddd31da1b7fb..aa199f3bd09f6 100644
--- a/clang/test/OpenMP/taskloop_strictmodifier_codegen.cpp
+++ b/clang/test/OpenMP/taskloop_strictmodifier_codegen.cpp
@@ -75,7 +75,7 @@ struct S {
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !tbaa [[INT_TBAA3]]
// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !tbaa [[CHARPTR_TBAA7]]
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
-// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP15]], i64 [[IDXPROM]]
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP15]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[CHARPTR_TBAA17:![0-9]+]]
// CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP16]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX9]], align 1, !tbaa [[CHAR_TBAA19:![0-9]+]]
@@ -212,7 +212,7 @@ struct S {
// CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !tbaa [[CHARPTR_TBAA37:![0-9]+]], !alias.scope [[META30]], !nonnull [[META35]], !align [[META38:![0-9]+]]
// CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !tbaa [[CHARPTR_TBAA7]], !noalias [[META30]]
// CHECK-NEXT: [[IDXPROM_I:%.*]] = zext nneg i32 [[TMP8]] to i64
-// CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds nuw ptr, ptr [[TMP11]], i64 [[IDXPROM_I]]
+// CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[TMP11]], i64 [[IDXPROM_I]]
// CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8, !tbaa [[CHARPTR_TBAA17]], !noalias [[META30]]
// CHECK-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP12]], i64 [[IDXPROM_I]]
// CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1, !tbaa [[CHAR_TBAA19]], !noalias [[META30]]
diff --git a/flang/test/Integration/unroll-loops.f90 b/flang/test/Integration/unroll-loops.f90
index 2c4a3495eb0b7..23fefcd35c158 100644
--- a/flang/test/Integration/unroll-loops.f90
+++ b/flang/test/Integration/unroll-loops.f90
@@ -22,13 +22,13 @@ subroutine unroll(a)
! CHECK-NEXT: %[[IND:.*]] = phi i64 [ 0, %{{.*}} ], [ %[[NIV:.*]], %[[BLK]] ]
! CHECK-NEXT: %[[VIND:.*]] = phi <2 x i64> [ <i64 1, i64 2>, %{{.*}} ], [ %[[NVIND:.*]], %[[BLK]] ]
!
- ! NO-UNROLL-NEXT: %[[GEP:.*]] = getelementptr i64, ptr %[[ARG0]], i64 %[[IND]]
+ ! NO-UNROLL-NEXT: %[[GEP:.*]] = getelementptr [8 x i8], ptr %[[ARG0]], i64 %[[IND]]
! NO-UNROLL-NEXT: store <2 x i64> %[[VIND]], ptr %[[GEP]]
! NO-UNROLL-NEXT: %[[NIV:.*]] = add nuw i64 %{{.*}}, 2
! NO-UNROLL-NEXT: %[[NVIND]] = add nuw nsw <2 x i64> %[[VIND]], splat (i64 2)
!
! UNROLL-NEXT: %[[VIND1:.*]] = add <2 x i64> %[[VIND]], splat (i64 2)
- ! UNROLL-NEXT: %[[GEP0:.*]] = getelementptr i64, ptr %[[ARG0]], i64 %[[IND]]
+ ! UNROLL-NEXT: %[[GEP0:.*]] = getelementptr [8 x i8], ptr %[[ARG0]], i64 %[[IND]]
! UNROLL-NEXT: %[[GEP1:.*]] = getelementptr i8, ptr %[[GEP0]], i64 16
! UNROLL-NEXT: store <2 x i64> %[[VIND]], ptr %[[GEP0]]
! UNROLL-NEXT: store <2 x i64> %[[VIND1]], ptr %[[GEP1]]
diff --git a/flang/test/Lower/HLFIR/unroll-loops.fir b/flang/test/Lower/HLFIR/unroll-loops.fir
index 1ccb6b1bd0315..ce2b133365d35 100644
--- a/flang/test/Lower/HLFIR/unroll-loops.fir
+++ b/flang/test/Lower/HLFIR/unroll-loops.fir
@@ -24,13 +24,13 @@ func.func @unroll(%arg0: !fir.ref<!fir.array<1000 x index>> {fir.bindc_name = "a
// CHECK-NEXT: %[[IND:.*]] = phi i64 [ 0, %{{.*}} ], [ %[[NIV:.*]], %[[BLK]] ]
// CHECK-NEXT: %[[VIND:.*]] = phi <2 x i64> [ <i64 1, i64 2>, %{{.*}} ], [ %[[NVIND:.*]], %[[BLK]] ]
- // NO-UNROLL-NEXT: %[[GEP:.*]] = getelementptr i64, ptr %[[ARG0]], i64 %[[IND]]
+ // NO-UNROLL-NEXT: %[[GEP:.*]] = getelementptr [8 x i8], ptr %[[ARG0]], i64 %[[IND]]
// NO-UNROLL-NEXT: store <2 x i64> %[[VIND]], ptr %[[GEP]]
// NO-UNROLL-NEXT: %[[NIV:.*]] = add nuw i64 %{{.*}}, 2
// NO-UNROLL-NEXT: %[[NVIND]] = add nuw nsw <2 x i64> %[[VIND]], splat (i64 2)
// UNROLL-NEXT: %[[VIND1:.*]] = add <2 x i64> %[[VIND]], splat (i64 2)
- // UNROLL-NEXT: %[[GEP0:.*]] = getelementptr i64, ptr %[[ARG0]], i64 %[[IND]]
+ // UNROLL-NEXT: %[[GEP0:.*]] = getelementptr [8 x i8], ptr %[[ARG0]], i64 %[[IND]]
// UNROLL-NEXT: %[[GEP1:.*]] = getelementptr i8, ptr %[[GEP0]], i64 16
// UNROLL-NEXT: store <2 x i64> %[[VIND]], ptr %[[GEP0]]
// UNROLL-NEXT: store <2 x i64> %[[VIND1]], ptr %[[GEP1]]
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index fd699381c22fa..7c779832fd872 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3487,6 +3487,24 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
BackIndices, GEP.getNoWrapFlags());
}
+ // Canonicalize gep %T to gep [sizeof(%T) x i8]:
+ auto IsCanonicalType = [](Type *Ty) {
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
+ Ty = AT->getElementType();
+ return Ty->isIntegerTy(8);
+ };
+ if (Indices.size() == 1 && !IsCanonicalType(GEPEltType)) {
+ TypeSize Scale = DL.getTypeAllocSize(GEPEltType);
+ assert(!Scale.isScalable() && "Should have been handled earlier");
+ Type *NewElemTy = Builder.getInt8Ty();
+ if (Scale.getFixedValue() != 1)
+ NewElemTy = ArrayType::get(NewElemTy, Scale.getFixedValue());
+ GEP.setSourceElementType(NewElemTy);
+ GEP.setResultElementType(NewElemTy);
+ // Don't bother revisiting the GEP after this change.
+ MadeIRChange = true;
+ }
+
// Check to see if the inputs to the PHI node are getelementptr instructions.
if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
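For reference, a minimal before/after sketch of the canonicalization in the hunk above (illustrative IR with hypothetical names, not taken from the patch). Only single-index GEPs are rewritten, and the i8 array length is the alloc size of the old source element type, so struct padding is included:

  %struct.s = type { i16, i16, i16 }   ; alloc size 6 bytes

  ; before
  %p1 = getelementptr inbounds i32, ptr %base, i64 %idx
  %p2 = getelementptr inbounds %struct.s, ptr %base, i64 %idx

  ; after: the byte offset (%idx * allocsize(element)) is unchanged,
  ; only the element type is rewritten to an i8 array
  %p1 = getelementptr inbounds [4 x i8], ptr %base, i64 %idx
  %p2 = getelementptr inbounds [6 x i8], ptr %base, i64 %idx

A GEP whose element type is already i8 (or [N x i8]) is left alone, which is what the IsCanonicalType lambda checks; a scale of 1 produces a plain i8 element rather than [1 x i8].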
diff --git a/llvm/test/Analysis/BasicAA/featuretest.ll b/llvm/test/Analysis/BasicAA/featuretest.ll
index 04c4725d26c1d..4682a2b804526 100644
--- a/llvm/test/Analysis/BasicAA/featuretest.ll
+++ b/llvm/test/Analysis/BasicAA/featuretest.ll
@@ -20,7 +20,7 @@ define i32 @different_array_test(i64 %A, i64 %B) {
; CHECK-NEXT: [[ARRAY22:%.*]] = alloca [200 x i32], align 4
; CHECK-NEXT: call void @external(ptr nonnull [[ARRAY11]])
; CHECK-NEXT: call void @external(ptr nonnull [[ARRAY22]])
-; CHECK-NEXT: [[POINTER2:%.*]] = getelementptr i32, ptr [[ARRAY22]], i64 [[B:%.*]]
+; CHECK-NEXT: [[POINTER2:%.*]] = getelementptr [4 x i8], ptr [[ARRAY22]], i64 [[B:%.*]]
; CHECK-NEXT: store i32 7, ptr [[POINTER2]], align 4
; CHECK-NEXT: ret i32 0
;
@@ -92,13 +92,13 @@ define i32 @gep_distance_test(ptr %A) {
; cannot alias, even if there is a variable offset between them...
define i32 @gep_distance_test2(ptr %A, i64 %distance) {
; NO_ASSUME-LABEL: @gep_distance_test2(
-; NO_ASSUME-NEXT: [[B_SPLIT:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[DISTANCE:%.*]]
+; NO_ASSUME-NEXT: [[B_SPLIT:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[DISTANCE:%.*]]
; NO_ASSUME-NEXT: [[B:%.*]] = getelementptr i8, ptr [[B_SPLIT]], i64 4
; NO_ASSUME-NEXT: store i32 7, ptr [[B]], align 4
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: @gep_distance_test2(
-; USE_ASSUME-NEXT: [[B_SPLIT:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[DISTANCE:%.*]]
+; USE_ASSUME-NEXT: [[B_SPLIT:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[DISTANCE:%.*]]
; USE_ASSUME-NEXT: [[B:%.*]] = getelementptr i8, ptr [[B_SPLIT]], i64 4
; USE_ASSUME-NEXT: store i32 7, ptr [[B]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 4), "nonnull"(ptr [[A]]), "align"(ptr [[A]], i64 4) ]
@@ -161,11 +161,11 @@ define i16 @zext_sext_confusion(ptr %row2col, i5 %j) nounwind{
; CHECK-LABEL: @zext_sext_confusion(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUM5_CAST:%.*]] = zext i5 [[J:%.*]] to i64
-; CHECK-NEXT: [[P1:%.*]] = getelementptr i16, ptr [[ROW2COL:%.*]], i64 [[SUM5_CAST]]
+; CHECK-NEXT: [[P1:%.*]] = getelementptr [2 x i8], ptr [[ROW2COL:%.*]], i64 [[SUM5_CAST]]
; CHECK-NEXT: [[ROW2COL_LOAD_1_2:%.*]] = load i16, ptr [[P1]], align 1
; CHECK-NEXT: [[SUM13_CAST31:%.*]] = sext i5 [[J]] to i6
; CHECK-NEXT: [[SUM13_CAST:%.*]] = zext i6 [[SUM13_CAST31]] to i64
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i16, ptr [[ROW2COL]], i64 [[SUM13_CAST]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [2 x i8], ptr [[ROW2COL]], i64 [[SUM13_CAST]]
; CHECK-NEXT: [[ROW2COL_LOAD_1_6:%.*]] = load i16, ptr [[P2]], align 1
; CHECK-NEXT: [[DOTRET:%.*]] = sub i16 [[ROW2COL_LOAD_1_6]], [[ROW2COL_LOAD_1_2]]
; CHECK-NEXT: ret i16 [[DOTRET]]
diff --git a/llvm/test/CodeGen/AArch64/sme-aarch64-svcount-O3.ll b/llvm/test/CodeGen/AArch64/sme-aarch64-svcount-O3.ll
index 079f2e7eb481b..a7ae0b344b1a3 100644
--- a/llvm/test/CodeGen/AArch64/sme-aarch64-svcount-O3.ll
+++ b/llvm/test/CodeGen/AArch64/sme-aarch64-svcount-O3.ll
@@ -11,7 +11,7 @@ define target("aarch64.svcount") @test_alloca_store_reload(target("aarch64.svcou
; CHECK-NEXT: br i1 [[I1_PEEL]], label [[LOOP_EXIT:%.*]], label [[LOOP_BODY:%.*]]
; CHECK: loop.body:
; CHECK-NEXT: [[IND:%.*]] = phi i64 [ [[IND_NEXT:%.*]], [[LOOP_BODY]] ], [ 1, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[IPTR_GEP:%.*]] = getelementptr i64, ptr [[IPTR]], i64 [[IND]]
+; CHECK-NEXT: [[IPTR_GEP:%.*]] = getelementptr [8 x i8], ptr [[IPTR]], i64 [[IND]]
; CHECK-NEXT: store i64 [[IND]], ptr [[IPTR_GEP]], align 8
; CHECK-NEXT: store target("aarch64.svcount") [[VAL1:%.*]], ptr [[PPTR]], align 2
; CHECK-NEXT: [[IND_NEXT]] = add i64 [[IND]], 1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index 5391611742efd..d1144b66cb652 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -52,7 +52,7 @@ define amdgpu_kernel void @permlane64_nonuniform(ptr addrspace(1) %out) {
; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
-; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[OUT]], i64 [[TMP1]]
; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
; CURRENT-CHECK-NEXT: ret void
;
@@ -86,7 +86,7 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(ptr addrspace(1) %ou
; CURRENT-CHECK-NEXT: [[TID2:%.*]] = add nuw nsw i32 [[TID]], 1
; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
-; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[OUT]], i64 [[TMP1]]
; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
; CURRENT-CHECK-NEXT: ret void
;
@@ -165,7 +165,7 @@ define amdgpu_kernel void @readlane_nonuniform_workitem(ptr addrspace(1) %out) {
; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TIDX]] to i64
-; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[OUT]], i64 [[TMP1]]
; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
; CURRENT-CHECK-NEXT: ret void
;
@@ -204,7 +204,7 @@ define amdgpu_kernel void @readlane_nonuniform_expression(ptr addrspace(1) %out)
; CURRENT-CHECK-NEXT: [[TIDY2:%.*]] = add nuw nsw i32 [[TIDY]], 2
; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TIDX]] to i64
-; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[OUT]], i64 [[TMP1]]
; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
; CURRENT-CHECK-NEXT: ret void
;
@@ -292,7 +292,7 @@ define amdgpu_kernel void @readfirstlane_with_workitem_id(ptr addrspace(1) %out)
; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
-; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[OUT]], i64 [[TMP1]]
; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
; CURRENT-CHECK-NEXT: ret void
;
@@ -326,7 +326,7 @@ define amdgpu_kernel void @readfirstlane_expression(ptr addrspace(1) %out) {
; CURRENT-CHECK-NEXT: [[TID2:%.*]] = add nuw nsw i32 [[TID]], 1
; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID2]] to i64
-; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[OUT]], i64 [[TMP1]]
; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
; CURRENT-CHECK-NEXT: ret void
;
diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
index d0766851e5421..494de11ff6c3f 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
@@ -7,7 +7,7 @@
; OPT: store i32 0, ptr addrspace(5) %alloca, align 4
; OPT: store i32 1, ptr addrspace(5) %a1, align 4
; OPT: store i32 2, ptr addrspace(5) %a2, align 4
-; OPT: %tmp = getelementptr i32, ptr addrspace(5) %alloca, i64 %index
+; OPT: %tmp = getelementptr [4 x i8], ptr addrspace(5) %alloca, i64 %index
; OPT: %ac = addrspacecast ptr addrspace(5) %tmp to ptr
; OPT: %data = load i32, ptr %ac, align 4
define amdgpu_kernel void @vector_addrspacecast(ptr addrspace(1) %out, i64 %index) {
diff --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
index 15402c9992b18..4789ebcd7b25c 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
@@ -12,10 +12,10 @@ target triple = "hexagon"
define <64 x i16> @f0(ptr %a0, i32 %a1) #0 {
; CHECK-LABEL: @f0(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[A1:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [2 x i8], ptr [[A0:%.*]], i32 [[A1:%.*]]
; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 128
; CHECK-NEXT: [[CST12:%.*]] = load <64 x i16>, ptr [[V1]], align 2, !tbaa [[TBAA0:![0-9]+]]
-; CHECK-NEXT: [[ITP:%.*]] = getelementptr i16, ptr [[A0]], i32 [[A1]]
+; CHECK-NEXT: [[ITP:%.*]] = getelementptr [2 x i8], ptr [[A0]], i32 [[A1]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
; CHECK-NEXT: [[CST13:%.*]] = load <64 x i16>, ptr [[GEP3]], align 2, !tbaa [[TBAA0]]
; CHECK-NEXT: [[V8:%.*]] = add <64 x i16> [[CST12]], [[CST13]]
@@ -37,10 +37,10 @@ b0:
define <64 x i16> @f1(ptr %a0, i32 %a1) #0 {
; CHECK-LABEL: @f1(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[A1:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [2 x i8], ptr [[A0:%.*]], i32 [[A1:%.*]]
; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 128
; CHECK-NEXT: [[CST12:%.*]] = load <64 x i16>, ptr [[V1]], align 2, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ITP:%.*]] = getelementptr i16, ptr [[A0]], i32 [[A1]]
+; CHECK-NEXT: [[ITP:%.*]] = getelementptr [2 x i8], ptr [[A0]], i32 [[A1]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
; CHECK-NEXT: [[CST13:%.*]] = load <64 x i16>, ptr [[GEP3]], align 2
; CHECK-NEXT: [[V8:%.*]] = add <64 x i16> [[CST12]], [[CST13]]
@@ -62,10 +62,10 @@ b0:
define <64 x i16> @f2(ptr %a0, i32 %a1) #0 {
; CHECK-LABEL: @f2(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[A1:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [2 x i8], ptr [[A0:%.*]], i32 [[A1:%.*]]
; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 128
; CHECK-NEXT: [[CST12:%.*]] = load <64 x i16>, ptr [[V1]], align 2, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ITP:%.*]] = getelementptr i16, ptr [[A0]], i32 [[A1]]
+; CHECK-NEXT: [[ITP:%.*]] = getelementptr [2 x i8], ptr [[A0]], i32 [[A1]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
; CHECK-NEXT: [[CST13:%.*]] = load <64 x i16>, ptr [[GEP3]], align 2, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[V8:%.*]] = add <64 x i16> [[CST12]], [[CST13]]
@@ -87,7 +87,7 @@ b0:
define void @f3(ptr %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
; CHECK-LABEL: @f3(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[A1:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [2 x i8], ptr [[A0:%.*]], i32 [[A1:%.*]]
; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 128
; CHECK-NEXT: [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
; CHECK-NEXT: [[AND:%.*]] = and i32 [[PTI]], -128
@@ -140,7 +140,7 @@ b0:
define void @f4(ptr %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
; CHECK-LABEL: @f4(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[A1:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [2 x i8], ptr [[A0:%.*]], i32 [[A1:%.*]]
; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 128
; CHECK-NEXT: [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
; CHECK-NEXT: [[AND:%.*]] = and i32 [[PTI]], -128
@@ -193,7 +193,7 @@ b0:
define void @f5(ptr %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
; CHECK-LABEL: @f5(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[A1:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [2 x i8], ptr [[A0:%.*]], i32 [[A1:%.*]]
; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 128
; CHECK-NEXT: [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
; CHECK-NEXT: [[AND:%.*]] = and i32 [[PTI]], -128
diff --git a/llvm/test/DebugInfo/Generic/instcombine-phi.ll b/llvm/test/DebugInfo/Generic/instcombine-phi.ll
index 08f7bc0711808..d13cd84f196b6 100644
--- a/llvm/test/DebugInfo/Generic/instcombine-phi.ll
+++ b/llvm/test/DebugInfo/Generic/instcombine-phi.ll
@@ -106,7 +106,7 @@ if.end: ; preds = %if.else, %if.then
; CHECK: define ptr @gep
; CHECK-LABEL: if.end:
; CHECK: %[[PHI:.*]] = phi i64 [ %call, %if.then ], [ %call1, %if.else ]
-; CHECK: getelementptr inbounds i32, ptr %b, i64 %[[PHI]], !dbg [[gepMergedLoc:![0-9]+]]
+; CHECK: getelementptr inbounds [4 x i8], ptr %b, i64 %[[PHI]], !dbg [[gepMergedLoc:![0-9]+]]
; CHECK: ret ptr
define ptr @gep(i32 %a, ptr %b) !dbg !23 {
diff --git a/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll b/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
index 42b30eba1740f..48997b5470321 100644
--- a/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
+++ b/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
@@ -16,7 +16,7 @@ define i1 @print_pgm_cond_true(i32 %tmp12.reload, ptr %tmp16.out) {
; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP16_OUT]], align 4
; CHECK-NEXT: ret i1 false
; CHECK: cond_true:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr @r, i32 [[TMP12_RELOAD:%.*]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [4 x i8], ptr @r, i32 [[TMP12_RELOAD:%.*]]
; CHECK-NEXT: [[TMP16]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP16]], -32
; CHECK-NEXT: [[BOTHCOND:%.*]] = icmp ult i32 [[TMP0]], -63
@@ -53,7 +53,7 @@ define i1 @print_pgm_cond_true_logical(i32 %tmp12.reload, ptr %tmp16.out) {
; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP16_OUT]], align 4
; CHECK-NEXT: ret i1 false
; CHECK: cond_true:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr @r, i32 [[TMP12_RELOAD:%.*]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [4 x i8], ptr @r, i32 [[TMP12_RELOAD:%.*]]
; CHECK-NEXT: [[TMP16]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP16]], -32
; CHECK-NEXT: [[BOTHCOND:%.*]] = icmp ult i32 [[TMP0]], -63
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-gatherscatter.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-gatherscatter.ll
index 9f3fdcd8a767c..4eeafb516d80d 100644
--- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-gatherscatter.ll
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-gatherscatter.ll
@@ -9,7 +9,7 @@ target triple = "aarch64-unknown-linux-gnu"
define <vscale x 2 x double> @test_ld1_gather_index_nxv2f64_stride1(<vscale x 2 x i1> %pred, ptr %x, i64 %base) #0 {
; CHECK-LABEL: @test_ld1_gather_index_nxv2f64_stride1(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[X:%.*]], i64 [[BASE:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[X:%.*]], i64 [[BASE:%.*]]
; CHECK-NEXT: [[LD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[TMP1]], <vscale x 2 x i1> [[PRED:%.*]], <vscale x 2 x double> zeroinitializer)
; CHECK-NEXT: ret <vscale x 2 x double> [[LD]]
;
@@ -31,7 +31,7 @@ define <vscale x 2 x double> @test_ld1_gather_index_nxv2f64_stride2_negtest(<vsc
define <vscale x 2 x double> @test_ld1_gather_index_nxv2f64_stride1_align8(<vscale x 2 x i1> %pred, ptr align 8 %x, i64 %base) #0 {
; CHECK-LABEL: @test_ld1_gather_index_nxv2f64_stride1_align8(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[X:%.*]], i64 [[BASE:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[X:%.*]], i64 [[BASE:%.*]]
; CHECK-NEXT: [[LD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP1]], <vscale x 2 x i1> [[PRED:%.*]], <vscale x 2 x double> zeroinitializer)
; CHECK-NEXT: ret <vscale x 2 x double> [[LD]]
;
@@ -46,7 +46,7 @@ define <vscale x 2 x double> @test_ld1_gather_index_nxv2f64_stride1_align8(<vsca
define void @test_st1_scatter_index_nxv2f64_stride1(<vscale x 2 x i1> %pred, ptr %x, i64 %base, <vscale x 2 x double> %val) #0 {
; CHECK-LABEL: @test_st1_scatter_index_nxv2f64_stride1(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[X:%.*]], i64 [[BASE:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[X:%.*]], i64 [[BASE:%.*]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[VAL:%.*]], ptr align 1 [[TMP1]], <vscale x 2 x i1> [[PRED:%.*]])
; CHECK-NEXT: ret void
;
@@ -68,7 +68,7 @@ define void @test_st1_scatter_index_nxv2f64_stride2_negtest(<vscale x 2 x i1> %p
define void @test_st1_scatter_index_nxv2f64_stride1_align8(<vscale x 2 x i1> %pred, ptr align 8 %x, i64 %base, <vscale x 2 x double> %val) #0 {
; CHECK-LABEL: @test_st1_scatter_index_nxv2f64_stride1_align8(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[X:%.*]], i64 [[BASE:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[X:%.*]], i64 [[BASE:%.*]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[VAL:%.*]], ptr align 8 [[TMP1]], <vscale x 2 x i1> [[PRED:%.*]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll b/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
index 968138e45b33a..74992d341a0f0 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
@@ -36,7 +36,7 @@ define i8 @memcpy_constant_arg_ptr_to_alloca_load_metadata(ptr addrspace(4) noal
define i64 @memcpy_constant_arg_ptr_to_alloca_load_alignment(ptr addrspace(4) noalias readonly align 4 dereferenceable(256) %arg, i32 %idx) {
; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_load_alignment(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr addrspace(4) [[ARG:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [8 x i8], ptr addrspace(4) [[ARG:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr addrspace(4) [[GEP]], align 16
; CHECK-NEXT: ret i64 [[LOAD]]
;
@@ -51,7 +51,7 @@ define i64 @memcpy_constant_arg_ptr_to_alloca_load_atomic(ptr addrspace(4) noali
; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_load_atomic(
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [32 x i64], align 8, addrspace(5)
; CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 8 dereferenceable(256) [[ALLOCA]], ptr addrspace(4) noundef align 8 dereferenceable(256) [[ARG:%.*]], i64 256, i1 false)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr addrspace(5) [[ALLOCA]], i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [8 x i8], ptr addrspace(5) [[ALLOCA]], i32 [[IDX:%.*]]
; CHECK-NEXT: [[LOAD:%.*]] = load atomic i64, ptr addrspace(5) [[GEP]] syncscope("somescope") acquire, align 8
; CHECK-NEXT: ret i64 [[LOAD]]
;
diff --git a/llvm/test/Transforms/InstCombine/align-addr.ll b/llvm/test/Transforms/InstCombine/align-addr.ll
index b77037e592b54..5cc707be2c709 100644
--- a/llvm/test/Transforms/InstCombine/align-addr.ll
+++ b/llvm/test/Transforms/InstCombine/align-addr.ll
@@ -18,8 +18,8 @@ define void @test0(ptr %b, i64 %n, i64 %u, i64 %y) nounwind {
; CHECK: bb:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[BB]] ], [ 20, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[J:%.*]] = mul i64 [[I]], [[V]]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[E]], i64 [[J]]
-; CHECK-NEXT: [[T8:%.*]] = getelementptr double, ptr [[TMP0]], i64 [[Z]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [8 x i8], ptr [[E]], i64 [[J]]
+; CHECK-NEXT: [[T8:%.*]] = getelementptr [8 x i8], ptr [[TMP0]], i64 [[Z]]
; CHECK-NEXT: store <2 x double> zeroinitializer, ptr [[T8]], align 8
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[I]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/InstCombine/and-or-icmps.ll b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
index 4cd089ca524c2..58c1f0038e5df 100644
--- a/llvm/test/Transforms/InstCombine/and-or-icmps.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
@@ -408,7 +408,7 @@ define void @simplify_before_foldAndOfICmps2(ptr %p, ptr %A8) "instcombine-no-ve
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[L7]], -1
; CHECK-NEXT: [[B11:%.*]] = zext i1 [[TMP1]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i64
-; CHECK-NEXT: [[G4:%.*]] = getelementptr i16, ptr [[A8]], i64 [[TMP2]]
+; CHECK-NEXT: [[G4:%.*]] = getelementptr [2 x i8], ptr [[A8]], i64 [[TMP2]]
; CHECK-NEXT: [[L2:%.*]] = load i16, ptr [[G4]], align 2
; CHECK-NEXT: [[L4:%.*]] = load i16, ptr [[A8]], align 2
; CHECK-NEXT: [[B21:%.*]] = sdiv i16 [[L7]], [[L4]]
@@ -429,7 +429,7 @@ define void @simplify_before_foldAndOfICmps2(ptr %p, ptr %A8) "instcombine-no-ve
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[C11]], true
; CHECK-NEXT: [[C18:%.*]] = or i1 [[C10]], [[TMP4]]
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[C4]] to i64
-; CHECK-NEXT: [[G26:%.*]] = getelementptr i1, ptr null, i64 [[TMP3]]
+; CHECK-NEXT: [[G26:%.*]] = getelementptr i8, ptr null, i64 [[TMP3]]
; CHECK-NEXT: store i16 [[B33]], ptr [[P:%.*]], align 2
; CHECK-NEXT: store i1 [[C18]], ptr [[P]], align 1
; CHECK-NEXT: store ptr [[G26]], ptr [[P]], align 8
diff --git a/llvm/test/Transforms/InstCombine/array.ll b/llvm/test/Transforms/InstCombine/array.ll
index ec1a39dff47bf..95960b720d5e4 100644
--- a/llvm/test/Transforms/InstCombine/array.ll
+++ b/llvm/test/Transforms/InstCombine/array.ll
@@ -6,7 +6,7 @@ define void @test(ptr %ptr, i32 %a, i32 %b) {
; CHECK-SAME: ptr [[PTR:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[A]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[TMP0]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 40
; CHECK-NEXT: store i32 [[B]], ptr [[GEP]], align 4
; CHECK-NEXT: ret void
@@ -25,7 +25,7 @@ define i32 @test_add_res_moreoneuse(ptr %ptr, i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A]], 5
; CHECK-NEXT: [[IDX:%.*]] = sext i32 [[ADD]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR]], i64 [[IDX]]
; CHECK-NEXT: store i32 [[B]], ptr [[GEP]], align 4
; CHECK-NEXT: ret i32 [[ADD]]
;
@@ -43,7 +43,7 @@ define void @test_addop_nonsw_flag(ptr %ptr, i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A]], 10
; CHECK-NEXT: [[IDX:%.*]] = sext i32 [[ADD]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR]], i64 [[IDX]]
; CHECK-NEXT: store i32 [[B]], ptr [[GEP]], align 4
; CHECK-NEXT: ret void
;
@@ -61,7 +61,7 @@ define void @test_add_op2_not_constant(ptr %ptr, i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A]], [[B]]
; CHECK-NEXT: [[IDX:%.*]] = sext i32 [[ADD]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR]], i64 [[IDX]]
; CHECK-NEXT: store i32 [[B]], ptr [[GEP]], align 4
; CHECK-NEXT: ret void
;
@@ -78,7 +78,7 @@ define void @test_zext_nneg(ptr %ptr, i32 %a, i32 %b) {
; CHECK-SAME: ptr [[PTR:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[A]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[TMP0]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 40
; CHECK-NEXT: store i32 [[B]], ptr [[GEP]], align 4
; CHECK-NEXT: ret void
@@ -97,7 +97,7 @@ define void @test_zext_missing_nneg(ptr %ptr, i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A]], 10
; CHECK-NEXT: [[IDX:%.*]] = zext i32 [[ADD]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[PTR]], i64 [[IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PTR]], i64 [[IDX]]
; CHECK-NEXT: store i32 [[B]], ptr [[GEP]], align 4
; CHECK-NEXT: ret void
;
@@ -112,8 +112,8 @@ entry:
define ptr @gep_inbounds_nuwaddlike(ptr %ptr, i64 %a, i64 %b) {
; CHECK-LABEL: define ptr @gep_inbounds_nuwaddlike(
; CHECK-SAME: ptr [[PTR:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%add = or disjoint i64 %a, %b
@@ -124,8 +124,8 @@ define ptr @gep_inbounds_nuwaddlike(ptr %ptr, i64 %a, i64 %b) {
define ptr @gep_inbounds_add_nuw(ptr %ptr, i64 %a, i64 %b) {
; CHECK-LABEL: define ptr @gep_inbounds_add_nuw(
; CHECK-SAME: ptr [[PTR:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%add = add nuw i64 %a, %b
@@ -136,8 +136,8 @@ define ptr @gep_inbounds_add_nuw(ptr %ptr, i64 %a, i64 %b) {
define ptr @gep_inbounds_add_nusw_nuw(ptr %ptr, i64 %a, i64 %b) {
; CHECK-LABEL: define ptr @gep_inbounds_add_nusw_nuw(
; CHECK-SAME: ptr [[PTR:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nusw nuw i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nusw nuw [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%add = add nuw i64 %a, %b
@@ -148,8 +148,8 @@ define ptr @gep_inbounds_add_nusw_nuw(ptr %ptr, i64 %a, i64 %b) {
define ptr @gep_add_nuw(ptr %ptr, i64 %a, i64 %b) {
; CHECK-LABEL: define ptr @gep_add_nuw(
; CHECK-SAME: ptr [[PTR:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nuw i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nuw [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%add = add nuw i64 %a, %b
@@ -164,8 +164,8 @@ define ptr @gep_inbounds_add_nsw_nonneg(ptr %ptr, i64 %a, i64 %b) {
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[B_NNEG:%.*]] = icmp sgt i64 [[B]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[B_NNEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%a.nneg = icmp sgt i64 %a, -1
@@ -182,8 +182,8 @@ define ptr @gep_inbounds_add_nsw_not_nonneg1(ptr %ptr, i64 %a, i64 %b) {
; CHECK-SAME: ptr [[PTR:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
; CHECK-NEXT: [[A_NNEG:%.*]] = icmp sgt i64 [[A]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%a.nneg = icmp sgt i64 %a, -1
@@ -198,8 +198,8 @@ define ptr @gep_inbounds_add_nsw_not_nonneg2(ptr %ptr, i64 %a, i64 %b) {
; CHECK-SAME: ptr [[PTR:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
; CHECK-NEXT: [[B_NNEG:%.*]] = icmp sgt i64 [[B]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[B_NNEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%b.nneg = icmp sgt i64 %b, -1
@@ -216,8 +216,8 @@ define ptr @gep_not_inbounds_add_nsw_nonneg(ptr %ptr, i64 %a, i64 %b) {
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[B_NNEG:%.*]] = icmp sgt i64 [[B]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[B_NNEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%a.nneg = icmp sgt i64 %a, -1
@@ -236,8 +236,8 @@ define ptr @gep_inbounds_add_not_nsw_nonneg(ptr %ptr, i64 %a, i64 %b) {
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[B_NNEG:%.*]] = icmp sgt i64 [[B]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[B_NNEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[A]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[A]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[B]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%a.nneg = icmp sgt i64 %a, -1
@@ -255,7 +255,7 @@ define ptr @gep_inbounds_sext_add_nonneg(ptr %ptr, i32 %a) {
; CHECK-NEXT: [[A_NNEG:%.*]] = icmp sgt i32 [[A]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[A]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[PTR]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PTR]], i64 [[TMP1]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 40
; CHECK-NEXT: ret ptr [[GEP]]
;
@@ -273,7 +273,7 @@ define ptr @gep_inbounds_sext_addlike_nonneg(ptr %ptr, i32 %a) {
; CHECK-NEXT: [[A_NNEG:%.*]] = icmp sgt i32 [[A]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[IDX:%.*]] = zext nneg i32 [[A]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[PTR]], i64 [[IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PTR]], i64 [[IDX]]
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP]], i64 40
; CHECK-NEXT: ret ptr [[GEP1]]
;
@@ -291,7 +291,7 @@ define ptr @gep_inbounds_sext_add_not_nonneg_1(ptr %ptr, i32 %a) {
; CHECK-NEXT: [[A_NNEG:%.*]] = icmp sgt i32 [[A]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[A]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[TMP1]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP2]], i64 -40
; CHECK-NEXT: ret ptr [[GEP]]
;
@@ -307,7 +307,7 @@ define ptr @gep_inbounds_sext_add_not_nonneg_2(ptr %ptr, i32 %a) {
; CHECK-LABEL: define ptr @gep_inbounds_sext_add_not_nonneg_2(
; CHECK-SAME: ptr [[PTR:%.*]], i32 [[A:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[A]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[TMP1]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP2]], i64 40
; CHECK-NEXT: ret ptr [[GEP]]
;
@@ -323,7 +323,7 @@ define ptr @gep_not_inbounds_sext_add_nonneg(ptr %ptr, i32 %a) {
; CHECK-NEXT: [[A_NNEG:%.*]] = icmp sgt i32 [[A]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_NNEG]])
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[A]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[PTR]], i64 [[TMP1]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP2]], i64 40
; CHECK-NEXT: ret ptr [[GEP]]
;
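
The stride equivalence that all of these check updates rely on, shown as a standalone IR sketch (the function and value names here are illustrative, not taken from the patch):

; Both GEPs advance %p by 4 bytes per unit of %i; only the spelling of
; the source element type differs, the computed address is identical.
define ptr @stride_equiv_sketch(ptr %p, i64 %i) {
  %old_form = getelementptr inbounds i32, ptr %p, i64 %i
  %new_form = getelementptr inbounds [4 x i8], ptr %p, i64 %i
  ret ptr %new_form
}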
diff --git a/llvm/test/Transforms/InstCombine/assume-loop-align.ll b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
index 7669d5bae5b08..f692dbca69e0e 100644
--- a/llvm/test/Transforms/InstCombine/assume-loop-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
@@ -15,10 +15,10 @@ define void @foo(ptr %a, ptr %b) #0 {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll b/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
index 129da3f9110ad..112c25b9368a2 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
@@ -35,9 +35,9 @@ define ptr @xzy(i64 %x, i64 %y, i64 %z) {
; CHECK-LABEL: define ptr @xzy(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr getelementptr inbounds nuw (i8, ptr @glob, i64 40), i64 [[X]]
-; CHECK-NEXT: [[GEP_SPLIT1:%.*]] = getelementptr inbounds [10 x i32], ptr [[GEP_SPLIT]], i64 [[Z]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[GEP_SPLIT1]], i64 [[Y]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [400 x i8], ptr getelementptr inbounds nuw (i8, ptr @glob, i64 40), i64 [[X]]
+; CHECK-NEXT: [[GEP_SPLIT1:%.*]] = getelementptr inbounds [40 x i8], ptr [[GEP_SPLIT]], i64 [[Z]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[GEP_SPLIT1]], i64 [[Y]]
; CHECK-NEXT: ret ptr [[GEP]]
;
entry:
@@ -49,7 +49,7 @@ define ptr @zerox(i64 %x) {
; CHECK-LABEL: define ptr @zerox(
; CHECK-SAME: i64 [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @glob, i64 32), i64 [[X]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @glob, i64 32), i64 [[X]]
; CHECK-NEXT: ret ptr [[GEP]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-gep-mul.ll b/llvm/test/Transforms/InstCombine/canonicalize-gep-mul.ll
index 1b33e0c2087f1..d2777e2216a7b 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-gep-mul.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-gep-mul.ll
@@ -62,7 +62,7 @@ define ptr @usemul(ptr %p, i64 %x) {
; CHECK-SAME: ptr [[P:%.*]], i64 [[X:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[X]], 5
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[MUL]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[P]], i64 [[MUL]]
; CHECK-NEXT: call void @use(i64 [[MUL]])
; CHECK-NEXT: ret ptr [[GEP]]
;
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 46deb294b9d45..6047d61e8dbff 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -1090,7 +1090,7 @@ define double @test73(ptr %p, i128 %i) {
define double @test74(ptr %p, i64 %i) {
; ALL-LABEL: @test74(
-; ALL-NEXT: [[PP:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[I:%.*]]
+; ALL-NEXT: [[PP:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[I:%.*]]
; ALL-NEXT: [[L:%.*]] = load double, ptr [[PP]], align 8
; ALL-NEXT: ret double [[L]]
;
diff --git a/llvm/test/Transforms/InstCombine/cast_phi.ll b/llvm/test/Transforms/InstCombine/cast_phi.ll
index a3f818e013208..e8db72c8d849e 100644
--- a/llvm/test/Transforms/InstCombine/cast_phi.ll
+++ b/llvm/test/Transforms/InstCombine/cast_phi.ll
@@ -9,9 +9,9 @@ define void @MainKernel(i32 %iNumSteps, i32 %tid, i32 %base) {
; CHECK-NEXT: [[CALLB:%.*]] = alloca [258 x float], align 4
; CHECK-NEXT: [[CONV_I:%.*]] = uitofp i32 [[INUMSTEPS:%.*]] to float
; CHECK-NEXT: [[CONV_I12:%.*]] = zext i32 [[TID:%.*]] to i64
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[CALLA]], i64 [[CONV_I12]]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[CALLA]], i64 [[CONV_I12]]
; CHECK-NEXT: store float [[CONV_I]], ptr [[ARRAYIDX3]], align 4
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[CALLB]], i64 [[CONV_I12]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[CALLB]], i64 [[CONV_I12]]
; CHECK-NEXT: store float [[CONV_I]], ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[CMP7:%.*]] = icmp eq i32 [[TID]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[DOTBB1:%.*]], label [[DOTBB2:%.*]]
@@ -31,8 +31,8 @@ define void @MainKernel(i32 %iNumSteps, i32 %tid, i32 %base) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[I12_06]], [[BASE:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[I12_06]], 1
; CHECK-NEXT: [[CONV_I9:%.*]] = sext i32 [[ADD]] to i64
-; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[CALLA]], i64 [[CONV_I9]]
-; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[CALLB]], i64 [[CONV_I9]]
+; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [4 x i8], ptr [[CALLA]], i64 [[CONV_I9]]
+; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds [4 x i8], ptr [[CALLB]], i64 [[CONV_I9]]
; CHECK-NEXT: [[CMP40:%.*]] = icmp ult i32 [[I12_06]], [[BASE]]
; CHECK-NEXT: br i1 [[TMP3]], label [[DOTBB4:%.*]], label [[DOTBB5:%.*]]
; CHECK: .bb4:
diff --git a/llvm/test/Transforms/InstCombine/cast_ptr.ll b/llvm/test/Transforms/InstCombine/cast_ptr.ll
index 1cf3e32556b38..c42add1bcd071 100644
--- a/llvm/test/Transforms/InstCombine/cast_ptr.ll
+++ b/llvm/test/Transforms/InstCombine/cast_ptr.ll
@@ -451,9 +451,9 @@ define i32 @ptrtoint_of_inttoptr_multiple_gep(i32 %x, i32 %y, i32 %z) {
define i32 @ptrtoint_of_inttoptr_multiple_gep_extra_use(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @ptrtoint_of_inttoptr_multiple_gep_extra_use(
; CHECK-NEXT: [[PTR:%.*]] = inttoptr i32 [[X:%.*]] to ptr
-; CHECK-NEXT: [[PTR2:%.*]] = getelementptr i16, ptr [[PTR]], i32 [[Y:%.*]]
+; CHECK-NEXT: [[PTR2:%.*]] = getelementptr [2 x i8], ptr [[PTR]], i32 [[Y:%.*]]
; CHECK-NEXT: call void @use_ptr(ptr [[PTR2]])
-; CHECK-NEXT: [[PTR3:%.*]] = getelementptr i32, ptr [[PTR2]], i32 [[Z:%.*]]
+; CHECK-NEXT: [[PTR3:%.*]] = getelementptr [4 x i8], ptr [[PTR2]], i32 [[Z:%.*]]
; CHECK-NEXT: [[R:%.*]] = ptrtoint ptr [[PTR3]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -468,7 +468,7 @@ define i32 @ptrtoint_of_inttoptr_multiple_gep_extra_use(i32 %x, i32 %y, i32 %z)
define i32 @ptrtoint_of_inttoptr_index_type(i32 %x, i16 %y) {
; CHECK-LABEL: @ptrtoint_of_inttoptr_index_type(
; CHECK-NEXT: [[PTR:%.*]] = inttoptr i32 [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[PTR2:%.*]] = getelementptr i16, ptr addrspace(3) [[PTR]], i16 [[Y:%.*]]
+; CHECK-NEXT: [[PTR2:%.*]] = getelementptr [2 x i8], ptr addrspace(3) [[PTR]], i16 [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = ptrtoint ptr addrspace(3) [[PTR2]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -496,10 +496,10 @@ define i32 @ptrtoint_of_null_multiple_gep(i32 %x, i32 %y, i32 %z) {
define i32 @ptrtoint_of_null_multiple_gep_extra_use(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @ptrtoint_of_null_multiple_gep_extra_use(
-; CHECK-NEXT: [[PTR2:%.*]] = getelementptr i16, ptr null, i32 [[X:%.*]]
+; CHECK-NEXT: [[PTR2:%.*]] = getelementptr [2 x i8], ptr null, i32 [[X:%.*]]
; CHECK-NEXT: call void @use_ptr(ptr [[PTR2]])
-; CHECK-NEXT: [[PTR3:%.*]] = getelementptr nuw i32, ptr [[PTR2]], i32 [[Y:%.*]]
-; CHECK-NEXT: [[PTR4:%.*]] = getelementptr i64, ptr [[PTR3]], i32 [[Z:%.*]]
+; CHECK-NEXT: [[PTR3:%.*]] = getelementptr nuw [4 x i8], ptr [[PTR2]], i32 [[Y:%.*]]
+; CHECK-NEXT: [[PTR4:%.*]] = getelementptr [8 x i8], ptr [[PTR3]], i32 [[Z:%.*]]
; CHECK-NEXT: [[R:%.*]] = ptrtoint ptr [[PTR4]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -524,7 +524,7 @@ define i32 @ptrtoint_of_null_index_type(i16 %x) {
define <2 x i32> @ptrtoint_of_null_splat(<2 x i16> %x) {
; CHECK-LABEL: @ptrtoint_of_null_splat(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr i16, ptr addrspace(3) null, <2 x i16> [[X:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [2 x i8], ptr addrspace(3) null, <2 x i16> [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = ptrtoint <2 x ptr addrspace(3)> [[PTR]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
index 81052194b6b95..fc85eb562e5ea 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
@@ -266,7 +266,7 @@ define i32 @constant_through_array_as_ptrs() {
define float @canonicalize_addrspacecast(i32 %i) {
; CHECK-LABEL: @canonicalize_addrspacecast(
-; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds float, ptr addrspacecast (ptr addrspace(3) @shared_mem to ptr), i32 [[I:%.*]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds [4 x i8], ptr addrspacecast (ptr addrspace(3) @shared_mem to ptr), i32 [[I:%.*]]
; CHECK-NEXT: [[V:%.*]] = load float, ptr [[P]], align 4
; CHECK-NEXT: ret float [[V]]
;
diff --git a/llvm/test/Transforms/InstCombine/dependent-ivs.ll b/llvm/test/Transforms/InstCombine/dependent-ivs.ll
index bfd01c6b68941..8e8175a054074 100644
--- a/llvm/test/Transforms/InstCombine/dependent-ivs.ll
+++ b/llvm/test/Transforms/InstCombine/dependent-ivs.ll
@@ -606,7 +606,7 @@ define void @ptr_iv_non_i8_type(ptr %base, i64 %end) {
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[IV_PTR:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[IV]]
+; CHECK-NEXT: [[IV_PTR:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[IV]]
; CHECK-NEXT: call void @use.p0(ptr [[IV_PTR]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[END]]
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index d19fa41a5b0cd..62e6d7a723695 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1099,7 +1099,7 @@ define void @fmul_loop_invariant_fdiv(ptr %a, float %x) {
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i32 [[I_08]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[F:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[M:%.*]] = fdiv fast float [[F]], [[X:%.*]]
; CHECK-NEXT: store float [[M]], ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/InstCombine/gep-addrspace.ll b/llvm/test/Transforms/InstCombine/gep-addrspace.ll
index fdd3b2e25dbfc..6632a22f360a2 100644
--- a/llvm/test/Transforms/InstCombine/gep-addrspace.ll
+++ b/llvm/test/Transforms/InstCombine/gep-addrspace.ll
@@ -24,8 +24,8 @@ define void @func(ptr addrspace(1) nocapture %p) nounwind {
define void @keep_necessary_addrspacecast(i64 %i, ptr %out0, ptr %out1) {
; CHECK-LABEL: @keep_necessary_addrspacecast(
-; CHECK-NEXT: [[T1:%.*]] = getelementptr float, ptr addrspacecast (ptr addrspace(3) @array to ptr), i64 [[I:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = getelementptr float, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), i64 [[I]]
+; CHECK-NEXT: [[T1:%.*]] = getelementptr [4 x i8], ptr addrspacecast (ptr addrspace(3) @array to ptr), i64 [[I:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = getelementptr [4 x i8], ptr addrspacecast (ptr addrspace(3) @scalar to ptr), i64 [[I]]
; CHECK-NEXT: store ptr [[T1]], ptr [[OUT1:%.*]], align 4
; CHECK-NEXT: store ptr [[T2]], ptr [[OUT2:%.*]], align 4
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/gep-alias.ll b/llvm/test/Transforms/InstCombine/gep-alias.ll
index 0b573aa7c6967..d4f8e906b1031 100644
--- a/llvm/test/Transforms/InstCombine/gep-alias.ll
+++ b/llvm/test/Transforms/InstCombine/gep-alias.ll
@@ -10,7 +10,7 @@ define i32 @f(i64 %i) {
; CHECK-LABEL: define i32 @f(
; CHECK-SAME: i64 [[I:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr @x, i64 [[I]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr @x, i64 [[I]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[TMP0]]
;
diff --git a/llvm/test/Transforms/InstCombine/gep-canonicalize-constant-indices.ll b/llvm/test/Transforms/InstCombine/gep-canonicalize-constant-indices.ll
index 8a4898ae923a7..52371891cb954 100644
--- a/llvm/test/Transforms/InstCombine/gep-canonicalize-constant-indices.ll
+++ b/llvm/test/Transforms/InstCombine/gep-canonicalize-constant-indices.ll
@@ -14,8 +14,8 @@ declare void @use(i1)
define ptr @basic(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: @basic(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[A:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[A:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i64 [[B:%.*]]
; CHECK-NEXT: ret ptr [[TMP3]]
;
%1 = getelementptr inbounds i32, ptr %p, i64 1
@@ -27,7 +27,7 @@ define ptr @basic(ptr %p, i64 %a, i64 %b) {
; GEP with the last index being a constant should also be swapped.
define ptr @partialConstant1(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: @partialConstant1(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[B:%.*]]
; CHECK-NEXT: ret ptr [[TMP1]]
;
%1 = getelementptr inbounds [4 x i32], ptr %p, i64 %a, i64 1
@@ -38,7 +38,7 @@ define ptr @partialConstant1(ptr %p, i64 %a, i64 %b) {
; Negative test. GEP should not be swapped if the last index is not a constant.
define ptr @partialConstant2(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: @partialConstant2(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[B:%.*]]
; CHECK-NEXT: ret ptr [[TMP1]]
;
%1 = getelementptr inbounds [4 x i32], ptr %p, i64 1, i64 %a
@@ -50,7 +50,7 @@ define ptr @partialConstant2(ptr %p, i64 %a, i64 %b) {
; result = ((ptr) p + a) + 3
define ptr @merge(ptr %p, i64 %a) {
; CHECK-LABEL: @merge(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 12
; CHECK-NEXT: ret ptr [[TMP2]]
;
@@ -68,7 +68,7 @@ define ptr @nested(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: @nested(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[A]], [[B:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [2 x i8], ptr [[TMP1]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP3]], i64 160
; CHECK-NEXT: ret ptr [[TMP4]]
;
@@ -86,7 +86,7 @@ define ptr @multipleUses1(ptr %p) {
; CHECK-LABEL: @multipleUses1(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 4
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[TMP2]]
; CHECK-NEXT: ret ptr [[TMP3]]
;
%1 = getelementptr inbounds i32, ptr %p, i64 1
@@ -99,7 +99,7 @@ define ptr @multipleUses1(ptr %p) {
define ptr @multipleUses2(ptr %p, i64 %a) {
; CHECK-LABEL: @multipleUses2(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[A:%.*]]
; CHECK-NEXT: call void @use(ptr nonnull [[TMP2]])
; CHECK-NEXT: ret ptr [[TMP2]]
;
@@ -114,7 +114,7 @@ define ptr @multipleUses3(ptr %p) {
; CHECK-LABEL: @multipleUses3(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 4
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[TMP2]]
; CHECK-NEXT: ret ptr [[TMP3]]
;
%1 = getelementptr inbounds i32, ptr %p, i64 1
@@ -125,7 +125,7 @@ define ptr @multipleUses3(ptr %p) {
define ptr @merge_nuw(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nuw(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -137,7 +137,7 @@ define ptr @merge_nuw(ptr %p, i64 %a) {
define ptr @merge_nuw_inbounds(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nuw_inbounds(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -151,7 +151,7 @@ define ptr @merge_nuw_inbounds(ptr %p, i64 %a) {
; overflow.
define ptr @merge_nuw_nusw(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nuw_nusw(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -164,7 +164,7 @@ define ptr @merge_nuw_nusw(ptr %p, i64 %a) {
; Can't preserve nusw on the final GEP
define ptr @merge_nuw_nusw_overflow(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nuw_nusw_overflow(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw i8, ptr [[GEP2]], i64 -2305843009213693952
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -176,7 +176,7 @@ define ptr @merge_nuw_nusw_overflow(ptr %p, i64 %a) {
define ptr @merge_missing_nuw1(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_missing_nuw1(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -188,7 +188,7 @@ define ptr @merge_missing_nuw1(ptr %p, i64 %a) {
define ptr @merge_missing_nuw2(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_missing_nuw2(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -200,7 +200,7 @@ define ptr @merge_missing_nuw2(ptr %p, i64 %a) {
define ptr @merge_missing_nuw3(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_missing_nuw3(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -212,7 +212,7 @@ define ptr @merge_missing_nuw3(ptr %p, i64 %a) {
define ptr @merge_nuw_missing_inbounds(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nuw_missing_inbounds(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -224,7 +224,7 @@ define ptr @merge_nuw_missing_inbounds(ptr %p, i64 %a) {
define ptr @merge_nuw_missing_nusw(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nuw_missing_nusw(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -236,7 +236,7 @@ define ptr @merge_nuw_missing_nusw(ptr %p, i64 %a) {
define ptr @merge_inbounds_missing_nuw(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_inbounds_missing_nuw(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
@@ -248,7 +248,7 @@ define ptr @merge_inbounds_missing_nuw(ptr %p, i64 %a) {
define ptr @merge_nusw_missing_nuw(ptr %p, i64 %a) {
; CHECK-LABEL: @merge_nusw_missing_nuw(
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[GEP2]], i64 5
; CHECK-NEXT: ret ptr [[GEP3]]
;
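
The merge-style checks above all expect the same canonical shape: the variable index is applied first, over the byte-array type, and every constant index is folded into a single trailing i8 GEP. A minimal sketch of that shape (names are illustrative):

define ptr @merge_shape_sketch(ptr %p, i64 %a) {
  ; Variable index over the canonical [sizeof(i32) x i8] type first ...
  %g = getelementptr [4 x i8], ptr %p, i64 %a
  ; ... then the accumulated constant i32 indices as one byte offset
  ; (3 x i32 = 12 bytes), matching the TMP1/TMP2 pair in @merge above.
  %r = getelementptr i8, ptr %g, i64 12
  ret ptr %r
}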
diff --git a/llvm/test/Transforms/InstCombine/gep-combine-loop-invariant.ll b/llvm/test/Transforms/InstCombine/gep-combine-loop-invariant.ll
index 9b266b097057c..081ba679bf53e 100644
--- a/llvm/test/Transforms/InstCombine/gep-combine-loop-invariant.ll
+++ b/llvm/test/Transforms/InstCombine/gep-combine-loop-invariant.ll
@@ -30,7 +30,7 @@ define i32 @foo(ptr nocapture readnone %match, i32 %cur_match, i32 %best_len, i3
; CHECK-NEXT: [[CHAIN_LENGTH_ADDR_08:%.*]] = phi i32 [ [[CHAIN_LENGTH:%.*]], [[IF_THEN_LR_PH]] ], [ [[DEC:%.*]], [[DO_BODY]] ]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[CUR_MATCH_ADDR_09]], [[WMASK:%.*]]
; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[AND]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[PREV:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PREV:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[I4]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP4:%.*]] = icmp ugt i32 [[I4]], [[LIMIT:%.*]]
; CHECK-NEXT: br i1 [[CMP4]], label [[LAND_LHS_TRUE:%.*]], label [[DO_END]]
@@ -93,9 +93,9 @@ define void @PR37005(ptr %base, ptr %in) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[E1:%.*]] = getelementptr inbounds ptr, ptr [[IN:%.*]], i64 undef
+; CHECK-NEXT: [[E1:%.*]] = getelementptr inbounds [8 x i8], ptr [[IN:%.*]], i64 undef
; CHECK-NEXT: [[E2:%.*]] = getelementptr inbounds nuw i8, ptr [[E1]], i64 48
-; CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds ptr, ptr [[E2]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds [8 x i8], ptr [[E2]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PI1:%.*]] = ptrtoint <2 x ptr> [[E4]] to <2 x i64>
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i64> [[PI1]], splat (i64 14)
; CHECK-NEXT: [[SL1:%.*]] = and <2 x i64> [[TMP0]], splat (i64 1125899906842496)
@@ -125,7 +125,7 @@ define void @PR37005_2(ptr %base, ptr %in) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[E1:%.*]] = getelementptr inbounds ptr, ptr [[IN:%.*]], i64 undef
+; CHECK-NEXT: [[E1:%.*]] = getelementptr inbounds [8 x i8], ptr [[IN:%.*]], i64 undef
; CHECK-NEXT: [[E2:%.*]] = getelementptr inbounds nuw i8, ptr [[E1]], i64 48
; CHECK-NEXT: [[PI1:%.*]] = ptrtoint ptr [[E2]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[PI1]], 14
@@ -155,9 +155,9 @@ define void @PR37005_3(<2 x ptr> %base, ptr %in) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[E1:%.*]] = getelementptr inbounds ptr, ptr [[IN:%.*]], i64 undef
+; CHECK-NEXT: [[E1:%.*]] = getelementptr inbounds [8 x i8], ptr [[IN:%.*]], i64 undef
; CHECK-NEXT: [[E2:%.*]] = getelementptr inbounds nuw i8, ptr [[E1]], i64 48
-; CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds ptr, ptr [[E2]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds [8 x i8], ptr [[E2]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PI1:%.*]] = ptrtoint <2 x ptr> [[E4]] to <2 x i64>
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i64> [[PI1]], splat (i64 14)
; CHECK-NEXT: [[SL1:%.*]] = and <2 x i64> [[TMP0]], splat (i64 1125899906842496)
@@ -212,7 +212,7 @@ define float @gep_cross_loop(ptr %_arg_, ptr %_arg_3, float %_arg_8) {
; CHECK-LABEL: @gep_cross_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I:%.*]] = load i64, ptr [[_ARG_:%.*]], align 8
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds float, ptr [[_ARG_3:%.*]], i64 [[I]]
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[_ARG_3:%.*]], i64 [[I]]
; CHECK-NEXT: br label [[FOR_COND_I:%.*]]
; CHECK: for.cond.i:
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD11_I:%.*]], [[FOR_BODY_I:%.*]] ]
@@ -222,7 +222,7 @@ define float @gep_cross_loop(ptr %_arg_, ptr %_arg_3, float %_arg_8) {
; CHECK: for.cond.i.i.i.preheader:
; CHECK-NEXT: ret float [[SUM]]
; CHECK: for.body.i:
-; CHECK-NEXT: [[ARRAYIDX_I84_I:%.*]] = getelementptr inbounds nuw float, ptr [[ADD_PTR]], i64 [[IDX]]
+; CHECK-NEXT: [[ARRAYIDX_I84_I:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ADD_PTR]], i64 [[IDX]]
; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[ARRAYIDX_I84_I]], align 4
; CHECK-NEXT: [[ADD_I]] = fadd fast float [[SUM]], [[I1]]
; CHECK-NEXT: [[ADD11_I]] = add nuw nsw i64 [[IDX]], 1
diff --git a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
index 4cd711a2582ae..5027af6941f3b 100644
--- a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
@@ -73,8 +73,8 @@ define void @test_evaluate_gep_as_ptrs_array(ptr addrspace(2) %B) {
define ptr @test4(ptr %I, i32 %C, i32 %D) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[I:%.*]], i32 [[C:%.*]]
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[A]], i32 [[D:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [4 x i8], ptr [[I:%.*]], i32 [[C:%.*]]
+; CHECK-NEXT: [[B:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[D:%.*]]
; CHECK-NEXT: ret ptr [[B]]
;
%A = getelementptr i32, ptr %I, i32 %C
@@ -137,7 +137,7 @@ define ptr @test7(i16 %Idx) {
@Array = external global [40 x i32]
define ptr @test8(i32 %X) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr @Array, i32 [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [4 x i8], ptr @Array, i32 [[X:%.*]]
; CHECK-NEXT: ret ptr [[A]]
;
%A = getelementptr i32, ptr @Array, i32 %X
@@ -147,7 +147,7 @@ define ptr @test8(i32 %X) {
define ptr @test9(ptr %base, i8 %ind) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[IND:%.*]] to i32
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i32 [[TMP1]]
; CHECK-NEXT: ret ptr [[RES]]
;
%res = getelementptr i32, ptr %base, i8 %ind
diff --git a/llvm/test/Transforms/InstCombine/gep-fold-chained-const-select.ll b/llvm/test/Transforms/InstCombine/gep-fold-chained-const-select.ll
index 5dd220abda648..d780ef8ed94de 100644
--- a/llvm/test/Transforms/InstCombine/gep-fold-chained-const-select.ll
+++ b/llvm/test/Transforms/InstCombine/gep-fold-chained-const-select.ll
@@ -91,7 +91,7 @@ define ptr @src_fail_different_type(i32 %arg0, ptr %arg1) {
; CHECK-NEXT: [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[ARG1:%.*]], i64 8148
; CHECK-NEXT: [[V1:%.*]] = icmp sgt i32 [[ARG0:%.*]], 3
; CHECK-NEXT: [[V2:%.*]] = select i1 [[V1]], i64 55104, i64 21304
-; CHECK-NEXT: [[V3:%.*]] = getelementptr i16, ptr [[V0]], i64 [[V2]]
+; CHECK-NEXT: [[V3:%.*]] = getelementptr [2 x i8], ptr [[V0]], i64 [[V2]]
; CHECK-NEXT: ret ptr [[V3]]
;
%v0 = getelementptr inbounds nuw i8, ptr %arg1, i64 8148
diff --git a/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll b/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll
index ecebaa7d9ae5d..82be5160f1763 100644
--- a/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll
+++ b/llvm/test/Transforms/InstCombine/gep-merge-constant-indices.ll
@@ -118,7 +118,7 @@ define ptr @structStruct(ptr %p) {
; result = (ptr) &((struct.B*) p)[i].member1 + 2
define ptr @appendIndex(ptr %p, i64 %i) {
; CHECK-LABEL: @appendIndex(
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[P:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [140 x i8], ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 6
; CHECK-NEXT: ret ptr [[TMP1]]
;
@@ -174,7 +174,7 @@ define ptr @partialConstant3(ptr %p) {
; result = &((struct.C*) p + a).member2
define ptr @partialConstantMemberAliasing1(ptr %p, i64 %a) {
; CHECK-LABEL: @partialConstantMemberAliasing1(
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [12 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
; CHECK-NEXT: ret ptr [[TMP1]]
;
@@ -187,7 +187,7 @@ define ptr @partialConstantMemberAliasing1(ptr %p, i64 %a) {
; address of another member.
define ptr @partialConstantMemberAliasing2(ptr %p, i64 %a) {
; CHECK-LABEL: @partialConstantMemberAliasing2(
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [12 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 5
; CHECK-NEXT: ret ptr [[TMP2]]
;
@@ -200,7 +200,7 @@ define ptr @partialConstantMemberAliasing2(ptr %p, i64 %a) {
; range of the object currently pointed by the non-constant GEP.
define ptr @partialConstantMemberAliasing3(ptr %p, i64 %a) {
; CHECK-LABEL: @partialConstantMemberAliasing3(
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [12 x i8], ptr [[P:%.*]], i64 [[A:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 12
; CHECK-NEXT: ret ptr [[TMP2]]
;
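
For the struct cases, the same sizeof rule applies to aggregates: the variable-index GEP uses [sizeof(%T) x i8] ([140 x i8] for %struct.B, [12 x i8] for %struct.C) and the member offset stays a trailing byte GEP. A sketch of the partialConstantMemberAliasing1 shape; the struct layout below is an assumed stand-in for the real %struct.C, chosen only to be 12 bytes with a member at offset 8:

%struct.C.assumed = type { i32, i32, i32 } ; assumption, not the test's definition

; &p[%a].third_member == p + 12*%a + 8, expressed in the canonical form.
define ptr @member_shape_sketch(ptr %p, i64 %a) {
  %row = getelementptr inbounds [12 x i8], ptr %p, i64 %a
  %member = getelementptr inbounds nuw i8, ptr %row, i64 8
  ret ptr %member
}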
diff --git a/llvm/test/Transforms/InstCombine/gep-sext.ll b/llvm/test/Transforms/InstCombine/gep-sext.ll
index c1998776bde2a..3c50194082d44 100644
--- a/llvm/test/Transforms/InstCombine/gep-sext.ll
+++ b/llvm/test/Transforms/InstCombine/gep-sext.ll
@@ -8,7 +8,7 @@ define void @test(ptr %p, i32 %index) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: ptr [[P:%.*]], i32 [[INDEX:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP1]]
+; CHECK-NEXT: [[ADDR:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
; CHECK-NEXT: call void @use(i32 [[VAL]])
; CHECK-NEXT: ret void
@@ -23,7 +23,7 @@ define void @test2(ptr %p, i32 %index) {
; CHECK-LABEL: define void @test2(
; CHECK-SAME: ptr [[P:%.*]], i32 [[INDEX:%.*]]) {
; CHECK-NEXT: [[I:%.*]] = zext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[ADDR:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
; CHECK-NEXT: call void @use(i32 [[VAL]])
; CHECK-NEXT: ret void
@@ -43,7 +43,7 @@ define void @test3(ptr %p, i32 %index) {
; CHECK-NEXT: [[ADDR_FIXED:%.*]] = getelementptr i8, ptr [[P]], i64 352
; CHECK-NEXT: [[VAL_FIXED:%.*]] = load i32, ptr [[ADDR_FIXED]], align 4, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[VAL_FIXED]] to i64
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[ADDR_BEGIN]], i64 [[TMP1]]
+; CHECK-NEXT: [[ADDR:%.*]] = getelementptr [4 x i8], ptr [[ADDR_BEGIN]], i64 [[TMP1]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
; CHECK-NEXT: call void @use(i32 [[VAL]])
; CHECK-NEXT: ret void
@@ -64,7 +64,7 @@ define void @test4(ptr %p, i32 %index) {
; CHECK-NEXT: [[ADDR_FIXED:%.*]] = getelementptr i8, ptr [[P]], i64 352
; CHECK-NEXT: [[VAL_FIXED:%.*]] = load i32, ptr [[ADDR_FIXED]], align 4, !range [[RNG0]]
; CHECK-NEXT: [[I:%.*]] = zext nneg i32 [[VAL_FIXED]] to i64
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[ADDR_BEGIN]], i64 [[I]]
+; CHECK-NEXT: [[ADDR:%.*]] = getelementptr [4 x i8], ptr [[ADDR_BEGIN]], i64 [[I]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
; CHECK-NEXT: call void @use(i32 [[VAL]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/gep-srem-to-and-deref.ll b/llvm/test/Transforms/InstCombine/gep-srem-to-and-deref.ll
index 9f529dad5d3d1..5355a9a5152c9 100644
--- a/llvm/test/Transforms/InstCombine/gep-srem-to-and-deref.ll
+++ b/llvm/test/Transforms/InstCombine/gep-srem-to-and-deref.ll
@@ -41,7 +41,7 @@ define ptr @pos_not_i8(ptr %foo, i64 %x) {
; CHECK-LABEL: define ptr @pos_not_i8(
; CHECK-SAME: ptr [[FOO:%.*]], i64 [[X:%.*]]) {
; CHECK-NEXT: [[IDX1:%.*]] = and i64 [[X]], 3
-; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds nuw i32, ptr [[FOO]], i64 [[IDX1]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[FOO]], i64 [[IDX1]]
; CHECK-NEXT: ret ptr [[P2]]
;
%idx = srem i64 %x, 4
diff --git a/llvm/test/Transforms/InstCombine/gep-vector-indices.ll b/llvm/test/Transforms/InstCombine/gep-vector-indices.ll
index ce584e0400cd5..53ff8bf1da262 100644
--- a/llvm/test/Transforms/InstCombine/gep-vector-indices.ll
+++ b/llvm/test/Transforms/InstCombine/gep-vector-indices.ll
@@ -51,7 +51,7 @@ define ptr @vector_splat_indices_nxv2i64_ext0_nusw(ptr %a) {
define ptr @vector_indices_v2i64_ext0(ptr %a, <2 x i64> %indices) {
; CHECK-LABEL: @vector_indices_v2i64_ext0(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i64> [[INDICES:%.*]], i64 0
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[RES]]
;
%gep = getelementptr i32, ptr %a, <2 x i64> %indices
@@ -62,7 +62,7 @@ define ptr @vector_indices_v2i64_ext0(ptr %a, <2 x i64> %indices) {
define ptr @vector_indices_nxv1i64_ext0(ptr %a, <vscale x 1 x i64> %indices) {
; CHECK-LABEL: @vector_indices_nxv1i64_ext0(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <vscale x 1 x i64> [[INDICES:%.*]], i64 0
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[RES]]
;
%gep = getelementptr i32, ptr %a, <vscale x 1 x i64> %indices
@@ -73,7 +73,7 @@ define ptr @vector_indices_nxv1i64_ext0(ptr %a, <vscale x 1 x i64> %indices) {
define ptr @vector_splat_ptrs_v2i64_ext0(ptr %a, i64 %index) {
; CHECK-LABEL: @vector_splat_ptrs_v2i64_ext0(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[INDEX:%.*]]
; CHECK-NEXT: ret ptr [[RES]]
;
%tmp = insertelement <2 x ptr> poison, ptr %a, i32 0
@@ -86,7 +86,7 @@ define ptr @vector_splat_ptrs_v2i64_ext0(ptr %a, i64 %index) {
define ptr @vector_splat_ptrs_nxv2i64_ext0(ptr %a, i64 %index) {
; CHECK-LABEL: @vector_splat_ptrs_nxv2i64_ext0(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[INDEX:%.*]]
; CHECK-NEXT: ret ptr [[RES]]
;
%tmp = insertelement <vscale x 2 x ptr> poison, ptr %a, i32 0
@@ -123,7 +123,7 @@ define ptr @vector_struct2_splat_indices_v4i64_ext1(ptr %a) {
define ptr @vector_indices_nxv2i64_ext3(ptr %a, <vscale x 2 x i64> %indices) {
; CHECK-LABEL: @vector_indices_nxv2i64_ext3(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i64 3
; CHECK-NEXT: ret ptr [[RES]]
;
@@ -134,7 +134,7 @@ define ptr @vector_indices_nxv2i64_ext3(ptr %a, <vscale x 2 x i64> %indices) {
define ptr @vector_indices_nxv2i64_extN(ptr %a, <vscale x 2 x i64> %indices, i32 %N) {
; CHECK-LABEL: @vector_indices_nxv2i64_extN(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i32 [[N:%.*]]
; CHECK-NEXT: ret ptr [[RES]]
;
@@ -145,7 +145,7 @@ define ptr @vector_indices_nxv2i64_extN(ptr %a, <vscale x 2 x i64> %indices, i32
define void @vector_indices_nxv2i64_mulitple_use(ptr %a, <vscale x 2 x i64> %indices, ptr %b, ptr %c) {
; CHECK-LABEL: @vector_indices_nxv2i64_mulitple_use(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[LANE0:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i64 0
; CHECK-NEXT: [[LANE1:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i64 1
; CHECK-NEXT: store ptr [[LANE0]], ptr [[B:%.*]], align 8
@@ -162,7 +162,7 @@ define void @vector_indices_nxv2i64_mulitple_use(ptr %a, <vscale x 2 x i64> %ind
define ptr @vector_ptrs_and_indices_ext0(<vscale x 4 x ptr> %a, <vscale x 4 x i64> %indices) {
; CHECK-LABEL: @vector_ptrs_and_indices_ext0(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <vscale x 4 x ptr> [[A:%.*]], <vscale x 4 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], <vscale x 4 x ptr> [[A:%.*]], <vscale x 4 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 4 x ptr> [[GEP]], i64 0
; CHECK-NEXT: ret ptr [[RES]]
;
diff --git a/llvm/test/Transforms/InstCombine/gep-vector.ll b/llvm/test/Transforms/InstCombine/gep-vector.ll
index 920147b3576d2..50c5d734fc9ff 100644
--- a/llvm/test/Transforms/InstCombine/gep-vector.ll
+++ b/llvm/test/Transforms/InstCombine/gep-vector.ll
@@ -31,8 +31,8 @@ define <2 x ptr> @vectorindex3() {
define ptr @bitcast_vec_to_array_gep(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_gep(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [7 x i32], ptr [[GEP_SPLIT:%.*]], i64 [[Z:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr [[GEP]], i64 [[Z1:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [28 x i8], ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP1]]
;
%gep = getelementptr [7 x i32], ptr %x, i64 %y, i64 %z
@@ -43,8 +43,8 @@ define ptr @bitcast_vec_to_array_gep(ptr %x, i64 %y, i64 %z) {
define ptr @bitcast_array_to_vec_gep(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_array_to_vec_gep(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <3 x i32>, ptr [[GEP_SPLIT:%.*]], i64 [[Z:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[GEP]], i64 [[Z1:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP1]]
;
%gep = getelementptr inbounds <3 x i32>, ptr %x, i64 %y, i64 %z
@@ -55,8 +55,8 @@ define ptr @bitcast_array_to_vec_gep(ptr %x, i64 %y, i64 %z) {
define ptr @bitcast_vec_to_array_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_gep_matching_alloc_size(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr [[GEP_SPLIT:%.*]], i64 [[Z:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr [[GEP]], i64 [[Z1:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [16 x i8], ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP1]]
;
%gep = getelementptr [4 x i32], ptr %x, i64 %y, i64 %z
@@ -67,8 +67,8 @@ define ptr @bitcast_vec_to_array_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z)
define ptr @bitcast_array_to_vec_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_array_to_vec_gep_matching_alloc_size(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <4 x i32>, ptr [[GEP_SPLIT:%.*]], i64 [[Z:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[GEP]], i64 [[Z1:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP1]]
;
%gep = getelementptr inbounds <4 x i32>, ptr %x, i64 %y, i64 %z
@@ -80,8 +80,8 @@ define ptr @bitcast_array_to_vec_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z)
define ptr addrspace(3) @bitcast_vec_to_array_addrspace(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_addrspace(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [7 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [28 x i8], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
@@ -94,8 +94,8 @@ define ptr addrspace(3) @bitcast_vec_to_array_addrspace(ptr %x, i64 %y, i64 %z)
define ptr addrspace(3) @inbounds_bitcast_vec_to_array_addrspace(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @inbounds_bitcast_vec_to_array_addrspace(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [7 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [28 x i8], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
@@ -108,8 +108,8 @@ define ptr addrspace(3) @inbounds_bitcast_vec_to_array_addrspace(ptr %x, i64 %y,
define ptr addrspace(3) @bitcast_vec_to_array_addrspace_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_addrspace_matching_alloc_size(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [4 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [16 x i8], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
@@ -122,8 +122,8 @@ define ptr addrspace(3) @bitcast_vec_to_array_addrspace_matching_alloc_size(ptr
define ptr addrspace(3) @inbounds_bitcast_vec_to_array_addrspace_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @inbounds_bitcast_vec_to_array_addrspace_matching_alloc_size(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(3) [[GEP_SPLIT]], i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
diff --git a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
index 42a4619645994..64484dd72ad3f 100644
--- a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
+++ b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
@@ -8,7 +8,7 @@ define ptr @test_zero(ptr %base, i64 %a) {
; CHECK-LABEL: define ptr @test_zero(
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -23,7 +23,7 @@ define ptr @test_nonzero(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 4
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -37,7 +37,7 @@ define ptr @test_or_disjoint(ptr %base, i64 %a) {
; CHECK-LABEL: define ptr @test_or_disjoint(
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -53,7 +53,7 @@ define ptr @test_zero_multiuse_index(ptr %base, i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[INDEX:%.*]] = add i64 [[A]], 1
; CHECK-NEXT: call void @use64(i64 [[INDEX]])
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -70,7 +70,7 @@ define ptr @test_zero_multiuse_ptr(ptr %base, i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
; CHECK-NEXT: call void @useptr(ptr [[P1]])
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -86,7 +86,7 @@ define ptr @test_zero_sext_add_nsw(ptr %base, i32 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i32 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[A]] to i64
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[TMP0]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[TMP0]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -101,7 +101,7 @@ define ptr @test_zero_trunc_add(ptr %base, i128 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i128 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i128 [[A]] to i64
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[TMP0]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[BASE]], i64 [[TMP0]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -116,7 +116,7 @@ define ptr @test_non_i8(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P1]], i64 [[A]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[A]]
; CHECK-NEXT: ret ptr [[TMP0]]
;
entry:
@@ -131,7 +131,7 @@ define ptr @test_non_const(ptr %base, i64 %a, i64 %b) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[B]]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P1]], i64 [[A]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[A]]
; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 4
; CHECK-NEXT: ret ptr [[P2]]
;
@@ -148,7 +148,7 @@ define ptr @test_too_many_indices(ptr %base, i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[B]]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[P1]], i64 36
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -164,7 +164,7 @@ define ptr @test_wrong_op(ptr %base, i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
; CHECK-NEXT: [[INDEX:%.*]] = xor i64 [[A]], 1
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[P1]], i64 [[INDEX]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -181,7 +181,7 @@ define ptr @test_sext_add_without_nsw(ptr %base, i32 %a) {
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
; CHECK-NEXT: [[INDEX:%.*]] = add i32 [[A]], 1
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[P1]], i64 [[TMP0]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[TMP0]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -197,7 +197,7 @@ define ptr @test_or_without_disjoint(ptr %base, i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
; CHECK-NEXT: [[INDEX:%.*]] = or i64 [[A]], 1
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[P1]], i64 [[INDEX]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -212,7 +212,7 @@ define ptr @test_smul_overflow(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -12
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P1]], i64 [[A]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[A]]
; CHECK-NEXT: ret ptr [[TMP0]]
;
entry:
@@ -227,7 +227,7 @@ define ptr @test_sadd_overflow(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -9223372036854775808
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P1]], i64 [[A]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[A]]
; CHECK-NEXT: ret ptr [[TMP0]]
;
entry:
@@ -244,7 +244,7 @@ define ptr @test_nonzero_multiuse_index(ptr %base, i64 %a) {
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
; CHECK-NEXT: [[INDEX:%.*]] = add i64 [[A]], 2
; CHECK-NEXT: call void @use64(i64 [[INDEX]])
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[P1]], i64 [[INDEX]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -261,7 +261,7 @@ define ptr @test_nonzero_multiuse_ptr(ptr %base, i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[BASE]], i64 -4
; CHECK-NEXT: call void @useptr(ptr [[P1]])
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P1]], i64 [[A]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr [[P1]], i64 [[A]]
; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8
; CHECK-NEXT: ret ptr [[P2]]
;
@@ -297,7 +297,7 @@ define ptr @test_all_nuw(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -312,7 +312,7 @@ define ptr @test_all_partial_nuw1(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -327,7 +327,7 @@ define ptr @test_all_partial_nuw2(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -342,7 +342,7 @@ define ptr @test_all_partial_nuw3(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -357,7 +357,7 @@ define ptr @test_all_nuw_disjoint(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -372,7 +372,7 @@ define ptr @test_all_inbounds_nuw(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -387,7 +387,7 @@ define ptr @test_all_partial_inbounds1(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -402,7 +402,7 @@ define ptr @test_all_partial_inbounds2(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -417,7 +417,7 @@ define ptr @test_all_inbounds_partial_nuw1(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 7
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -432,7 +432,7 @@ define ptr @test_all_inbounds_partial_nuw2(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -447,7 +447,7 @@ define ptr @test_all_inbounds_partial_nuw3(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
@@ -462,7 +462,7 @@ define ptr @test_all_nusw_nuw(ptr %base, i64 %a) {
; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw [4 x i8], ptr [[TMP0]], i64 [[A]]
; CHECK-NEXT: ret ptr [[P2]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/gepphigep.ll b/llvm/test/Transforms/InstCombine/gepphigep.ll
index 6a6b7708bbfc9..0709bdd59c4f9 100644
--- a/llvm/test/Transforms/InstCombine/gepphigep.ll
+++ b/llvm/test/Transforms/InstCombine/gepphigep.ll
@@ -12,16 +12,16 @@ define i32 @test1(ptr %dm, i1 %c, i64 %idx1, i64 %idx2) {
; CHECK-NEXT: [[INST1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[INST10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[INST1]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[INST10:%.*]] = getelementptr inbounds [8 x i8], ptr [[INST1]], i64 [[IDX1:%.*]]
; CHECK-NEXT: store i32 0, ptr [[INST10]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[INST20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[INST20:%.*]] = getelementptr inbounds [8 x i8], ptr [[INST1]], i64 [[IDX2:%.*]]
; CHECK-NEXT: store i32 0, ptr [[INST20]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[IDX1]], [[BB1]] ], [ [[IDX2]], [[BB2]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[INST1]], i64 [[TMP0]]
; CHECK-NEXT: [[INST24:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST24]], align 4
; CHECK-NEXT: ret i32 [[INST25]]
@@ -51,9 +51,9 @@ define i32 @test2(ptr %dm, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[INST1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
-; CHECK-NEXT: [[INST10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[INST1]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[INST10:%.*]] = getelementptr inbounds [8 x i8], ptr [[INST1]], i64 [[IDX1:%.*]]
; CHECK-NEXT: store i32 0, ptr [[INST10]], align 4
-; CHECK-NEXT: [[INST20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[INST20:%.*]] = getelementptr inbounds [8 x i8], ptr [[INST1]], i64 [[IDX2:%.*]]
; CHECK-NEXT: store i32 0, ptr [[INST20]], align 4
; CHECK-NEXT: [[INST24:%.*]] = getelementptr inbounds nuw i8, ptr [[INST10]], i64 4
; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST24]], align 4
@@ -77,12 +77,12 @@ define i32 @test3(ptr %dm, i1 %c, i64 %idx1, i64 %idx2, i64 %idx3) personality p
; CHECK-NEXT: bb:
; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[INST1_SPLIT:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], ptr [[DM:%.*]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[INST1_SPLIT:%.*]] = getelementptr inbounds [36 x i8], ptr [[DM:%.*]], i64 [[IDX1:%.*]]
; CHECK-NEXT: [[INST1:%.*]] = getelementptr inbounds nuw i8, ptr [[INST1_SPLIT]], i64 4
; CHECK-NEXT: store i32 0, ptr [[INST1]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[INST2_SPLIT:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[INST2_SPLIT:%.*]] = getelementptr inbounds [36 x i8], ptr [[DM]], i64 [[IDX2:%.*]]
; CHECK-NEXT: [[INST12:%.*]] = getelementptr inbounds nuw i8, ptr [[INST2_SPLIT]], i64 8
; CHECK-NEXT: store i32 0, ptr [[INST12]], align 4
; CHECK-NEXT: br label [[BB3]]
@@ -95,8 +95,8 @@ define i32 @test3(ptr %dm, i1 %c, i64 %idx1, i64 %idx2, i64 %idx3) personality p
; CHECK: bb5:
; CHECK-NEXT: [[INST27:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: catch ptr @_ZTIi
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[TMP0]]
-; CHECK-NEXT: [[INST34_SPLIT:%.*]] = getelementptr [[STRUCT4:%.*]], ptr [[TMP1]], i64 [[IDX3:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [36 x i8], ptr [[DM]], i64 [[TMP0]]
+; CHECK-NEXT: [[INST34_SPLIT:%.*]] = getelementptr [16 x i8], ptr [[TMP1]], i64 [[IDX3:%.*]]
; CHECK-NEXT: [[INST35:%.*]] = getelementptr i8, ptr [[INST34_SPLIT]], i64 16
; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST35]], align 4
; CHECK-NEXT: ret i32 [[INST25]]
@@ -215,7 +215,7 @@ define void @test5(ptr %idx, ptr %in) #0 {
; CHECK: while.cond.57:
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR34]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
-; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds nuw i16, ptr [[IDX:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[IDX:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX61]], align 2
; CHECK-NEXT: [[AND63:%.*]] = and i16 [[TMP5]], 2048
; CHECK-NEXT: [[TOBOOL64:%.*]] = icmp eq i16 [[AND63]], 0
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 92b76c5d1b46a..ba709adeea176 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -125,8 +125,8 @@ define void @test_overaligned_vec(i8 %B) {
define ptr @test7(ptr %I, i64 %C, i64 %D) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[I:%.*]], i64 [[C:%.*]]
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[A]], i64 [[D:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [4 x i8], ptr [[I:%.*]], i64 [[C:%.*]]
+; CHECK-NEXT: [[B:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[D:%.*]]
; CHECK-NEXT: ret ptr [[B]]
;
%A = getelementptr i32, ptr %I, i64 %C
@@ -247,10 +247,10 @@ define <2 x i1> @test13_vector2(i64 %X, <2 x ptr> %P) nounwind {
define <2 x i1> @test13_fixed_fixed(i64 %X, ptr %P, <2 x i64> %y) nounwind {
; CHECK-LABEL: @test13_fixed_fixed(
-; CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x ptr> poison, ptr [[A1]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x ptr> [[DOTSPLATINSERT]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[B:%.*]] = getelementptr inbounds <2 x i64>, ptr [[P]], <2 x i64> [[Y:%.*]]
+; CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], <2 x i64> [[Y:%.*]]
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x ptr> [[DOTSPLAT]], [[B]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
@@ -383,7 +383,7 @@ define ptr @test14(i32 %idx) {
@Array = external global [40 x i32]
define ptr @test15(i64 %X) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr @Array, i64 [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [4 x i8], ptr @Array, i64 [[X:%.*]]
; CHECK-NEXT: ret ptr [[A]]
;
%A = getelementptr i32, ptr @Array, i64 %X
@@ -393,7 +393,7 @@ define ptr @test15(i64 %X) {
define ptr @test_index_canon(ptr %X, i32 %Idx) {
; CHECK-LABEL: @test_index_canon(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT: [[R:%.*]] = getelementptr i32, ptr [[X:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr [4 x i8], ptr [[X:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[R]]
;
%R = getelementptr i32, ptr %X, i32 %Idx
@@ -403,7 +403,7 @@ define ptr @test_index_canon(ptr %X, i32 %Idx) {
define ptr @test_index_canon_inbounds(ptr %X, i32 %Idx) {
; CHECK-LABEL: @test_index_canon_inbounds(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT: [[R:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[R]]
;
%R = getelementptr inbounds i32, ptr %X, i32 %Idx
@@ -413,7 +413,7 @@ define ptr @test_index_canon_inbounds(ptr %X, i32 %Idx) {
define ptr @test_index_canon_nusw_nuw(ptr %X, i32 %Idx) {
; CHECK-LABEL: @test_index_canon_nusw_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
-; CHECK-NEXT: [[R:%.*]] = getelementptr nusw nuw i32, ptr [[X:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr nusw nuw [4 x i8], ptr [[X:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[R]]
;
%R = getelementptr nusw nuw i32, ptr %X, i32 %Idx
@@ -680,7 +680,7 @@ entry:
define i32 @test28() nounwind {
; CHECK-LABEL: @test28(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ORIENTATIONS:%.*]] = alloca [1 x [1 x %struct.x]], align 8
+; CHECK-NEXT: [[ORIENTATIONS:%.*]] = alloca [1 x [1 x [[STRUCT_X:%.*]]]], align 8
; CHECK-NEXT: [[T3:%.*]] = call i32 @puts(ptr noundef nonnull dereferenceable(1) @.str) #[[ATTR0]]
; CHECK-NEXT: br label [[BB10:%.*]]
; CHECK: bb10:
@@ -760,7 +760,7 @@ define i32 @test30(i32 %m, i32 %n) nounwind {
; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, i64 [[TMP0]], align 4
; CHECK-NEXT: call void @test30f(ptr nonnull [[TMP1]]) #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[M:%.*]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: ret i32 [[TMP4]]
;
@@ -917,7 +917,7 @@ define i1 @test37() nounwind {
define ptr @test38(ptr %I, i32 %n) {
; CHECK-LABEL: @test38(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[N:%.*]] to i64
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[I:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [4 x i8], ptr [[I:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[A]]
;
%A = getelementptr i32, ptr %I, i32 %n
@@ -1138,7 +1138,7 @@ define ptr @test43i(ptr %c1, ptr %c2) {
; CHECK-NEXT: [[PTRTOINT:%.*]] = ptrtoint ptr [[C1:%.*]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[PTRTOINT]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i64 [[SUB]], 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[C2:%.*]], i64 [[SHR]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x i8], ptr [[C2:%.*]], i64 [[SHR]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%ptrtoint = ptrtoint ptr %c1 to i64
@@ -1154,7 +1154,7 @@ define ptr @test44i(ptr %c1, ptr %c2) {
; CHECK-NEXT: [[PTRTOINT:%.*]] = ptrtoint ptr [[C1:%.*]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[PTRTOINT]]
; CHECK-NEXT: [[SHR:%.*]] = sdiv i64 [[SUB]], 7
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[C2:%.*]], i64 [[SHR]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [7 x i8], ptr [[C2:%.*]], i64 [[SHR]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%ptrtoint = ptrtoint ptr %c1 to i64
@@ -1171,7 +1171,7 @@ define ptr @test45(ptr %c1, ptr %c2) {
; CHECK-NEXT: [[PTRTOINT2:%.*]] = ptrtoint ptr [[C2:%.*]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[PTRTOINT2]], [[PTRTOINT1]]
; CHECK-NEXT: [[SHR:%.*]] = sdiv i64 [[SUB]], 7
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[C1]], i64 [[SHR]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [7 x i8], ptr [[C1]], i64 [[SHR]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%ptrtoint1 = ptrtoint ptr %c1 to i64
@@ -1187,7 +1187,7 @@ define ptr @test46(ptr %c1, ptr %c2, i64 %N) {
; CHECK-NEXT: [[PTRTOINT:%.*]] = ptrtoint ptr [[C1:%.*]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[PTRTOINT]]
; CHECK-NEXT: [[SDIV:%.*]] = sdiv i64 [[SUB]], [[N:%.*]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], ptr [[C2:%.*]], i64 [[SDIV]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [7 x i8], ptr [[C2:%.*]], i64 [[SDIV]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%ptrtoint = ptrtoint ptr %c1 to i64
@@ -1200,7 +1200,7 @@ define ptr @test46(ptr %c1, ptr %c2, i64 %N) {
define ptr @test47(ptr %I, i64 %C, i64 %D) {
; CHECK-LABEL: @test47(
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[I:%.*]], i64 [[D:%.*]]
+; CHECK-NEXT: [[B:%.*]] = getelementptr [4 x i8], ptr [[I:%.*]], i64 [[D:%.*]]
; CHECK-NEXT: ret ptr [[B]]
;
%sub = sub i64 %D, %C
@@ -1211,7 +1211,7 @@ define ptr @test47(ptr %I, i64 %C, i64 %D) {
define ptr @test48(ptr %I, i64 %C, i64 %D) {
; CHECK-LABEL: @test48(
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[I:%.*]], i64 [[D:%.*]]
+; CHECK-NEXT: [[B:%.*]] = getelementptr [4 x i8], ptr [[I:%.*]], i64 [[D:%.*]]
; CHECK-NEXT: ret ptr [[B]]
;
%sub = sub i64 %D, %C
@@ -1253,7 +1253,7 @@ define ptr addrspace(1) @ascast_0_0_gep(ptr %p) nounwind {
define <2 x ptr> @PR32414(ptr %ptr) {
; CHECK-LABEL: @PR32414(
-; CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: ret <2 x ptr> [[T1]]
;
%t1 = getelementptr inbounds i32, ptr %ptr, <2 x i64> <i64 0, i64 1>
@@ -1262,7 +1262,7 @@ define <2 x ptr> @PR32414(ptr %ptr) {
define ptr @test_bitcast_nzgep(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_bitcast_nzgep(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%ptr = getelementptr inbounds i32, ptr %base, i64 %idx
@@ -1271,7 +1271,7 @@ define ptr @test_bitcast_nzgep(ptr %base, i64 %idx) {
define ptr @test_zgep_nzgep(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_zgep_nzgep(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%ptr = getelementptr inbounds i32, ptr %base, i64 %idx
@@ -1280,7 +1280,7 @@ define ptr @test_zgep_nzgep(ptr %base, i64 %idx) {
define ptr @test_nzgep_zgep(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_nzgep_zgep(
-; CHECK-NEXT: [[BASE2:%.*]] = getelementptr inbounds [1 x i32], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[BASE2:%.*]] = getelementptr inbounds [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[BASE2]]
;
%base2 = getelementptr inbounds [1 x i32], ptr %base, i64 %idx
@@ -1365,7 +1365,7 @@ define ptr @gep_of_gep_multiuse_const_and_const(ptr %p, i64 %idx) {
define ptr @gep_of_gep_multiuse_var_and_const(ptr %p, i64 %idx) {
; CHECK-LABEL: @gep_of_gep_multiuse_var_and_const(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr { i32, i32 }, ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [8 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP1]])
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr [[GEP1]], i64 4
; CHECK-NEXT: ret ptr [[GEP2]]
@@ -1378,9 +1378,9 @@ define ptr @gep_of_gep_multiuse_var_and_const(ptr %p, i64 %idx) {
define ptr @gep_of_gep_multiuse_var_and_var(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_of_gep_multiuse_var_and_var(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [16 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP1]])
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[GEP1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[GEP1]], i64 [[IDX2:%.*]]
; CHECK-NEXT: ret ptr [[GEP2]]
;
%gep1 = getelementptr [4 x i32], ptr %p, i64 %idx
@@ -1568,7 +1568,7 @@ define ptr @gep_i8(ptr %p, i64 %off) {
define ptr @gep_sdiv_mismatched_size(ptr %p, i64 %off) {
; CHECK-LABEL: @gep_sdiv_mismatched_size(
; CHECK-NEXT: [[INDEX:%.*]] = sdiv exact i64 [[OFF:%.*]], 20
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [7 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%index = sdiv exact i64 %off, 20
@@ -1579,7 +1579,7 @@ define ptr @gep_sdiv_mismatched_size(ptr %p, i64 %off) {
define ptr @gep_udiv_mismatched_size(ptr %p, i64 %off) {
; CHECK-LABEL: @gep_udiv_mismatched_size(
; CHECK-NEXT: [[INDEX:%.*]] = udiv exact i64 [[OFF:%.*]], 20
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [7 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%index = udiv exact i64 %off, 20
@@ -1590,7 +1590,7 @@ define ptr @gep_udiv_mismatched_size(ptr %p, i64 %off) {
define ptr @gep_sdiv_without_exact(ptr %p, i64 %off) {
; CHECK-LABEL: @gep_sdiv_without_exact(
; CHECK-NEXT: [[INDEX:%.*]] = sdiv i64 [[OFF:%.*]], 7
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [7 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%index = sdiv i64 %off, 7
@@ -1601,7 +1601,7 @@ define ptr @gep_sdiv_without_exact(ptr %p, i64 %off) {
define ptr @gep_udiv_without_exact(ptr %p, i64 %off) {
; CHECK-LABEL: @gep_udiv_without_exact(
; CHECK-NEXT: [[INDEX:%.*]] = udiv i64 [[OFF:%.*]], 7
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [[STRUCT_C:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [7 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%index = udiv i64 %off, 7
@@ -1612,7 +1612,7 @@ define ptr @gep_udiv_without_exact(ptr %p, i64 %off) {
define ptr @gep_ashr_without_exact(ptr %p, i64 %off) {
; CHECK-LABEL: @gep_ashr_without_exact(
; CHECK-NEXT: [[INDEX:%.*]] = ashr i64 [[OFF:%.*]], 2
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%index = ashr i64 %off, 2
@@ -1623,7 +1623,7 @@ define ptr @gep_ashr_without_exact(ptr %p, i64 %off) {
define ptr @gep_lshr_without_exact(ptr %p, i64 %off) {
; CHECK-LABEL: @gep_lshr_without_exact(
; CHECK-NEXT: [[INDEX:%.*]] = lshr i64 [[OFF:%.*]], 2
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: ret ptr [[PTR]]
;
%index = lshr i64 %off, 2
@@ -1762,7 +1762,7 @@ define ptr @gep_of_udiv(ptr %p, i64 %x) {
define ptr @gep_of_udiv_fail_not_divisible(ptr %p, i64 %x) {
; CHECK-LABEL: @gep_of_udiv_fail_not_divisible(
; CHECK-NEXT: [[IDX:%.*]] = udiv exact i64 [[X:%.*]], 13
-; CHECK-NEXT: [[R:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT: ret ptr [[R]]
;
%idx = udiv exact i64 %x, 13
@@ -1786,7 +1786,7 @@ define ptr @gep_of_sdiv(ptr %p, i64 %x) {
define ptr @gep_of_sdiv_fail_not_divisible(ptr %p, i64 %x) {
; CHECK-LABEL: @gep_of_sdiv_fail_not_divisible(
; CHECK-NEXT: [[IDX:%.*]] = sdiv exact i64 [[X:%.*]], -35
-; CHECK-NEXT: [[R:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT: ret ptr [[R]]
;
%idx = sdiv exact i64 %x, -35
@@ -1797,7 +1797,7 @@ define ptr @gep_of_sdiv_fail_not_divisible(ptr %p, i64 %x) {
define ptr @gep_of_sdiv_fail_ub(ptr %p, i64 %x) {
; CHECK-LABEL: @gep_of_sdiv_fail_ub(
; CHECK-NEXT: [[IDX:%.*]] = sdiv i64 [[X:%.*]], -4
-; CHECK-NEXT: [[R:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT: ret ptr [[R]]
;
%idx = sdiv i64 %x, -4
@@ -1831,7 +1831,7 @@ define ptr @gep_of_ashr_fail_multiuse(ptr %p, i64 %x) {
; CHECK-LABEL: @gep_of_ashr_fail_multiuse(
; CHECK-NEXT: [[IDX:%.*]] = ashr exact i64 [[X:%.*]], 3
; CHECK-NEXT: call void @use.i64(i64 [[IDX]])
-; CHECK-NEXT: [[R:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT: ret ptr [[R]]
;
%idx = ashr exact i64 %x, 3
@@ -1843,7 +1843,7 @@ define ptr @gep_of_ashr_fail_multiuse(ptr %p, i64 %x) {
define ptr @gep_of_lshr_fail_missing_exact(ptr %p, i64 %x) {
; CHECK-LABEL: @gep_of_lshr_fail_missing_exact(
; CHECK-NEXT: [[IDX:%.*]] = lshr i64 [[X:%.*]], 3
-; CHECK-NEXT: [[R:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT: ret ptr [[R]]
;
%idx = lshr i64 %x, 3
@@ -1854,7 +1854,7 @@ define ptr @gep_of_lshr_fail_missing_exact(ptr %p, i64 %x) {
define ptr @gep_of_ashr_fail_not_divisible(ptr %p, i64 %x) {
; CHECK-LABEL: @gep_of_ashr_fail_not_divisible(
; CHECK-NEXT: [[IDX:%.*]] = ashr exact i64 [[X:%.*]], 1
-; CHECK-NEXT: [[R:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr [4 x i8], ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT: ret ptr [[R]]
;
%idx = ashr exact i64 %x, 1
@@ -1922,8 +1922,8 @@ define ptr @gep_merge_nusw(ptr %p, i64 %x, i64 %y) {
define ptr @gep_merge_nuw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nuw_add_zero(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw [2 x i32], ptr [[GEP_SPLIT:%.*]], i64 [[IDX2:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i32, ptr [[GEP]], i64 [[IDX3:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw [8 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [4 x i8], ptr [[GEP2]], i64 [[IDX2:%.*]]
; CHECK-NEXT: ret ptr [[GEP1]]
;
%gep1 = getelementptr nuw [2 x i32], ptr %p, i64 %idx
@@ -1936,8 +1936,8 @@ define ptr @gep_merge_nuw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
; after the merge.
define ptr @gep_merge_nusw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nusw_add_zero(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw i32, ptr [[GEP1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [8 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw [4 x i8], ptr [[GEP1]], i64 [[IDX2:%.*]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep1 = getelementptr nusw [2 x i32], ptr %p, i64 %idx
@@ -1947,7 +1947,7 @@ define ptr @gep_merge_nusw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
define ptr @gep_merge_nuw_const(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nuw_const(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [8 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[GEP1]], i64 4
; CHECK-NEXT: ret ptr [[GEP]]
;
@@ -1958,7 +1958,7 @@ define ptr @gep_merge_nuw_const(ptr %p, i64 %idx, i64 %idx2) {
define ptr @gep_merge_nuw_const_neg(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nuw_const_neg(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [8 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[GEP1]], i64 -4
; CHECK-NEXT: ret ptr [[GEP]]
;
@@ -1971,7 +1971,7 @@ define ptr @gep_merge_nuw_const_neg(ptr %p, i64 %idx, i64 %idx2) {
; does not overflow.
define ptr @gep_merge_nusw_const(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nusw_const(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [8 x i8], ptr [[P:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[GEP1]], i64 4
; CHECK-NEXT: ret ptr [[GEP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index f873551512718..6640451862b86 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -116,7 +116,7 @@ define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
define i1 @test61(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test61(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr [[FOO:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr [[FOO:%.*]], i32 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr [[FOO]], i32 [[TMP2]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[GEP1]], [[GEP2]]
@@ -131,7 +131,7 @@ define i1 @test61(ptr %foo, i64 %i, i64 %j) {
define i1 @test61_as1(ptr addrspace(1) %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test61_as1(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr addrspace(1) [[FOO:%.*]], i16 [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[FOO:%.*]], i16 [[I:%.*]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr addrspace(1) [[FOO]], i16 [[J:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr addrspace(1) [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret i1 [[CMP]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index 10d7b6df8eaf3..048a4c4a7e5fe 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -410,7 +410,7 @@ define i1 @test60_nusw_nuw(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_nusw_nuw_mix(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_nusw_nuw_mix(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i32, ptr [[FOO:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [4 x i8], ptr [[FOO:%.*]], i64 [[I:%.*]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw i8, ptr [[FOO]], i64 [[J:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -423,7 +423,7 @@ define i1 @test60_nusw_nuw_mix(ptr %foo, i64 %i, i64 %j) {
define i1 @test_gep_ult_no_inbounds(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test_gep_ult_no_inbounds(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr [[FOO:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr [[FOO:%.*]], i64 [[I:%.*]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr [[FOO]], i64 [[J:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -508,7 +508,7 @@ define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
define i1 @test61(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test61(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr [[FOO:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr [[FOO:%.*]], i64 [[I:%.*]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr [[FOO]], i64 [[J:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -522,7 +522,7 @@ define i1 @test61(ptr %foo, i64 %i, i64 %j) {
define i1 @test61_as1(ptr addrspace(1) %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test61_as1(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr addrspace(1) [[FOO:%.*]], i16 [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[FOO:%.*]], i16 [[I:%.*]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr addrspace(1) [[FOO]], i16 [[J:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr addrspace(1) [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -825,8 +825,8 @@ define i1 @gep_mugtiple_ugt_nuw(ptr %base, i64 %idx, i64 %idx2) {
define i1 @gep_mugtiple_ugt_not_all_nuw(ptr %base, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_mugtiple_ugt_not_all_nuw(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[GEP1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[GEP1]], i64 [[IDX2:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP2]], [[BASE]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -838,8 +838,8 @@ define i1 @gep_mugtiple_ugt_not_all_nuw(ptr %base, i64 %idx, i64 %idx2) {
define i1 @gep_mugtiple_ugt_inbounds_nusw(ptr %base, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_mugtiple_ugt_inbounds_nusw(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw i32, ptr [[GEP1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nusw [4 x i8], ptr [[GEP1]], i64 [[IDX2:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP2]], [[BASE]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -935,11 +935,11 @@ define i1 @gep_multiple_multi_above_below_limit_consts(ptr %base, i64 %idx1, i64
; CHECK-LABEL: @gep_multiple_multi_above_below_limit_consts(
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 16
; CHECK-NEXT: call void @use(ptr [[GEP1]])
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, ptr [[GEP1]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i8], ptr [[GEP1]], i64 [[IDX1:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP2]])
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i8, ptr [[GEP2]], i64 16
; CHECK-NEXT: call void @use(ptr [[GEP3]])
-; CHECK-NEXT: [[GEP4:%.*]] = getelementptr i32, ptr [[GEP3]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP4:%.*]] = getelementptr [4 x i8], ptr [[GEP3]], i64 [[IDX2:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP4]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[GEP4]], [[BASE]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -958,13 +958,13 @@ define i1 @gep_multiple_multi_above_below_limit_consts(ptr %base, i64 %idx1, i64
define i1 @gep_multiple_multi_use_above_limit(ptr %base, i64 %idx1, i64 %idx2, i64 %idx3, i64 %idx4) {
; CHECK-LABEL: @gep_multiple_multi_use_above_limit(
-; CHECK-NEXT: [[GEP4:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[GEP4:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[IDX1:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP4]])
-; CHECK-NEXT: [[GEP3:%.*]] = getelementptr i32, ptr [[GEP4]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP3:%.*]] = getelementptr [4 x i8], ptr [[GEP4]], i64 [[IDX2:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP3]])
-; CHECK-NEXT: [[GEP5:%.*]] = getelementptr i32, ptr [[GEP3]], i64 [[IDX3:%.*]]
+; CHECK-NEXT: [[GEP5:%.*]] = getelementptr [4 x i8], ptr [[GEP3]], i64 [[IDX3:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP5]])
-; CHECK-NEXT: [[GEP6:%.*]] = getelementptr i32, ptr [[GEP5]], i64 [[IDX4:%.*]]
+; CHECK-NEXT: [[GEP6:%.*]] = getelementptr [4 x i8], ptr [[GEP5]], i64 [[IDX4:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP6]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[GEP6]], [[BASE]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -1087,10 +1087,10 @@ define i1 @gep_gep_multiple_ult_nuw(ptr %base, i64 %idx1, i64 %idx2, i64 %idx3,
define i1 @gep_gep_multiple_ult_missing_nuw(ptr %base, i64 %idx1, i64 %idx2, i64 %idx3, i64 %idx4) {
; CHECK-LABEL: @gep_gep_multiple_ult_missing_nuw(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i32, ptr [[BASE:%.*]], i64 [[IDX1:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw i32, ptr [[GEP1]], i64 [[IDX2:%.*]]
-; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw i32, ptr [[BASE]], i64 [[IDX3:%.*]]
-; CHECK-NEXT: [[GEP4:%.*]] = getelementptr i32, ptr [[GEP3]], i64 [[IDX4:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [4 x i8], ptr [[BASE:%.*]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr nuw [4 x i8], ptr [[GEP1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP3:%.*]] = getelementptr nuw [4 x i8], ptr [[BASE]], i64 [[IDX3:%.*]]
+; CHECK-NEXT: [[GEP4:%.*]] = getelementptr [4 x i8], ptr [[GEP3]], i64 [[IDX4:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[GEP2]], [[GEP4]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
index 07486ff21d54d..0115da65b3a4c 100644
--- a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
+++ b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
@@ -131,7 +131,7 @@ bb2:
define ptr @test3_no_inbounds1(ptr %A, i32 %Offset) {
; CHECK-LABEL: @test3_no_inbounds1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i32 [[OFFSET:%.*]]
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[RHS:%.*]] = phi ptr [ [[RHS_NEXT:%.*]], [[BB]] ], [ [[TMP]], [[ENTRY:%.*]] ]
@@ -160,7 +160,7 @@ bb2:
define ptr @test3_no_inbounds2(ptr %A, i32 %Offset) {
; CHECK-LABEL: @test3_no_inbounds2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[OFFSET:%.*]]
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[RHS:%.*]] = phi ptr [ [[RHS_NEXT:%.*]], [[BB]] ], [ [[TMP]], [[ENTRY:%.*]] ]
@@ -189,7 +189,7 @@ bb2:
define ptr @test3_no_inbounds3(ptr %A, i32 %Offset) {
; CHECK-LABEL: @test3_no_inbounds3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[OFFSET:%.*]]
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[RHS:%.*]] = phi ptr [ [[RHS_NEXT:%.*]], [[BB]] ], [ [[TMP]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
index ac44e6c9df412..657ef26e6ffb9 100644
--- a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
+++ b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
@@ -158,7 +158,7 @@ define i32 @knownbits_phi_select_test1(ptr %p1, ptr %p2, i8 %x) {
; CHECK-NEXT: ]
; CHECK: default:
; CHECK-NEXT: [[EXT:%.*]] = sext i8 [[INDVAR1]] to i64
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i16, ptr [[P2:%.*]], i64 [[EXT]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[P2:%.*]], i64 [[EXT]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i16, ptr [[GEP1]], align 2
; CHECK-NEXT: [[MASK:%.*]] = and i16 [[LOAD1]], 8192
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i16 [[MASK]], 0
diff --git a/llvm/test/Transforms/InstCombine/load-bitcast-select.ll b/llvm/test/Transforms/InstCombine/load-bitcast-select.ll
index 3c43618b13569..35eecc5c4c384 100644
--- a/llvm/test/Transforms/InstCombine/load-bitcast-select.ll
+++ b/llvm/test/Transforms/InstCombine/load-bitcast-select.ll
@@ -16,8 +16,8 @@ define void @_Z3foov() {
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[I_0]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr @a, i64 [[TMP0]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr @b, i64 [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @a, i64 [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @b, i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP1]], [[TMP2]]
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index a5a0681ac3af3..e5b90107bb27d 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -359,7 +359,7 @@ define i1 @test10_struct_arr_noarrayty(i32 %x) {
define i1 @pr93017(i64 %idx) {
; CHECK-LABEL: @pr93017(
; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[IDX:%.*]] to i32
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds ptr, ptr @table, i32 [[TMP1]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr @table, i32 [[TMP1]]
; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[GEP]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[V]], null
; CHECK-NEXT: ret i1 [[CMP]]
@@ -458,7 +458,7 @@ define i1 @load_vs_array_type_mismatch_offset2(i32 %idx) {
define i1 @offset_larger_than_stride(i32 %idx) {
; CHECK-LABEL: @offset_larger_than_stride(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr getelementptr inbounds nuw (i8, ptr @g_i16_1, i32 4), i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i8], ptr getelementptr inbounds nuw (i8, ptr @g_i16_1, i32 4), i32 [[IDX:%.*]]
; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[LOAD]], 0
; CHECK-NEXT: ret i1 [[CMP]]
@@ -516,7 +516,7 @@ define i1 @cmp_diff_load_constant_array_messy0(i32 %x){
; Load size larger than store size currently not supported.
define i1 @cmp_diff_load_constant_array_messy1(i32 %x){
; CHECK-LABEL: @cmp_diff_load_constant_array_messy1(
-; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr i6, ptr @CG_MESSY, i32 [[TMP1:%.*]]
+; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr i8, ptr @CG_MESSY, i32 [[X:%.*]]
; CHECK-NEXT: [[ISOK:%.*]] = load i16, ptr [[ISOK_PTR]], align 2
; CHECK-NEXT: [[COND:%.*]] = icmp slt i16 [[ISOK]], 5
; CHECK-NEXT: ret i1 [[COND]]
@@ -530,7 +530,7 @@ define i1 @cmp_diff_load_constant_array_messy1(i32 %x){
define i1 @cmp_load_constant_array_variable_icmp(i32 %x, i32 %y) {
; CHECK-LABEL: @cmp_load_constant_array_variable_icmp(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr inbounds i32, ptr @CG_MESSY, i32 [[X:%.*]]
+; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @CG_MESSY, i32 [[X:%.*]]
; CHECK-NEXT: [[ISOK:%.*]] = load i32, ptr [[ISOK_PTR]], align 4
; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[ISOK]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[COND]]
@@ -548,7 +548,7 @@ entry:
define i1 @cmp_load_constant_additional_positive_offset(i32 %x) {
; CHECK-LABEL: @cmp_load_constant_additional_positive_offset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @CG_CLEAR, i32 20), i32 [[X:%.*]]
+; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @CG_CLEAR, i32 20), i32 [[X:%.*]]
; CHECK-NEXT: [[ISOK:%.*]] = load i32, ptr [[ISOK_PTR]], align 4
; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[ISOK]], 5
; CHECK-NEXT: ret i1 [[COND]]
@@ -563,7 +563,7 @@ entry:
define i1 @cmp_load_constant_additional_negative_offset(i32 %x) {
; CHECK-LABEL: @cmp_load_constant_additional_negative_offset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ISOK_PTR_SPLIT:%.*]] = getelementptr inbounds [1 x i32], ptr @CG_CLEAR, i32 [[X:%.*]]
+; CHECK-NEXT: [[ISOK_PTR_SPLIT:%.*]] = getelementptr inbounds [4 x i8], ptr @CG_CLEAR, i32 [[X:%.*]]
; CHECK-NEXT: [[ISOK_PTR:%.*]] = getelementptr inbounds i8, ptr [[ISOK_PTR_SPLIT]], i32 -20
; CHECK-NEXT: [[ISOK:%.*]] = load i32, ptr [[ISOK_PTR]], align 4
; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[ISOK]], 5
@@ -578,8 +578,8 @@ entry:
define i1 @cmp_load_multiple_indices(i32 %idx, i32 %idx2) {
; CHECK-LABEL: @cmp_load_multiple_indices(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i16, ptr @g_i16_1, i32 [[IDX:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i16, ptr [[GEP1]], i32 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [2 x i8], ptr @g_i16_1, i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[GEP1]], i32 [[IDX2:%.*]]
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP2]], i32 2
; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[GEP3]], align 2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[LOAD]], 0
@@ -595,8 +595,8 @@ define i1 @cmp_load_multiple_indices(i32 %idx, i32 %idx2) {
define i1 @cmp_load_multiple_indices2(i32 %idx, i32 %idx2) {
; CHECK-LABEL: @cmp_load_multiple_indices2(
-; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds [1 x i16], ptr @g_i16_1, i32 [[IDX:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i16, ptr [[GEP1_SPLIT]], i32 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds [2 x i8], ptr @g_i16_1, i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[GEP1_SPLIT]], i32 [[IDX2:%.*]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP1]], i32 2
; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[GEP2]], align 2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[LOAD]], 0
diff --git a/llvm/test/Transforms/InstCombine/load.ll b/llvm/test/Transforms/InstCombine/load.ll
index 736a31fc6ae0a..526faf3c4832e 100644
--- a/llvm/test/Transforms/InstCombine/load.ll
+++ b/llvm/test/Transforms/InstCombine/load.ll
@@ -91,7 +91,7 @@ define i32 @load_gep_null_not_inbounds(i64 %X) {
define i32 @test7_no_null_opt(i32 %X) #0 {
; CHECK-LABEL: @test7_no_null_opt(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[V:%.*]] = getelementptr i32, ptr null, i64 [[TMP1]]
+; CHECK-NEXT: [[V:%.*]] = getelementptr [4 x i8], ptr null, i64 [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = load i32, ptr [[V]], align 4
; CHECK-NEXT: ret i32 [[R]]
;
@@ -455,7 +455,7 @@ define i32 @load_select_with_null_gep(i1 %cond, ptr %p, i64 %off) {
define i16 @load_select_with_null_gep2(i1 %cond, ptr %p, i64 %x) {
; CHECK-LABEL: @load_select_with_null_gep2(
; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[SEL:%.*]], i64 -2
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i8], ptr [[INVARIANT_GEP]], i64 [[X:%.*]]
; CHECK-NEXT: [[RES:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: ret i16 [[RES]]
;
@@ -469,8 +469,8 @@ define i16 @load_select_with_null_gep2(i1 %cond, ptr %p, i64 %x) {
define i16 @load_select_with_null_gep3(i1 %cond, ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @load_select_with_null_gep3(
; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[SEL:%.*]], i64 -2
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i16, ptr [[GEP]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i8], ptr [[INVARIANT_GEP]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [2 x i8], ptr [[GEP]], i64 [[Y:%.*]]
; CHECK-NEXT: [[RES:%.*]] = load i16, ptr [[GEP2]], align 2
; CHECK-NEXT: ret i16 [[RES]]
;
diff --git a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
index e6a752575901f..d038ca33432f7 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -20,7 +20,7 @@ define <2 x i64> @static_hem() {
define <2 x i64> @hem(i32 %i) {
; CHECK-LABEL: @hem(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, ptr @x, i64 [[TMP1]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr [16 x i8], ptr @x, i64 [[TMP1]]
; CHECK-NEXT: [[L:%.*]] = load <2 x i64>, ptr [[T]], align 1
; CHECK-NEXT: ret <2 x i64> [[L]]
;
@@ -33,8 +33,8 @@ define <2 x i64> @hem_2d(i32 %i, i32 %j) {
; CHECK-LABEL: @hem_2d(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT: [[T_SPLIT:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]]
-; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, ptr [[T_SPLIT]], i64 [[TMP2]]
+; CHECK-NEXT: [[T_SPLIT:%.*]] = getelementptr [208 x i8], ptr @xx, i64 [[TMP1]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr [16 x i8], ptr [[T_SPLIT]], i64 [[TMP2]]
; CHECK-NEXT: [[L:%.*]] = load <2 x i64>, ptr [[T]], align 1
; CHECK-NEXT: ret <2 x i64> [[L]]
;
@@ -78,7 +78,7 @@ define void @static_hem_store(<2 x i64> %y) {
define void @hem_store(i32 %i, <2 x i64> %y) {
; CHECK-LABEL: @hem_store(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, ptr @x, i64 [[TMP1]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr [16 x i8], ptr @x, i64 [[TMP1]]
; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr [[T]], align 1
; CHECK-NEXT: ret void
;
@@ -91,8 +91,8 @@ define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
; CHECK-LABEL: @hem_2d_store(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT: [[T_SPLIT:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]]
-; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, ptr [[T_SPLIT]], i64 [[TMP2]]
+; CHECK-NEXT: [[T_SPLIT:%.*]] = getelementptr [208 x i8], ptr @xx, i64 [[TMP1]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr [16 x i8], ptr [[T_SPLIT]], i64 [[TMP2]]
; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr [[T]], align 1
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/loadstore-metadata.ll b/llvm/test/Transforms/InstCombine/loadstore-metadata.ll
index 64f9f2797a439..ec485edbb709b 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-metadata.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-metadata.ll
@@ -128,9 +128,9 @@ define void @test_load_cast_combine_loop(ptr %src, ptr %dst, i32 %n) {
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 [[TMP0]]
+; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP1]]
+; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[DST]], i64 [[TMP1]]
; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[SRC_GEP]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
; CHECK-NEXT: store i32 [[L1]], ptr [[DST_GEP]], align 4
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
diff --git a/llvm/test/Transforms/InstCombine/masked_intrinsics-inseltpoison.ll b/llvm/test/Transforms/InstCombine/masked_intrinsics-inseltpoison.ll
index 02539d17fac04..10a80cc89dbb9 100644
--- a/llvm/test/Transforms/InstCombine/masked_intrinsics-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/masked_intrinsics-inseltpoison.ll
@@ -58,7 +58,7 @@ define <2 x double> @load_lane0(ptr %ptr, double %pt) {
define double @load_all(ptr %base, double %pt) {
; CHECK-LABEL: @load_all(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> align 4 [[PTRS]], <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> poison)
; CHECK-NEXT: [[ELT:%.*]] = extractelement <4 x double> [[RES]], i64 2
; CHECK-NEXT: ret double [[ELT]]
@@ -200,7 +200,7 @@ define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) {
define <4 x double> @gather_lane2(ptr %base, double %pt) {
; CHECK-LABEL: @gather_lane2(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <4 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <4 x double> [[PT_V1]], <4 x double> poison, <4 x i32> <i32 0, i32 0, i32 poison, i32 0>
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> align 4 [[PTRS]], <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> [[PT_V2]])
@@ -215,7 +215,7 @@ define <4 x double> @gather_lane2(ptr %base, double %pt) {
define <2 x double> @gather_lane0_maybe(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
@@ -232,7 +232,7 @@ define <2 x double> @gather_lane0_maybe(ptr %base, double %pt, <2 x i1> %mask)
define <2 x double> @gather_lane0_maybe_spec(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe_spec(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
@@ -258,7 +258,7 @@ define void @scatter_zeromask(<2 x ptr> %ptrs, <2 x double> %val) {
define void @scatter_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @scatter_demandedelts(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> poison, double [[VAL:%.*]], i64 0
; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[VALVEC1]], <2 x ptr> align 8 [[PTRS]], <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
index 6aadb08b16991..625c979cf794d 100644
--- a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
@@ -67,7 +67,7 @@ define <2 x double> @load_lane0(ptr %ptr, double %pt) {
define double @load_all(ptr %base, double %pt) {
; CHECK-LABEL: @load_all(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> align 4 [[PTRS]], <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> poison)
; CHECK-NEXT: [[ELT:%.*]] = extractelement <4 x double> [[RES]], i64 2
; CHECK-NEXT: ret double [[ELT]]
@@ -242,7 +242,7 @@ define <2 x double> @gather_one_withpoisonmask(<2 x ptr> %ptrs, <2 x double> %pa
define <4 x double> @gather_lane2(ptr %base, double %pt) {
; CHECK-LABEL: @gather_lane2(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <4 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <4 x double> [[PT_V1]], <4 x double> poison, <4 x i32> <i32 0, i32 0, i32 poison, i32 0>
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> align 4 [[PTRS]], <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> [[PT_V2]])
@@ -257,7 +257,7 @@ define <4 x double> @gather_lane2(ptr %base, double %pt) {
define <2 x double> @gather_lane0_maybe(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
@@ -274,7 +274,7 @@ define <2 x double> @gather_lane0_maybe(ptr %base, double %pt, <2 x i1> %mask)
define <2 x double> @gather_lane0_maybe_spec(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe_spec(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
@@ -317,7 +317,7 @@ define void @scatter_one_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %val) {
define void @scatter_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @scatter_demandedelts(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr [8 x i8], ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> poison, double [[VAL:%.*]], i64 0
; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[VALVEC1]], <2 x ptr> align 8 [[PTRS]], <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll b/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll
index 8840374de1c1e..e8543f91e34c7 100644
--- a/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll
+++ b/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll
@@ -48,7 +48,7 @@ define signext i32 @test3(i32 signext %x, i1 %y) #0 {
define signext i32 @test4(i32 signext %x, i1 %y) #0 {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr @f.c, i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr @f.c, i64 [[IDXPROM]]
; CHECK-NEXT: [[R:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
index cfec7b8558412..e36f061f13412 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
@@ -6,9 +6,9 @@
define void @test_load(ptr addrspace(1) %out, i64 %x) {
; CHECK-LABEL: @test_load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr addrspace(2) @test.data, i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(2) @test.data, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
@@ -26,9 +26,9 @@ entry:
define void @test_load_gep_nusw_nuw(ptr addrspace(1) %out, i64 %x) {
; CHECK-LABEL: @test_load_gep_nusw_nuw(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr nusw nuw i32, ptr addrspace(2) @test.data, i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr nusw nuw [4 x i8], ptr addrspace(2) @test.data, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr nusw nuw i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr nusw nuw [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
@@ -45,9 +45,9 @@ entry:
define void @test_load_bitcast_chain(ptr addrspace(1) %out, i64 %x) {
; CHECK-LABEL: @test_load_bitcast_chain(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr addrspace(2) @test.data, i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(2) @test.data, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
@@ -66,9 +66,9 @@ define void @test_call(ptr addrspace(1) %out, i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[DATA]], i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
@@ -87,9 +87,9 @@ define void @test_call_no_null_opt(ptr addrspace(1) %out, i64 %x) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[DATA]], i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
@@ -108,12 +108,12 @@ define void @test_load_and_call(ptr addrspace(1) %out, i64 %x, i64 %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[DATA]], i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT]], i64 [[Y:%.*]]
; CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(1) [[ARRAYIDX2]], align 4
; CHECK-NEXT: ret void
;
@@ -135,12 +135,12 @@ define void @test_load_and_call_no_null_opt(ptr addrspace(1) %out, i64 %x, i64 %
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[X:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[DATA]], i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(ptr [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(1) [[OUT]], i64 [[Y:%.*]]
; CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(1) [[ARRAYIDX2]], align 4
; CHECK-NEXT: ret void
;
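Note how the nusw/nuw variant above keeps its flags: since sizeof([4 x i8]) equals sizeof(i32), the address computation is bit-identical and the no-wrap guarantees carry over unchanged. A minimal sketch, with illustrative names:

define ptr addrspace(1) @flags_sketch(ptr addrspace(1) %out, i64 %x) {
  ; before: getelementptr nusw nuw i32, ptr addrspace(1) %out, i64 %x
  %p = getelementptr nusw nuw [4 x i8], ptr addrspace(1) %out, i64 %x
  ret ptr addrspace(1) %p
}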
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index f10ba1e3d27e6..6ed9924f159d6 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -9,24 +9,24 @@ define float @test1(i32 %hash, float %x, float %y, float %z, float %w) {
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[HASH:%.*]], 2
; CHECK-NEXT: [[T5:%.*]] = and i32 [[T3]], 124
; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[T5]] to i64
-; CHECK-NEXT: [[T753:%.*]] = getelementptr float, ptr @C.0.1248, i64 [[TMP0]]
+; CHECK-NEXT: [[T753:%.*]] = getelementptr [4 x i8], ptr @C.0.1248, i64 [[TMP0]]
; CHECK-NEXT: [[T9:%.*]] = load float, ptr [[T753]], align 4
; CHECK-NEXT: [[T11:%.*]] = fmul float [[T9]], [[X:%.*]]
; CHECK-NEXT: [[T13:%.*]] = fadd float [[T11]], 0.000000e+00
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[T5]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr @C.0.1248, i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr @C.0.1248, i64 [[TMP1]]
; CHECK-NEXT: [[T1851:%.*]] = getelementptr i8, ptr [[TMP2]], i64 4
; CHECK-NEXT: [[T19:%.*]] = load float, ptr [[T1851]], align 4
; CHECK-NEXT: [[T21:%.*]] = fmul float [[T19]], [[Y:%.*]]
; CHECK-NEXT: [[T23:%.*]] = fadd float [[T21]], [[T13]]
; CHECK-NEXT: [[TMP3:%.*]] = zext nneg i32 [[T5]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr float, ptr @C.0.1248, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr @C.0.1248, i64 [[TMP3]]
; CHECK-NEXT: [[T2849:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8
; CHECK-NEXT: [[T29:%.*]] = load float, ptr [[T2849]], align 4
; CHECK-NEXT: [[T31:%.*]] = fmul float [[T29]], [[Z:%.*]]
; CHECK-NEXT: [[T33:%.*]] = fadd float [[T31]], [[T23]]
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg i32 [[T5]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr float, ptr @C.0.1248, i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [4 x i8], ptr @C.0.1248, i64 [[TMP5]]
; CHECK-NEXT: [[T3847:%.*]] = getelementptr i8, ptr [[TMP6]], i64 12
; CHECK-NEXT: [[T39:%.*]] = load float, ptr [[T3847]], align 4
; CHECK-NEXT: [[T41:%.*]] = fmul float [[T39]], [[W:%.*]]
@@ -337,7 +337,7 @@ define float @test11_volatile(i64 %i) {
; CHECK-NEXT: [[A:%.*]] = alloca [4 x float], align 4
; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @I, i64 16, i1 true)
-; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I:%.*]]
+; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I:%.*]]
; CHECK-NEXT: [[R:%.*]] = load float, ptr [[G]], align 4
; CHECK-NEXT: ret float [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll b/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
index df3152a6a4353..c890d95403220 100644
--- a/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
@@ -120,7 +120,7 @@ define ptr addrspace(3) @shrink_gep_constant_index_64_as3(ptr addrspace(3) %p) {
define ptr addrspace(2) @shrink_gep_variable_index_64_as2(ptr addrspace(2) %p, i64 %idx) {
; CHECK-LABEL: @shrink_gep_variable_index_64_as2(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i8
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(2) [[P:%.*]], i8 [[TMP1]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr [4 x i8], ptr addrspace(2) [[P:%.*]], i8 [[TMP1]]
; CHECK-NEXT: ret ptr addrspace(2) [[RET]]
;
%ret = getelementptr i32, ptr addrspace(2) %p, i64 %idx
@@ -130,7 +130,7 @@ define ptr addrspace(2) @shrink_gep_variable_index_64_as2(ptr addrspace(2) %p, i
define ptr addrspace(1) @grow_gep_variable_index_8_as1(ptr addrspace(1) %p, i8 %idx) {
; CHECK-LABEL: @grow_gep_variable_index_8_as1(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[IDX:%.*]] to i64
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(1) [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr [4 x i8], ptr addrspace(1) [[P:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr addrspace(1) [[RET]]
;
%ret = getelementptr i32, ptr addrspace(1) %p, i8 %idx
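The element-type rewrite composes with the existing index-width canonicalization: the index is still truncated or extended to the pointer's index width, independently of the [4 x i8] retyping. A minimal sketch, assuming an 8-bit index width for addrspace(2) as in this test's datalayout (names are illustrative):

define ptr addrspace(2) @shrink_sketch(ptr addrspace(2) %p, i64 %idx) {
  %t = trunc i64 %idx to i8                                  ; index narrowed first
  %ret = getelementptr [4 x i8], ptr addrspace(2) %p, i8 %t  ; 4-byte stride kept
  ret ptr addrspace(2) %ret
}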
diff --git a/llvm/test/Transforms/InstCombine/narrow.ll b/llvm/test/Transforms/InstCombine/narrow.ll
index 5fad7f07090b9..3161d284b637f 100644
--- a/llvm/test/Transforms/InstCombine/narrow.ll
+++ b/llvm/test/Transforms/InstCombine/narrow.ll
@@ -109,7 +109,7 @@ define i1 @searchArray1(i32 %needle, ptr %haystack) {
; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[FOUND:%.*]] = phi i8 [ 0, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDVAR]] to i64
-; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, ptr [[HAYSTACK:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr [4 x i8], ptr [[HAYSTACK:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[IDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[LD]], [[NEEDLE:%.*]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[CMP1]] to i8
@@ -152,7 +152,7 @@ define i1 @searchArray2(i32 %hay, ptr %haystack) {
; CHECK: loop:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[FOUND:%.*]] = phi i8 [ 1, [[ENTRY]] ], [ [[AND:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, ptr [[HAYSTACK:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr [4 x i8], ptr [[HAYSTACK:%.*]], i64 [[INDVAR]]
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[IDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[LD]], [[HAY:%.*]]
; CHECK-NEXT: [[AND]] = select i1 [[CMP1]], i8 [[FOUND]], i8 0
diff --git a/llvm/test/Transforms/InstCombine/opaque-ptr.ll b/llvm/test/Transforms/InstCombine/opaque-ptr.ll
index 1d80724d2355e..9f3cd87924774 100644
--- a/llvm/test/Transforms/InstCombine/opaque-ptr.ll
+++ b/llvm/test/Transforms/InstCombine/opaque-ptr.ll
@@ -223,7 +223,7 @@ define ptr @geps_combinable_different_elem_type5(ptr %a) {
define ptr @geps_combinable_different_elem_type6(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type6(
-; CHECK-NEXT: [[A2:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A2:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[A3:%.*]] = getelementptr i8, ptr [[A2]], i64 4
; CHECK-NEXT: ret ptr [[A3]]
;
@@ -234,7 +234,7 @@ define ptr @geps_combinable_different_elem_type6(ptr %a, i64 %idx) {
define ptr @geps_combinable_different_elem_type7(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type7(
-; CHECK-NEXT: [[A2_SPLIT:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A2_SPLIT:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[A3:%.*]] = getelementptr i8, ptr [[A2_SPLIT]], i64 8
; CHECK-NEXT: ret ptr [[A3]]
;
@@ -245,7 +245,7 @@ define ptr @geps_combinable_different_elem_type7(ptr %a, i64 %idx) {
define ptr @geps_combinable_different_elem_type8(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type8(
-; CHECK-NEXT: [[A2_SPLIT:%.*]] = getelementptr inbounds { { i32, i32 } }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A2_SPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds nuw i8, ptr [[A2_SPLIT]], i64 8
; CHECK-NEXT: ret ptr [[A3]]
;
@@ -256,7 +256,7 @@ define ptr @geps_combinable_different_elem_type8(ptr %a, i64 %idx) {
define ptr @geps_combinable_different_elem_type9(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type9(
-; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds { { i32, i32 } }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[A3]]
;
%a2 = getelementptr inbounds { { i32, i32 } }, ptr %a, i64 %idx, i32 0, i32 1
@@ -281,7 +281,7 @@ define ptr @geps_combinable_different_elem_type_extra_use1(ptr %a) {
define ptr @geps_combinable_different_elem_type_extra_use2(ptr %a, i64 %idx) {
; CHECK-LABEL: @geps_combinable_different_elem_type_extra_use2(
-; CHECK-NEXT: [[A2:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[A2:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: call void @use(ptr [[A2]])
; CHECK-NEXT: [[A3:%.*]] = getelementptr i8, ptr [[A2]], i64 4
; CHECK-NEXT: ret ptr [[A3]]
@@ -331,8 +331,8 @@ define i1 @compare_geps_same_indices(ptr %a, ptr %b, i64 %idx) {
define i1 @compare_geps_same_indices_different_types(ptr %a, ptr %b, i64 %idx) {
; CHECK-LABEL: @compare_geps_same_indices_different_types(
-; CHECK-NEXT: [[A2:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[B2:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[A2:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[B2:%.*]] = getelementptr [8 x i8], ptr [[B:%.*]], i64 [[IDX]]
; CHECK-NEXT: [[C:%.*]] = icmp eq ptr [[A2]], [[B2]]
; CHECK-NEXT: ret i1 [[C]]
;
@@ -377,8 +377,8 @@ define <2 x i1> @compare_gep_with_base_vector2(<2 x ptr> %p, <2 x i64> %idx) {
define <4 x i1> @compare_geps_same_indices_scalar_vector_base_mismatch(ptr %ptr, <4 x ptr> %ptrs) {
; CHECK-LABEL: @compare_geps_same_indices_scalar_vector_base_mismatch(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i16, <4 x ptr> [[PTRS:%.*]], <4 x i64> <i64 1, i64 2, i64 3, i64 4>
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i16, ptr [[PTR:%.*]], <4 x i64> <i64 1, i64 2, i64 3, i64 4>
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x i8], <4 x ptr> [[PTRS:%.*]], <4 x i64> <i64 1, i64 2, i64 3, i64 4>
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [2 x i8], ptr [[PTR:%.*]], <4 x i64> <i64 1, i64 2, i64 3, i64 4>
; CHECK-NEXT: [[CMP:%.*]] = icmp eq <4 x ptr> [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret <4 x i1> [[CMP]]
;
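For struct-typed GEPs the mapping is the same sizeof-based one: { i32, i32 } occupies 8 bytes, so a variable index scales by [8 x i8], and any constant trailing field offset is peeled into a plain i8 GEP. A minimal sketch of that split, with illustrative names:

define ptr @struct_split_sketch(ptr %a, i64 %idx) {
  %base = getelementptr [8 x i8], ptr %a, i64 %idx  ; sizeof({ i32, i32 }) = 8
  %field = getelementptr i8, ptr %base, i64 4       ; byte offset of element 1
  ret ptr %field
}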
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 1c8b21ceb029b..f3aa07069721b 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -1750,7 +1750,7 @@ for.end:
define i1 @pr57488_icmp_of_phi(ptr %ptr.base, i64 %len) {
; CHECK-LABEL: @pr57488_icmp_of_phi(
; CHECK-NEXT: start:
-; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i64, ptr [[PTR_BASE:%.*]], i64 [[LEN:%.*]]
+; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [8 x i8], ptr [[PTR_BASE:%.*]], i64 [[LEN:%.*]]
; CHECK-NEXT: [[LEN_ZERO:%.*]] = icmp eq i64 [[LEN]], 0
; CHECK-NEXT: br i1 [[LEN_ZERO]], label [[EXIT:%.*]], label [[LOOP:%.*]]
; CHECK: loop:
diff --git a/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll b/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll
index a9f310b17e6b6..205074050cbb2 100644
--- a/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll
@@ -26,8 +26,8 @@ define <4 x i1> @PR38984_2() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @offsets, align 2
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[TMP0]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds nuw (i8, ptr @a, i16 42), <4 x i16> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr null, <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [2 x i8], ptr getelementptr inbounds nuw (i8, ptr @a, i16 42), <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [2 x i8], ptr null, <4 x i16> [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x ptr> [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret <4 x i1> [[TMP4]]
;
diff --git a/llvm/test/Transforms/InstCombine/pr38984.ll b/llvm/test/Transforms/InstCombine/pr38984.ll
index 19fa3346797fb..d211ea5a9a22b 100644
--- a/llvm/test/Transforms/InstCombine/pr38984.ll
+++ b/llvm/test/Transforms/InstCombine/pr38984.ll
@@ -26,8 +26,8 @@ define <4 x i1> @PR38984_2() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @offsets, align 2
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 poison>, i16 [[TMP0]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds nuw (i8, ptr @a, i16 42), <4 x i16> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr null, <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [2 x i8], ptr getelementptr inbounds nuw (i8, ptr @a, i16 42), <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [2 x i8], ptr null, <4 x i16> [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x ptr> [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret <4 x i1> [[TMP4]]
;
diff --git a/llvm/test/Transforms/InstCombine/pr58901.ll b/llvm/test/Transforms/InstCombine/pr58901.ll
index aa5aad23b082d..2c8539dfc9dc7 100644
--- a/llvm/test/Transforms/InstCombine/pr58901.ll
+++ b/llvm/test/Transforms/InstCombine/pr58901.ll
@@ -4,7 +4,7 @@
define ptr @f1(ptr %arg, i64 %arg1) {
; CHECK-LABEL: @f1(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[ARG:%.*]], i64 72
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[ARG1:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[ARG1:%.*]]
; CHECK-NEXT: ret ptr [[TMP2]]
;
%1 = getelementptr [6 x i32], ptr %arg, i64 3
@@ -15,8 +15,8 @@ define ptr @f1(ptr %arg, i64 %arg1) {
define ptr @f2(ptr %arg, i64 %arg1) {
; CHECK-LABEL: @f2(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[ARG:%.*]], i64 72
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr [6 x i32], ptr [[TMP1]], i64 [[ARG1:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[DOTSPLIT]], i64 [[ARG1]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr [24 x i8], ptr [[TMP1]], i64 [[ARG1:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[DOTSPLIT]], i64 [[ARG1]]
; CHECK-NEXT: ret ptr [[TMP2]]
;
%1 = getelementptr [6 x i32], ptr %arg, i64 3
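Here the constant leading index folds away entirely (3 * sizeof([6 x i32]) = 72 bytes), while the variable steps keep their 24- and 4-byte strides. A sketch of the canonical output shape for the @f2 case above, with illustrative names:

define ptr @split_sketch(ptr %arg, i64 %arg1) {
  %c = getelementptr i8, ptr %arg, i64 72          ; constant part as raw bytes
  %s = getelementptr [24 x i8], ptr %c, i64 %arg1  ; [6 x i32] stride
  %r = getelementptr [4 x i8], ptr %s, i64 %arg1   ; i32 stride
  ret ptr %r
}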
diff --git a/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll b/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
index bf978801fec5d..c70b33ec0cd1b 100644
--- a/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
+++ b/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
@@ -628,8 +628,8 @@ define i64 @fold_ptrtoint_nested_nullgep_array_variable_multiple_uses(i64 %x, i6
;
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_nullgep_array_variable_multiple_uses
; INSTCOMBINE-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
-; INSTCOMBINE-NEXT: [[PTR_SPLIT:%.*]] = getelementptr [2 x i16], ptr addrspace(1) null, i64 [[X]]
-; INSTCOMBINE-NEXT: [[PTR:%.*]] = getelementptr i16, ptr addrspace(1) [[PTR_SPLIT]], i64 [[Y]]
+; INSTCOMBINE-NEXT: [[PTR_SPLIT:%.*]] = getelementptr [4 x i8], ptr addrspace(1) null, i64 [[X]]
+; INSTCOMBINE-NEXT: [[PTR:%.*]] = getelementptr [2 x i8], ptr addrspace(1) [[PTR_SPLIT]], i64 [[Y]]
; INSTCOMBINE-NEXT: call void @use_ptr(ptr addrspace(1) [[PTR]])
; INSTCOMBINE-NEXT: [[RET:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; INSTCOMBINE-NEXT: ret i64 [[RET]]
diff --git a/llvm/test/Transforms/InstCombine/remove-loop-phi-multiply-by-zero.ll b/llvm/test/Transforms/InstCombine/remove-loop-phi-multiply-by-zero.ll
index a4a9de7fd1285..caa679b444a3b 100644
--- a/llvm/test/Transforms/InstCombine/remove-loop-phi-multiply-by-zero.ll
+++ b/llvm/test/Transforms/InstCombine/remove-loop-phi-multiply-by-zero.ll
@@ -67,7 +67,7 @@ define double @test_nnan_flag_enabled(ptr %arr_d) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[MUL]] = fmul nnan double [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -101,7 +101,7 @@ define double @test_ninf_flag_enabled(ptr %arr_d) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[MUL]] = fmul ninf double [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -135,7 +135,7 @@ define double @test_nsz_flag_enabled(ptr %arr_d) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[MUL]] = fmul nsz double [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -169,7 +169,7 @@ define double @test_phi_initalise_to_non_zero(ptr %arr_d) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi double [ 1.000000e+00, [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[MUL]] = fmul fast double [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -205,7 +205,7 @@ define double @test_multiple_phi_operands(ptr %arr_d, i1 %entry_cond) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[ENTRY_2]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ 0.000000e+00, [[ENTRY_2]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[MUL]] = fmul fast double [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -244,7 +244,7 @@ define double @test_multiple_phi_operands_with_non_zero(ptr %arr_d, i1 %entry_co
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[ENTRY_2]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi double [ 1.000000e+00, [[ENTRY]] ], [ 0.000000e+00, [[ENTRY_2]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[MUL]] = fmul fast double [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -281,7 +281,7 @@ define i32 @test_int_phi_operands(ptr %arr_d) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL]] = mul nsw i32 [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -315,7 +315,7 @@ define i32 @test_int_phi_operands_initalise_to_non_zero(ptr %arr_d) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL]] = mul i32 [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -351,7 +351,7 @@ define i32 @test_multiple_int_phi_operands(ptr %arr_d, i1 %entry_cond) {
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[ENTRY_2]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ 0, [[ENTRY_2]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL]] = mul i32 [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
@@ -390,7 +390,7 @@ define i32 @test_multiple_int_phi_operands_initalise_to_non_zero(ptr %arr_d, i1
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[ENTRY_2]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[F_PROD_01:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ 1, [[ENTRY_2]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR_D:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARR_D:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL]] = mul i32 [[F_PROD_01]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
diff --git a/llvm/test/Transforms/InstCombine/scalable-select.ll b/llvm/test/Transforms/InstCombine/scalable-select.ll
index 717507dbfa356..db61c60aad784 100644
--- a/llvm/test/Transforms/InstCombine/scalable-select.ll
+++ b/llvm/test/Transforms/InstCombine/scalable-select.ll
@@ -17,8 +17,8 @@ define <vscale x 1 x i32> @select_opt(<vscale x 1 x i32> %b, <vscale x 1 x i1> %
define <vscale x 2 x i64> @load_scalable_of_selected_ptrs(ptr %p0, ptr %p1, i64 %idx) {
; CHECK-LABEL: @load_scalable_of_selected_ptrs(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P0:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P1:%.*]], i64 [[IDX]]
+; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr inbounds [4 x i8], ptr [[P0:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P1:%.*]], i64 [[IDX]]
; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX0]], align 4
; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[L0]], [[L1]]
diff --git a/llvm/test/Transforms/InstCombine/select-gep.ll b/llvm/test/Transforms/InstCombine/select-gep.ll
index 718133699a8a7..e58f39a588f5a 100644
--- a/llvm/test/Transforms/InstCombine/select-gep.ll
+++ b/llvm/test/Transforms/InstCombine/select-gep.ll
@@ -60,7 +60,7 @@ define ptr @test1d(ptr %p, ptr %q) {
define ptr @test2(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[SELECT_V:%.*]] = call i64 @llvm.umax.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[SELECT_V]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[SELECT_V]]
; CHECK-NEXT: ret ptr [[SELECT]]
;
%gep1 = getelementptr inbounds i32, ptr %p, i64 %x
@@ -75,7 +75,7 @@ define ptr @test2a(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2a(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT_IDX:%.*]] = select i1 [[CMP]], i64 [[X]], i64 0
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[SELECT_IDX]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[SELECT_IDX]]
; CHECK-NEXT: ret ptr [[SELECT]]
;
%gep = getelementptr inbounds i32, ptr %p, i64 %x
@@ -88,7 +88,7 @@ define ptr @test2a_nusw(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2a_nusw(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT_IDX:%.*]] = select i1 [[CMP]], i64 [[X]], i64 0
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr nusw i32, ptr [[P:%.*]], i64 [[SELECT_IDX]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr nusw [4 x i8], ptr [[P:%.*]], i64 [[SELECT_IDX]]
; CHECK-NEXT: ret ptr [[SELECT]]
;
%gep = getelementptr nusw i32, ptr %p, i64 %x
@@ -101,7 +101,7 @@ define ptr @test2a_nuw(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2a_nuw(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT_IDX:%.*]] = select i1 [[CMP]], i64 [[X]], i64 0
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[SELECT_IDX]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr nuw [4 x i8], ptr [[P:%.*]], i64 [[SELECT_IDX]]
; CHECK-NEXT: ret ptr [[SELECT]]
;
%gep = getelementptr nuw i32, ptr %p, i64 %x
@@ -115,7 +115,7 @@ define ptr @test2b(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2b(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT_IDX:%.*]] = select i1 [[CMP]], i64 0, i64 [[X]]
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[SELECT_IDX]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[SELECT_IDX]]
; CHECK-NEXT: ret ptr [[SELECT]]
;
%gep = getelementptr inbounds i32, ptr %p, i64 %x
@@ -127,7 +127,7 @@ define ptr @test2b(ptr %p, i64 %x, i64 %y) {
; PR51069
define ptr @test2c(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2c(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SEL_IDX:%.*]] = select i1 [[ICMP]], i64 0, i64 24
; CHECK-NEXT: [[SEL:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP1]], i64 [[SEL_IDX]]
@@ -143,7 +143,7 @@ define ptr @test2c(ptr %p, i64 %x, i64 %y) {
; PR51069
define ptr @test2d(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2d(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SEL_IDX:%.*]] = select i1 [[ICMP]], i64 24, i64 0
; CHECK-NEXT: [[SEL:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP1]], i64 [[SEL_IDX]]
@@ -162,9 +162,9 @@ define ptr @test2d(ptr %p, i64 %x, i64 %y) {
define ptr @test3a(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test3a(
; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[P:%.*]], i64 32
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[GEP1_SPLIT]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[GEP1_SPLIT]], i64 [[X:%.*]]
; CHECK-NEXT: [[GEP2_SPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 32
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[GEP2_SPLIT]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[GEP2_SPLIT]], i64 [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
; CHECK-NEXT: ret ptr [[SELECT]]
@@ -181,7 +181,7 @@ define ptr @test3b(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1:%.*]], ptr [[GEP2:%.*]]
; CHECK-NEXT: [[SELECT_V:%.*]] = getelementptr inbounds nuw i8, ptr [[SELECT]], i64 32
-; CHECK-NEXT: [[SELECT1:%.*]] = getelementptr inbounds i32, ptr [[SELECT_V]], i64 [[X]]
+; CHECK-NEXT: [[SELECT1:%.*]] = getelementptr inbounds [4 x i8], ptr [[SELECT_V]], i64 [[X]]
; CHECK-NEXT: ret ptr [[SELECT1]]
;
%gep1 = getelementptr inbounds [4 x i32], ptr %p, i64 2, i64 %x
@@ -193,9 +193,9 @@ define ptr @test3b(ptr %p, ptr %q, i64 %x, i64 %y) {
define ptr @test3c(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3c(
-; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP1_SPLIT]], i64 8
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[X]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[Q:%.*]], i64 [[X]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
; CHECK-NEXT: ret ptr [[SELECT]]
@@ -209,8 +209,8 @@ define ptr @test3c(ptr %p, ptr %q, i64 %x, i64 %y) {
define ptr @test3d(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3d(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2_SPLIT:%.*]] = getelementptr inbounds [4 x i32], ptr [[Q:%.*]], i64 [[X]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[Q:%.*]], i64 [[X]]
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP2_SPLIT]], i64 8
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
@@ -227,8 +227,8 @@ define ptr @test3d(ptr %p, ptr %q, i64 %x, i64 %y) {
define ptr @test4(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[Q:%.*]], i64 [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y]]
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
; CHECK-NEXT: ret ptr [[SELECT]]
@@ -244,8 +244,8 @@ define ptr @test4(ptr %p, ptr %q, i64 %x, i64 %y) {
define <2 x ptr> @test5(ptr %p1, ptr %p2, <2 x i64> %idx, <2 x i1> %cc) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[P1:%.*]], <2 x i64> [[IDX:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i64, ptr [[P2:%.*]], <2 x i64> [[IDX]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [8 x i8], ptr [[P1:%.*]], <2 x i64> [[IDX:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [8 x i8], ptr [[P2:%.*]], <2 x i64> [[IDX]]
; CHECK-NEXT: [[SELECT:%.*]] = select <2 x i1> [[CC:%.*]], <2 x ptr> [[GEP1]], <2 x ptr> [[GEP2]]
; CHECK-NEXT: ret <2 x ptr> [[SELECT]]
;
@@ -258,10 +258,10 @@ define <2 x ptr> @test5(ptr %p1, ptr %p2, <2 x i64> %idx, <2 x i1> %cc) {
; PR51069 - multiple uses
define ptr @test6(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SEL_IDX:%.*]] = select i1 [[ICMP]], i64 [[Y]], i64 0
-; CHECK-NEXT: [[SEL:%.*]] = getelementptr inbounds i32, ptr [[GEP1]], i64 [[SEL_IDX]]
+; CHECK-NEXT: [[SEL:%.*]] = getelementptr inbounds [4 x i8], ptr [[GEP1]], i64 [[SEL_IDX]]
; CHECK-NEXT: call void @use_i32p(ptr [[GEP1]])
; CHECK-NEXT: ret ptr [[SEL]]
;
@@ -278,7 +278,7 @@ declare void @use_i32p(ptr)
define <2 x ptr> @test7(<2 x ptr> %p1, i64 %idx, <2 x i1> %cc) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, <2 x ptr> [[P1:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [8 x i8], <2 x ptr> [[P1:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = select <2 x i1> [[CC:%.*]], <2 x ptr> [[P1]], <2 x ptr> [[GEP]]
; CHECK-NEXT: ret <2 x ptr> [[SELECT]]
;
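The select-of-GEPs folds above keep firing after the retyping: when both arms index the same base with the same 4-byte stride, the select (or a umax, where the comparison allows it) moves onto the index. A minimal sketch mirroring the @test2 output, with illustrative names:

declare i64 @llvm.umax.i64(i64, i64)

define ptr @select_gep_sketch(ptr %p, i64 %x, i64 %y) {
  %m = call i64 @llvm.umax.i64(i64 %x, i64 %y)
  %s = getelementptr inbounds [4 x i8], ptr %p, i64 %m
  ret ptr %s
}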
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index 2ff3c3cd9d990..3a9e756bafc36 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1692,7 +1692,7 @@ define i177 @lshr_out_of_range(i177 %Y, ptr %A2, ptr %ptr) {
; CHECK-NEXT: [[B4:%.*]] = sext i1 [[TMP1]] to i177
; CHECK-NEXT: [[C8:%.*]] = icmp ugt i177 [[Y]], [[B4]]
; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[C8]] to i64
-; CHECK-NEXT: [[G18:%.*]] = getelementptr ptr, ptr [[A2:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[G18:%.*]] = getelementptr [8 x i8], ptr [[A2:%.*]], i64 [[TMP2]]
; CHECK-NEXT: store ptr [[G18]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i177 0
;
@@ -1716,7 +1716,7 @@ define i177 @lshr_out_of_range2(i177 %Y, ptr %A2, ptr %ptr) {
; CHECK-LABEL: @lshr_out_of_range2(
; CHECK-NEXT: [[C8:%.*]] = icmp ne i177 [[Y:%.*]], 0
; CHECK-NEXT: [[TMP1:%.*]] = sext i1 [[C8]] to i64
-; CHECK-NEXT: [[G18:%.*]] = getelementptr ptr, ptr [[A2:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[G18:%.*]] = getelementptr [8 x i8], ptr [[A2:%.*]], i64 [[TMP1]]
; CHECK-NEXT: store ptr [[G18]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i177 0
;
@@ -1743,13 +1743,13 @@ define void @ashr_out_of_range(ptr %A) "instcombine-no-verify-fixpoint" {
; CHECK-NEXT: [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i177 [[L]], -1
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 -1, i64 -2
-; CHECK-NEXT: [[G11:%.*]] = getelementptr i177, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[G11:%.*]] = getelementptr [24 x i8], ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[L7:%.*]] = load i177, ptr [[G11]], align 4
; CHECK-NEXT: [[L7_FROZEN:%.*]] = freeze i177 [[L7]]
; CHECK-NEXT: [[C171:%.*]] = icmp slt i177 [[L7_FROZEN]], 0
; CHECK-NEXT: [[C17:%.*]] = select i1 [[TMP1]], i1 [[C171]], i1 false
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[C17]] to i64
-; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP3]]
+; CHECK-NEXT: [[G62:%.*]] = getelementptr [24 x i8], ptr [[G11]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i177 [[L7_FROZEN]], -1
; CHECK-NEXT: [[B28:%.*]] = select i1 [[TMP4]], i177 0, i177 [[L7_FROZEN]]
; CHECK-NEXT: store i177 [[B28]], ptr [[G62]], align 4
@@ -1780,10 +1780,10 @@ define void @ashr_out_of_range_1(ptr %A) {
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i177 [[L_FROZEN]], -1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i177 [[L_FROZEN]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP1]], i64 0, i64 [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i177, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [24 x i8], ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: [[G11:%.*]] = getelementptr i8, ptr [[TMP4]], i64 -24
; CHECK-NEXT: [[TMP5:%.*]] = sext i1 [[TMP1]] to i64
-; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP5]]
+; CHECK-NEXT: [[G62:%.*]] = getelementptr [24 x i8], ptr [[G11]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i177 [[L_FROZEN]], -1
; CHECK-NEXT: [[B28:%.*]] = select i1 [[TMP6]], i177 0, i177 [[L_FROZEN]]
; CHECK-NEXT: store i177 [[B28]], ptr [[G62]], align 4
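For non-byte-sized integers the array size is the type's alloc size, not bitwidth/8: under the default datalayout assumed here, i177 stores in ceil(177/8) = 23 bytes and is padded to its 8-byte alignment, hence the [24 x i8] stride above. A minimal sketch, with illustrative names:

define ptr @i177_stride_sketch(ptr %a, i64 %i) {
  ; one i177 element advances by its 24-byte alloc size
  %g = getelementptr [24 x i8], ptr %a, i64 %i
  ret ptr %g
}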
diff --git a/llvm/test/Transforms/InstCombine/sink_instruction.ll b/llvm/test/Transforms/InstCombine/sink_instruction.ll
index cb9a3069ca5fd..c3af8163830b8 100644
--- a/llvm/test/Transforms/InstCombine/sink_instruction.ll
+++ b/llvm/test/Transforms/InstCombine/sink_instruction.ll
@@ -90,7 +90,7 @@ define i32 @test3(ptr nocapture readonly %P, i32 %i) {
; CHECK-NEXT: ]
; CHECK: sw.bb:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[I]]
; CHECK-NEXT: br label [[SW_EPILOG]]
@@ -148,7 +148,7 @@ define i32 @test5(ptr nocapture readonly %P, i32 %i, i1 %cond) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: br i1 [[COND:%.*]], label [[DISPATCHBB:%.*]], label [[SW_EPILOG:%.*]]
; CHECK: dispatchBB:
@@ -186,7 +186,7 @@ define i32 @test6(ptr nocapture readonly %P, i32 %i, i1 %cond) {
; CHECK-NEXT: br label [[DISPATCHBB:%.*]]
; CHECK: dispatchBB:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: switch i32 [[I]], label [[SW_BB:%.*]] [
; CHECK-NEXT: i32 5, label [[SW_EPILOG:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/store.ll b/llvm/test/Transforms/InstCombine/store.ll
index daa40da1828b5..45f16d874c04e 100644
--- a/llvm/test/Transforms/InstCombine/store.ll
+++ b/llvm/test/Transforms/InstCombine/store.ll
@@ -49,7 +49,7 @@ define void @test2(ptr %P) {
define void @store_at_gep_off_null_inbounds(i64 %offset) {
; CHECK-LABEL: @store_at_gep_off_null_inbounds(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr null, i64 [[OFFSET:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr null, i64 [[OFFSET:%.*]]
; CHECK-NEXT: store i32 poison, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
@@ -60,7 +60,7 @@ define void @store_at_gep_off_null_inbounds(i64 %offset) {
define void @store_at_gep_off_null_not_inbounds(i64 %offset) {
; CHECK-LABEL: @store_at_gep_off_null_not_inbounds(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr i32, ptr null, i64 [[OFFSET:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [4 x i8], ptr null, i64 [[OFFSET:%.*]]
; CHECK-NEXT: store i32 poison, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
@@ -71,7 +71,7 @@ define void @store_at_gep_off_null_not_inbounds(i64 %offset) {
define void @store_at_gep_off_no_null_opt(i64 %offset) #0 {
; CHECK-LABEL: @store_at_gep_off_no_null_opt(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr null, i64 [[OFFSET:%.*]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr null, i64 [[OFFSET:%.*]]
; CHECK-NEXT: store i32 24, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
@@ -170,7 +170,7 @@ define void @test6(i32 %n, ptr %a, ptr %gi) nounwind uwtable ssp {
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GI]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[INC]] = add nsw i32 [[TMP0]], 1
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 01da63fa5b0af..a0d90da2e0d49 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -329,7 +329,7 @@ define i64 @test25(ptr %P, i64 %A){
define i64 @zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
; CHECK-LABEL: @zext_ptrtoint_sub_ptrtoint(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET:%.*]] to i64
-; CHECK-NEXT: [[A:%.*]] = getelementptr bfloat, ptr @Arr, i64 [[TMP1]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [2 x i8], ptr @Arr, i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[C:%.*]] = and i64 [[TMP2]], 4294967294
; CHECK-NEXT: [[D:%.*]] = sub i64 [[C]], ptrtoint (ptr @Arr to i64)
@@ -345,7 +345,7 @@ define i64 @zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
define i64 @ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET:%.*]] to i64
-; CHECK-NEXT: [[A:%.*]] = getelementptr bfloat, ptr @Arr, i64 [[TMP1]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [2 x i8], ptr @Arr, i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[C:%.*]] = and i64 [[TMP2]], 4294967294
; CHECK-NEXT: [[D:%.*]] = sub i64 ptrtoint (ptr @Arr to i64), [[C]]
@@ -361,7 +361,7 @@ define i64 @ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
define i64 @negative_zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
; CHECK-LABEL: @negative_zext_ptrtoint_sub_ptrtoint(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET:%.*]] to i64
-; CHECK-NEXT: [[A:%.*]] = getelementptr bfloat, ptr @Arr, i64 [[TMP1]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [2 x i8], ptr @Arr, i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[C:%.*]] = and i64 [[TMP2]], 65534
; CHECK-NEXT: [[D:%.*]] = sub i64 [[C]], ptrtoint (ptr @Arr to i64)
@@ -377,7 +377,7 @@ define i64 @negative_zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
define i64 @negative_ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
; CHECK-LABEL: @negative_ptrtoint_sub_zext_ptrtoint(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET:%.*]] to i64
-; CHECK-NEXT: [[A:%.*]] = getelementptr bfloat, ptr @Arr, i64 [[TMP1]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr [2 x i8], ptr @Arr, i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[C:%.*]] = and i64 [[TMP2]], 65534
; CHECK-NEXT: [[D:%.*]] = sub i64 ptrtoint (ptr @Arr to i64), [[C]]
@@ -409,7 +409,7 @@ define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
define i64 @ptrtoint_sub_zext_ptrtoint_as2_inbounds(i32 %offset) {
; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint_as2_inbounds(
-; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds bfloat, ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [2 x i8], ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
; CHECK-NEXT: [[B:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32
; CHECK-NEXT: [[C:%.*]] = zext i32 [[B]] to i64
; CHECK-NEXT: [[D:%.*]] = sub nsw i64 ptrtoint (ptr addrspace(2) @Arr_as2 to i64), [[C]]
@@ -491,7 +491,7 @@ define i64 @zext_ptrtoint_sub_zext_ptrtoint_as2_nuw(i32 %offset) {
define i64 @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw(i32 %offset) {
; CHECK-LABEL: @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw(
-; CHECK-NEXT: [[A:%.*]] = getelementptr nuw bfloat, ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr nuw [2 x i8], ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32
; CHECK-NEXT: [[B_MASK:%.*]] = and i32 [[TMP1]], 65534
; CHECK-NEXT: [[C:%.*]] = zext nneg i32 [[B_MASK]] to i64
@@ -507,7 +507,7 @@ define i64 @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw(i32 %offset) {
define i64 @negative_zext_ptrtoint_sub_zext_ptrtoint_as2_nuw_truncating(i32 %offset) {
; CHECK-LABEL: @negative_zext_ptrtoint_sub_zext_ptrtoint_as2_nuw_truncating(
-; CHECK-NEXT: [[A:%.*]] = getelementptr nuw bfloat, ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr nuw [2 x i8], ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
; CHECK-NEXT: [[A_IDX:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32
; CHECK-NEXT: [[E:%.*]] = zext i32 [[A_IDX]] to i64
; CHECK-NEXT: [[D:%.*]] = zext i16 ptrtoint (ptr addrspace(2) @Arr_as2 to i16) to i64
@@ -524,7 +524,7 @@ define i64 @negative_zext_ptrtoint_sub_zext_ptrtoint_as2_nuw_truncating(i32 %off
define i64 @ptrtoint_sub_zext_ptrtoint_as2_inbounds_local(ptr addrspace(2) %p, i32 %offset) {
; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint_as2_inbounds_local(
-; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds bfloat, ptr addrspace(2) [[P:%.*]], i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [2 x i8], ptr addrspace(2) [[P:%.*]], i32 [[OFFSET:%.*]]
; CHECK-NEXT: [[B:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32
; CHECK-NEXT: [[C:%.*]] = zext i32 [[B]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[P]] to i32
@@ -614,7 +614,7 @@ define i64 @zext_ptrtoint_sub_zext_ptrtoint_as2_nuw_local(ptr addrspace(2) %p, i
define i64 @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw_local(ptr addrspace(2) %p, i32 %offset) {
; CHECK-LABEL: @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw_local(
-; CHECK-NEXT: [[A:%.*]] = getelementptr nuw bfloat, ptr addrspace(2) [[P:%.*]], i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[A:%.*]] = getelementptr nuw [2 x i8], ptr addrspace(2) [[P:%.*]], i32 [[OFFSET:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32
; CHECK-NEXT: [[B_MASK:%.*]] = and i32 [[TMP1]], 65535
; CHECK-NEXT: [[C:%.*]] = zext nneg i32 [[B_MASK]] to i64
@@ -955,7 +955,7 @@ declare void @use(ptr)
define i64 @multiple_geps_two_chains_gep_base(ptr %base, i64 %base.idx, i64 %idx, i64 %idx2, i64 %idx3) {
; CHECK-LABEL: @multiple_geps_two_chains_gep_base(
-; CHECK-NEXT: [[GEP_BASE:%.*]] = getelementptr inbounds i32, ptr [[BASE:%.*]], i64 [[BASE_IDX:%.*]]
+; CHECK-NEXT: [[GEP_BASE:%.*]] = getelementptr inbounds [4 x i8], ptr [[BASE:%.*]], i64 [[BASE_IDX:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP_BASE]])
; CHECK-NEXT: [[P2_IDX1:%.*]] = add i64 [[IDX:%.*]], [[IDX2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[P2_IDX1]], [[IDX3:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll
index 00cc06f74d544..d186fe1f5d6f4 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll
@@ -502,7 +502,7 @@ define <3 x float> @shuf_frem_const_op1(<3 x float> %x) {
define ptr @gep_vbase_w_s_idx(<2 x ptr> %base, i64 %index) {
; CHECK-LABEL: @gep_vbase_w_s_idx(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x ptr> [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[INDEX:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%gep = getelementptr i32, <2 x ptr> %base, i64 %index
@@ -538,7 +538,7 @@ define ptr @gep_splat_base_w_cv_idx(ptr %base) {
define ptr @gep_splat_base_w_vidx(ptr %base, <2 x i64> %idxvec) {
; CHECK-LABEL: @gep_splat_base_w_vidx(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i64> [[IDXVEC:%.*]], i64 1
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[EE]]
;
%basevec1 = insertelement <2 x ptr> poison, ptr %base, i32 0
@@ -553,7 +553,7 @@ define ptr @gep_splat_base_w_vidx(ptr %base, <2 x i64> %idxvec) {
define ptr @gep_cvbase_w_s_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_s_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr @GLOBAL, i64 [[RAW_ADDR:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr @GLOBAL, i64 [[RAW_ADDR:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, i64 %raw_addr
@@ -583,7 +583,7 @@ define ptr @gep_sbase_w_cv_idx(ptr %base) {
define ptr @gep_sbase_w_splat_idx(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_sbase_w_splat_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%idxvec1 = insertelement <2 x i64> poison, i64 %idx, i32 0
@@ -594,7 +594,7 @@ define ptr @gep_sbase_w_splat_idx(ptr %base, i64 %idx) {
}
define ptr @gep_splat_both(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_splat_both(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%basevec1 = insertelement <2 x ptr> poison, ptr %base, i32 0
@@ -610,7 +610,7 @@ define <2 x ptr> @gep_all_lanes_undef(ptr %base, i64 %idx) {;
; CHECK-LABEL: @gep_all_lanes_undef(
; CHECK-NEXT: [[BASEVEC:%.*]] = insertelement <2 x ptr> poison, ptr [[BASE:%.*]], i64 0
; CHECK-NEXT: [[IDXVEC:%.*]] = insertelement <2 x i64> poison, i64 [[IDX:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC]], <2 x i64> [[IDXVEC]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], <2 x ptr> [[BASEVEC]], <2 x i64> [[IDXVEC]]
; CHECK-NEXT: ret <2 x ptr> [[GEP]]
;
%basevec = insertelement <2 x ptr> poison, ptr %base, i32 0
@@ -652,7 +652,7 @@ define ptr @zero_sized_type_extract(<4 x i64> %arg, i64 %arg1) {
; CHECK-LABEL: @zero_sized_type_extract(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i64> [[ARG:%.*]], i64 0
-; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i32, ptr @global, i64 [[TMP0]]
+; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [4 x i8], ptr @global, i64 [[TMP0]]
; CHECK-NEXT: ret ptr [[T2]]
;
bb:
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
index cf71eddf7dbf5..7e24ddfac5b1b 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -505,7 +505,7 @@ define <3 x float> @shuf_frem_const_op1(<3 x float> %x) {
define ptr @gep_vbase_w_s_idx(<2 x ptr> %base, i64 %index) {
; CHECK-LABEL: @gep_vbase_w_s_idx(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x ptr> [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[INDEX:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%gep = getelementptr i32, <2 x ptr> %base, i64 %index
@@ -541,7 +541,7 @@ define ptr @gep_splat_base_w_cv_idx(ptr %base) {
define ptr @gep_splat_base_w_vidx(ptr %base, <2 x i64> %idxvec) {
; CHECK-LABEL: @gep_splat_base_w_vidx(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i64> [[IDXVEC:%.*]], i64 1
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[EE]]
;
%basevec1 = insertelement <2 x ptr> undef, ptr %base, i32 0
@@ -556,7 +556,7 @@ define ptr @gep_splat_base_w_vidx(ptr %base, <2 x i64> %idxvec) {
define ptr @gep_cvbase_w_s_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_s_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr @GLOBAL, i64 [[RAW_ADDR:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr @GLOBAL, i64 [[RAW_ADDR:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, i64 %raw_addr
@@ -586,7 +586,7 @@ define ptr @gep_sbase_w_cv_idx(ptr %base) {
define ptr @gep_sbase_w_splat_idx(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_sbase_w_splat_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%idxvec1 = insertelement <2 x i64> undef, i64 %idx, i32 0
@@ -597,7 +597,7 @@ define ptr @gep_sbase_w_splat_idx(ptr %base, i64 %idx) {
}
define ptr @gep_splat_both(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_splat_both(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr [4 x i8], ptr [[BASE:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: ret ptr [[EE]]
;
%basevec1 = insertelement <2 x ptr> undef, ptr %base, i32 0
@@ -613,7 +613,7 @@ define <2 x ptr> @gep_all_lanes_undef(ptr %base, i64 %idx) {;
; CHECK-LABEL: @gep_all_lanes_undef(
; CHECK-NEXT: [[BASEVEC:%.*]] = insertelement <2 x ptr> <ptr poison, ptr undef>, ptr [[BASE:%.*]], i64 0
; CHECK-NEXT: [[IDXVEC:%.*]] = insertelement <2 x i64> <i64 undef, i64 poison>, i64 [[IDX:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC]], <2 x i64> [[IDXVEC]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], <2 x ptr> [[BASEVEC]], <2 x i64> [[IDXVEC]]
; CHECK-NEXT: ret <2 x ptr> [[GEP]]
;
%basevec = insertelement <2 x ptr> undef, ptr %base, i32 0
@@ -655,7 +655,7 @@ define ptr @zero_sized_type_extract(<4 x i64> %arg, i64 %arg1) {
; CHECK-LABEL: @zero_sized_type_extract(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i64> [[ARG:%.*]], i64 0
-; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i32, ptr @global, i64 [[TMP0]]
+; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [4 x i8], ptr @global, i64 [[TMP0]]
; CHECK-NEXT: ret ptr [[T2]]
;
bb:
diff --git a/llvm/test/Transforms/InstCombine/vec_phi_extract-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_phi_extract-inseltpoison.ll
index ecd5e49bd6b28..c6966a034986f 100644
--- a/llvm/test/Transforms/InstCombine/vec_phi_extract-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_phi_extract-inseltpoison.ll
@@ -11,7 +11,7 @@ define void @f(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[TMP1]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 10
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[PTR:%.*]], i64 [[TMP3]]
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP1]], 16
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
@@ -50,7 +50,7 @@ define void @copy(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[TMP1]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 10
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[PTR:%.*]], i64 [[TMP3]]
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP1]], 16
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
@@ -95,7 +95,7 @@ define void @nocopy(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[ELT]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[ELTCOPY]], 10
; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[ELT]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[PTR:%.*]], i64 [[TMP6]]
; CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INC]] = add <16 x i32> [[TMP4]], splat (i32 16)
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/vec_phi_extract.ll b/llvm/test/Transforms/InstCombine/vec_phi_extract.ll
index ef093709a98f5..55a86852e6d4a 100644
--- a/llvm/test/Transforms/InstCombine/vec_phi_extract.ll
+++ b/llvm/test/Transforms/InstCombine/vec_phi_extract.ll
@@ -11,7 +11,7 @@ define void @f(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[TMP1]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 10
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[PTR:%.*]], i64 [[TMP3]]
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP1]], 16
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
@@ -50,7 +50,7 @@ define void @copy(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[TMP1]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 10
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[PTR:%.*]], i64 [[TMP3]]
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP1]], 16
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
@@ -95,7 +95,7 @@ define void @nocopy(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[ELT]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[ELTCOPY]], 10
; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[ELT]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[PTR:%.*]], i64 [[TMP6]]
; CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INC]] = add <16 x i32> [[TMP4]], splat (i32 16)
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll
index 4c7a28bac6f4d..4d79e9599b348 100644
--- a/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll
@@ -55,7 +55,7 @@ define <2 x i1> @test5(<2 x ptr> %a) {
define <2 x ptr> @test7(<2 x ptr> %a) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>
+; CHECK-NEXT: [[W:%.*]] = getelementptr [8 x i8], <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>
; CHECK-NEXT: ret <2 x ptr> [[W]]
;
%w = getelementptr {i32, i32}, <2 x ptr> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/InstCombine/vector_gep1.ll b/llvm/test/Transforms/InstCombine/vector_gep1.ll
index 88af1c45e89a5..3ca712e1c2f24 100644
--- a/llvm/test/Transforms/InstCombine/vector_gep1.ll
+++ b/llvm/test/Transforms/InstCombine/vector_gep1.ll
@@ -55,7 +55,7 @@ define <2 x i1> @test5(<2 x ptr> %a) {
define <2 x ptr> @test7(<2 x ptr> %a) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>
+; CHECK-NEXT: [[W:%.*]] = getelementptr [8 x i8], <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>
; CHECK-NEXT: ret <2 x ptr> [[W]]
;
%w = getelementptr {i32, i32}, <2 x ptr> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/InstCombine/vector_gep2.ll b/llvm/test/Transforms/InstCombine/vector_gep2.ll
index cfa2f6277d858..aa86dd35d6dbe 100644
--- a/llvm/test/Transforms/InstCombine/vector_gep2.ll
+++ b/llvm/test/Transforms/InstCombine/vector_gep2.ll
@@ -15,7 +15,7 @@ define <2 x ptr> @testa(<2 x ptr> %a) {
define <8 x ptr> @vgep_s_v8i64(ptr %a, <8 x i64>%i) {
; CHECK-LABEL: @vgep_s_v8i64(
-; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr double, ptr [[A:%.*]], <8 x i64> [[I:%.*]]
+; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], <8 x i64> [[I:%.*]]
; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP]]
;
%VectorGep = getelementptr double, ptr %a, <8 x i64> %i
@@ -25,7 +25,7 @@ define <8 x ptr> @vgep_s_v8i64(ptr %a, <8 x i64>%i) {
define <8 x ptr> @vgep_s_v8i32(ptr %a, <8 x i32>%i) {
; CHECK-LABEL: @vgep_s_v8i32(
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i32> [[I:%.*]] to <8 x i64>
-; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr double, ptr [[A:%.*]], <8 x i64> [[TMP1]]
+; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], <8 x i64> [[TMP1]]
; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP]]
;
%VectorGep = getelementptr double, ptr %a, <8 x i32> %i
diff --git a/llvm/test/Transforms/InstCombine/vectorgep-crash.ll b/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
index d33a2d3417bfd..46bfeba3020ad 100644
--- a/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
+++ b/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
@@ -14,7 +14,7 @@ define <8 x ptr> @test_vector_gep(ptr %arg1, <8 x i64> %arg2) {
; CHECK-LABEL: define <8 x ptr> @test_vector_gep(
; CHECK-SAME: ptr [[ARG1:%.*]], <8 x i64> [[ARG2:%.*]]) {
; CHECK-NEXT: [[TOP:.*:]]
-; CHECK-NEXT: [[VECTORGEP14_SPLIT:%.*]] = getelementptr inbounds [[DUAL:%.*]], ptr [[ARG1]], <8 x i64> [[ARG2]]
+; CHECK-NEXT: [[VECTORGEP14_SPLIT:%.*]] = getelementptr inbounds [72 x i8], ptr [[ARG1]], <8 x i64> [[ARG2]]
; CHECK-NEXT: [[VECTORGEP14:%.*]] = getelementptr inbounds i8, <8 x ptr> [[VECTORGEP14_SPLIT]], i64 32
; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP14]]
;
@@ -32,7 +32,7 @@ top:
define <16 x ptr> @test() {
; CHECK-LABEL: define <16 x ptr> @test() {
; CHECK-NEXT: [[VECTOR_BODY:.*:]]
-; CHECK-NEXT: ret <16 x ptr> getelementptr ([65 x %struct.A], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, i32 0)
+; CHECK-NEXT: ret <16 x ptr> getelementptr ([65 x [[STRUCT_A:%.*]]], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, i32 0)
;
vector.body:
%VectorGep = getelementptr [65 x %struct.A], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, <16 x i32> zeroinitializer
@@ -42,7 +42,7 @@ vector.body:
define <16 x ptr> @test2() {
; CHECK-LABEL: define <16 x ptr> @test2() {
; CHECK-NEXT: [[VECTOR_BODY:.*:]]
-; CHECK-NEXT: ret <16 x ptr> getelementptr ([65 x %struct.A], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, i32 0)
+; CHECK-NEXT: ret <16 x ptr> getelementptr ([65 x [[STRUCT_A:%.*]]], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, i32 0)
;
vector.body:
%VectorGep = getelementptr [65 x %struct.A], ptr @G, <16 x i32> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, <16 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/InstCombine/wcslen-1.ll b/llvm/test/Transforms/InstCombine/wcslen-1.ll
index 3c79c32ffe0da..d7f0620ac89f1 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-1.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-1.ll
@@ -131,7 +131,7 @@ define i64 @test_simplify10_gepi32(i64 %x) {
define i64 @test_simplify10_gepi64(i32 %x) {
; CHECK-LABEL: @test_simplify10_gepi64(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds i64, ptr @hello, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [8 x i8], ptr @hello, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
@@ -142,7 +142,7 @@ define i64 @test_simplify10_gepi64(i32 %x) {
define i64 @test_simplify10_gepi16(i64 %x) {
; CHECK-LABEL: @test_simplify10_gepi16(
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds i16, ptr @hello, i64 [[X:%.*]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [2 x i8], ptr @hello, i64 [[X:%.*]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
@@ -233,7 +233,7 @@ define i64 @test_no_simplify1() {
define i64 @test_no_simplify2(i32 %x) {
; CHECK-LABEL: @test_no_simplify2(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds i32, ptr @null_hello, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [4 x i8], ptr @null_hello, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
@@ -245,7 +245,7 @@ define i64 @test_no_simplify2(i32 %x) {
define i64 @test_no_simplify2_no_null_opt(i32 %x) #0 {
; CHECK-LABEL: @test_no_simplify2_no_null_opt(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds i32, ptr @null_hello, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [4 x i8], ptr @null_hello, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
@@ -260,7 +260,7 @@ define i64 @test_no_simplify3(i32 %x) {
; CHECK-LABEL: @test_no_simplify3(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 15
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw i32, ptr @null_hello_mid, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @null_hello_mid, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
@@ -274,7 +274,7 @@ define i64 @test_no_simplify3_no_null_opt(i32 %x) #0 {
; CHECK-LABEL: @test_no_simplify3_no_null_opt(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 15
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw i32, ptr @null_hello_mid, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @null_hello_mid, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
diff --git a/llvm/test/Transforms/InstCombine/wcslen-3.ll b/llvm/test/Transforms/InstCombine/wcslen-3.ll
index 89fc4e758c54e..3aac15219af4c 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-3.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-3.ll
@@ -150,7 +150,7 @@ define i64 @test_no_simplify1() {
define i64 @test_no_simplify2(i16 %x) {
; CHECK-LABEL: @test_no_simplify2(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds i16, ptr @null_hello, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [2 x i8], ptr @null_hello, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
@@ -165,7 +165,7 @@ define i64 @test_no_simplify3(i16 %x) {
; CHECK-LABEL: @test_no_simplify3(
; CHECK-NEXT: [[AND:%.*]] = and i16 [[X:%.*]], 15
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i16 [[AND]] to i64
-; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw i16, ptr @null_hello_mid, i64 [[TMP1]]
+; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [2 x i8], ptr @null_hello_mid, i64 [[TMP1]]
; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
; CHECK-NEXT: ret i64 [[HELLO_L]]
;
diff --git a/llvm/test/Transforms/InstCombine/wcslen-5.ll b/llvm/test/Transforms/InstCombine/wcslen-5.ll
index b127f2ae5ea1b..4e60385ac6578 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-5.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-5.ll
@@ -19,7 +19,7 @@ declare i64 @wcslen(ptr)
define dso_local i64 @fold_wcslen_s3_pi_s5(i1 zeroext %0, i64 %1) {
; CHECK-LABEL: @fold_wcslen_s3_pi_s5(
-; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds i32, ptr @ws3, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @ws3, i64 [[TMP1:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI]], ptr @ws5
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
; CHECK-NEXT: ret i64 [[LEN]]
@@ -41,7 +41,7 @@ define dso_local i64 @fold_wcslen_s3_pi_p1_s5(i1 zeroext %0, i64 %1) {
; XFAIL-CHECK-NEXT: [[SEL:%.*]] = select i1 %0, i64 [[DIF_I]], i64 5
; XFAIL-CHECK-NEXT: ret i64 [[SEL]]
; CHECK-LABEL: @fold_wcslen_s3_pi_p1_s5(
-; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds i32, ptr @ws3, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @ws3, i64 [[TMP1:%.*]]
; CHECK-NEXT: [[PS3_PI_P1:%.*]] = getelementptr inbounds nuw i8, ptr [[PS3_PI]], i64 4
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI_P1]], ptr @ws5
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
@@ -62,7 +62,7 @@ define dso_local i64 @fold_wcslen_s3_pi_p1_s5(i1 zeroext %0, i64 %1) {
define dso_local i64 @call_wcslen_s5_3_pi_s5(i1 zeroext %0, i64 %1) {
; CHECK-LABEL: @call_wcslen_s5_3_pi_s5(
-; CHECK-NEXT: [[PS5_3_PI:%.*]] = getelementptr inbounds i32, ptr @ws5_3, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS5_3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @ws5_3, i64 [[TMP1:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS5_3_PI]], ptr @ws5
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
; CHECK-NEXT: ret i64 [[LEN]]
@@ -79,7 +79,7 @@ define dso_local i64 @call_wcslen_s5_3_pi_s5(i1 zeroext %0, i64 %1) {
define dso_local i64 @call_wcslen_s5_3_s5_pj(i1 zeroext %0, i64 %1) {
; CHECK-LABEL: @call_wcslen_s5_3_s5_pj(
-; CHECK-NEXT: [[PS5:%.*]] = getelementptr inbounds i32, ptr @ws5, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS5:%.*]] = getelementptr inbounds [4 x i8], ptr @ws5, i64 [[TMP1:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @ws5_3, ptr [[PS5]]
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
; CHECK-NEXT: ret i64 [[LEN]]
@@ -96,7 +96,7 @@ define dso_local i64 @call_wcslen_s5_3_s5_pj(i1 zeroext %0, i64 %1) {
define dso_local i64 @fold_wcslen_s3_s5_pj(i1 zeroext %0, i64 %1) {
; CHECK-LABEL: @fold_wcslen_s3_s5_pj(
-; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds i32, ptr @ws5, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds [4 x i8], ptr @ws5, i64 [[TMP1:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @ws3, ptr [[PS5_PJ]]
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
; CHECK-NEXT: ret i64 [[LEN]]
@@ -115,7 +115,7 @@ define dso_local i64 @fold_wcslen_s3_s5_pj(i1 zeroext %0, i64 %1) {
define dso_local i64 @call_wcslen_s3_s5_3_pj(i1 zeroext %0, i64 %1) {
; CHECK-LABEL: @call_wcslen_s3_s5_3_pj(
-; CHECK-NEXT: [[PS5_3_PJ:%.*]] = getelementptr inbounds i32, ptr @ws5_3, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS5_3_PJ:%.*]] = getelementptr inbounds [4 x i8], ptr @ws5_3, i64 [[TMP1:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @ws3, ptr [[PS5_3_PJ]]
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
; CHECK-NEXT: ret i64 [[LEN]]
@@ -132,8 +132,8 @@ define dso_local i64 @call_wcslen_s3_s5_3_pj(i1 zeroext %0, i64 %1) {
define dso_local i64 @fold_wcslen_s3_pi_s5_pj(i1 zeroext %0, i64 %1, i64 %2) {
; CHECK-LABEL: @fold_wcslen_s3_pi_s5_pj(
-; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds i32, ptr @ws3, i64 [[TMP1:%.*]]
-; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds i32, ptr @ws5, i64 [[TMP2:%.*]]
+; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @ws3, i64 [[TMP1:%.*]]
+; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds [4 x i8], ptr @ws5, i64 [[TMP2:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI]], ptr [[PS5_PJ]]
; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]])
; CHECK-NEXT: ret i64 [[LEN]]
diff --git a/llvm/test/Transforms/LoopSimplify/merge-exits.ll b/llvm/test/Transforms/LoopSimplify/merge-exits.ll
index 10e634fd47a1a..ad09dbeca0ac5 100644
--- a/llvm/test/Transforms/LoopSimplify/merge-exits.ll
+++ b/llvm/test/Transforms/LoopSimplify/merge-exits.ll
@@ -22,11 +22,11 @@ define float @test1(ptr %pTmp1, ptr %peakWeight, i32 %bandEdgeIndex) nounwind {
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[BB_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[BB]] ]
; CHECK-NEXT: [[DISTERBHI_04:%.*]] = phi float [ 0.000000e+00, [[BB_LR_PH]] ], [ [[T4:%.*]], [[BB]] ]
; CHECK-NEXT: [[PEAKCOUNT_02:%.*]] = phi float [ [[T0]], [[BB_LR_PH]] ], [ [[T9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[T2:%.*]] = getelementptr float, ptr [[PTMP1:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[T2:%.*]] = getelementptr [4 x i8], ptr [[PTMP1:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[T3:%.*]] = load float, ptr [[T2]], align 4
; CHECK-NEXT: [[T4]] = fadd float [[T3]], [[DISTERBHI_04]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[T7:%.*]] = getelementptr float, ptr [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[T7:%.*]] = getelementptr [4 x i8], ptr [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[T8:%.*]] = load float, ptr [[T7]], align 4
; CHECK-NEXT: [[T9]] = fadd float [[T8]], [[PEAKCOUNT_02]]
; CHECK-NEXT: [[T10:%.*]] = fcmp olt float [[T4]], 2.500000e+00
@@ -93,11 +93,11 @@ define float @merge_branches_profile_metadata(ptr %pTmp1, ptr %peakWeight, i32 %
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[BB_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[BB]] ]
; CHECK-NEXT: [[DISTERBHI_04:%.*]] = phi float [ 0.000000e+00, [[BB_LR_PH]] ], [ [[T4:%.*]], [[BB]] ]
; CHECK-NEXT: [[PEAKCOUNT_02:%.*]] = phi float [ [[T0]], [[BB_LR_PH]] ], [ [[T9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[T2:%.*]] = getelementptr float, ptr [[PTMP1:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[T2:%.*]] = getelementptr [4 x i8], ptr [[PTMP1:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[T3:%.*]] = load float, ptr [[T2]], align 4
; CHECK-NEXT: [[T4]] = fadd float [[T3]], [[DISTERBHI_04]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[T7:%.*]] = getelementptr float, ptr [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[T7:%.*]] = getelementptr [4 x i8], ptr [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[T8:%.*]] = load float, ptr [[T7]], align 4
; CHECK-NEXT: [[T9]] = fadd float [[T8]], [[PEAKCOUNT_02]]
; CHECK-NEXT: [[T10:%.*]] = fcmp olt float [[T4]], 2.500000e+00
diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll b/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll
index 66c55f20e533f..3411c997747d9 100644
--- a/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll
+++ b/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll
@@ -19,50 +19,50 @@ define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg
; CHECK-A55: for.body6:
; CHECK-A55-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY6_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_BODY6]] ]
; CHECK-A55-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[FOR_BODY6_PREHEADER_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_BODY6]] ]
-; CHECK-A55-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2:%.*]], i64 [[INDVARS_IV]]
+; CHECK-A55-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2:%.*]], i64 [[INDVARS_IV]]
; CHECK-A55-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3:%.*]], i64 [[INDVARS_IV]]
+; CHECK-A55-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3:%.*]], i64 [[INDVARS_IV]]
; CHECK-A55-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-A55-NEXT: [[MUL16:%.*]] = mul nsw i32 [[CONV15]], [[CONV]]
-; CHECK-A55-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1:%.*]], i64 [[INDVARS_IV]]
+; CHECK-A55-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1:%.*]], i64 [[INDVARS_IV]]
; CHECK-A55-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[ADD21:%.*]] = add nsw i32 [[MUL16]], [[TMP3]]
; CHECK-A55-NEXT: store i32 [[ADD21]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[INDVARS_IV_NEXT:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
-; CHECK-A55-NEXT: [[ARRAYIDX10_1:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-A55-NEXT: [[ARRAYIDX10_1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT]]
; CHECK-A55-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX10_1]], align 2
; CHECK-A55-NEXT: [[CONV_1:%.*]] = sext i16 [[TMP4]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14_1:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-A55-NEXT: [[ARRAYIDX14_1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT]]
; CHECK-A55-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX14_1]], align 2
; CHECK-A55-NEXT: [[CONV15_1:%.*]] = sext i16 [[TMP5]] to i32
; CHECK-A55-NEXT: [[MUL16_1:%.*]] = mul nsw i32 [[CONV15_1]], [[CONV_1]]
-; CHECK-A55-NEXT: [[ARRAYIDX20_1:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-A55-NEXT: [[ARRAYIDX20_1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT]]
; CHECK-A55-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX20_1]], align 4
; CHECK-A55-NEXT: [[ADD21_1:%.*]] = add nsw i32 [[MUL16_1]], [[TMP6]]
; CHECK-A55-NEXT: store i32 [[ADD21_1]], ptr [[ARRAYIDX20_1]], align 4
; CHECK-A55-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = or disjoint i64 [[INDVARS_IV]], 2
-; CHECK-A55-NEXT: [[ARRAYIDX10_2:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-A55-NEXT: [[ARRAYIDX10_2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-A55-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX10_2]], align 2
; CHECK-A55-NEXT: [[CONV_2:%.*]] = sext i16 [[TMP7]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14_2:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-A55-NEXT: [[ARRAYIDX14_2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-A55-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX14_2]], align 2
; CHECK-A55-NEXT: [[CONV15_2:%.*]] = sext i16 [[TMP8]] to i32
; CHECK-A55-NEXT: [[MUL16_2:%.*]] = mul nsw i32 [[CONV15_2]], [[CONV_2]]
-; CHECK-A55-NEXT: [[ARRAYIDX20_2:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-A55-NEXT: [[ARRAYIDX20_2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-A55-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX20_2]], align 4
; CHECK-A55-NEXT: [[ADD21_2:%.*]] = add nsw i32 [[MUL16_2]], [[TMP9]]
; CHECK-A55-NEXT: store i32 [[ADD21_2]], ptr [[ARRAYIDX20_2]], align 4
; CHECK-A55-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = or disjoint i64 [[INDVARS_IV]], 3
-; CHECK-A55-NEXT: [[ARRAYIDX10_3:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-A55-NEXT: [[ARRAYIDX10_3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-A55-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX10_3]], align 2
; CHECK-A55-NEXT: [[CONV_3:%.*]] = sext i16 [[TMP10]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14_3:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-A55-NEXT: [[ARRAYIDX14_3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-A55-NEXT: [[TMP11:%.*]] = load i16, ptr [[ARRAYIDX14_3]], align 2
; CHECK-A55-NEXT: [[CONV15_3:%.*]] = sext i16 [[TMP11]] to i32
; CHECK-A55-NEXT: [[MUL16_3:%.*]] = mul nsw i32 [[CONV15_3]], [[CONV_3]]
-; CHECK-A55-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-A55-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-A55-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX20_3]], align 4
; CHECK-A55-NEXT: [[ADD21_3:%.*]] = add nsw i32 [[MUL16_3]], [[TMP12]]
; CHECK-A55-NEXT: store i32 [[ADD21_3]], ptr [[ARRAYIDX20_3]], align 4
@@ -77,14 +77,14 @@ define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg
; CHECK-A55-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[FOR_BODY6_PREHEADER]] ], [ [[INDVARS_IV_NEXT_3]], [[FOR_END_LOOPEXIT_UNR_LCSSA1]] ]
; CHECK-A55-NEXT: [[LCMP_MOD5:%.*]] = icmp ne i64 [[XTRAITER]], 0
; CHECK-A55-NEXT: tail call void @llvm.assume(i1 [[LCMP_MOD5]])
-; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_UNR]]
+; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2]], i64 [[INDVARS_IV_UNR]]
; CHECK-A55-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX10_EPIL]], align 2
; CHECK-A55-NEXT: [[CONV_EPIL:%.*]] = sext i16 [[TMP13]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14_EPIL:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3]], i64 [[INDVARS_IV_UNR]]
+; CHECK-A55-NEXT: [[ARRAYIDX14_EPIL:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3]], i64 [[INDVARS_IV_UNR]]
; CHECK-A55-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX14_EPIL]], align 2
; CHECK-A55-NEXT: [[CONV15_EPIL:%.*]] = sext i16 [[TMP14]] to i32
; CHECK-A55-NEXT: [[MUL16_EPIL:%.*]] = mul nsw i32 [[CONV15_EPIL]], [[CONV_EPIL]]
-; CHECK-A55-NEXT: [[ARRAYIDX20_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1]], i64 [[INDVARS_IV_UNR]]
+; CHECK-A55-NEXT: [[ARRAYIDX20_EPIL:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1]], i64 [[INDVARS_IV_UNR]]
; CHECK-A55-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX20_EPIL]], align 4
; CHECK-A55-NEXT: [[ADD21_EPIL:%.*]] = add nsw i32 [[MUL16_EPIL]], [[TMP15]]
; CHECK-A55-NEXT: store i32 [[ADD21_EPIL]], ptr [[ARRAYIDX20_EPIL]], align 4
@@ -92,14 +92,14 @@ define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg
; CHECK-A55-NEXT: br i1 [[EPIL_ITER_CMP_NOT]], label [[FOR_END]], label [[FOR_BODY6_EPIL_1:%.*]]
; CHECK-A55: for.body6.epil.1:
; CHECK-A55-NEXT: [[INDVARS_IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[INDVARS_IV_UNR]], 1
-; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL_1:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL_1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_EPIL]]
; CHECK-A55-NEXT: [[TMP16:%.*]] = load i16, ptr [[ARRAYIDX10_EPIL_1]], align 2
; CHECK-A55-NEXT: [[CONV_EPIL_1:%.*]] = sext i16 [[TMP16]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14_EPIL_1:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-A55-NEXT: [[ARRAYIDX14_EPIL_1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_EPIL]]
; CHECK-A55-NEXT: [[TMP17:%.*]] = load i16, ptr [[ARRAYIDX14_EPIL_1]], align 2
; CHECK-A55-NEXT: [[CONV15_EPIL_1:%.*]] = sext i16 [[TMP17]] to i32
; CHECK-A55-NEXT: [[MUL16_EPIL_1:%.*]] = mul nsw i32 [[CONV15_EPIL_1]], [[CONV_EPIL_1]]
-; CHECK-A55-NEXT: [[ARRAYIDX20_EPIL_1:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-A55-NEXT: [[ARRAYIDX20_EPIL_1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_EPIL]]
; CHECK-A55-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX20_EPIL_1]], align 4
; CHECK-A55-NEXT: [[ADD21_EPIL_1:%.*]] = add nsw i32 [[MUL16_EPIL_1]], [[TMP18]]
; CHECK-A55-NEXT: store i32 [[ADD21_EPIL_1]], ptr [[ARRAYIDX20_EPIL_1]], align 4
@@ -107,14 +107,14 @@ define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg
; CHECK-A55-NEXT: br i1 [[EPIL_ITER_CMP_1_NOT]], label [[FOR_END]], label [[FOR_BODY6_EPIL_2:%.*]]
; CHECK-A55: for.body6.epil.2:
; CHECK-A55-NEXT: [[INDVARS_IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_UNR]], 2
-; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL_2:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL_2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
; CHECK-A55-NEXT: [[TMP19:%.*]] = load i16, ptr [[ARRAYIDX10_EPIL_2]], align 2
; CHECK-A55-NEXT: [[CONV_EPIL_2:%.*]] = sext i16 [[TMP19]] to i32
-; CHECK-A55-NEXT: [[ARRAYIDX14_EPIL_2:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-A55-NEXT: [[ARRAYIDX14_EPIL_2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
; CHECK-A55-NEXT: [[TMP20:%.*]] = load i16, ptr [[ARRAYIDX14_EPIL_2]], align 2
; CHECK-A55-NEXT: [[CONV15_EPIL_2:%.*]] = sext i16 [[TMP20]] to i32
; CHECK-A55-NEXT: [[MUL16_EPIL_2:%.*]] = mul nsw i32 [[CONV15_EPIL_2]], [[CONV_EPIL_2]]
-; CHECK-A55-NEXT: [[ARRAYIDX20_EPIL_2:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-A55-NEXT: [[ARRAYIDX20_EPIL_2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
; CHECK-A55-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX20_EPIL_2]], align 4
; CHECK-A55-NEXT: [[ADD21_EPIL_2:%.*]] = add nsw i32 [[MUL16_EPIL_2]], [[TMP21]]
; CHECK-A55-NEXT: store i32 [[ADD21_EPIL_2]], ptr [[ARRAYIDX20_EPIL_2]], align 4
@@ -131,14 +131,14 @@ define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg
; CHECK-GENERIC-NEXT: br label [[FOR_BODY6:%.*]]
; CHECK-GENERIC: for.body6:
; CHECK-GENERIC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY6_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY6]] ]
-; CHECK-GENERIC-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2:%.*]], i64 [[INDVARS_IV]]
+; CHECK-GENERIC-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_2:%.*]], i64 [[INDVARS_IV]]
; CHECK-GENERIC-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-GENERIC-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
-; CHECK-GENERIC-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_3:%.*]], i64 [[INDVARS_IV]]
+; CHECK-GENERIC-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG_3:%.*]], i64 [[INDVARS_IV]]
; CHECK-GENERIC-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-GENERIC-NEXT: [[CONV15:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-GENERIC-NEXT: [[MUL16:%.*]] = mul nsw i32 [[CONV15]], [[CONV]]
-; CHECK-GENERIC-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds nuw i32, ptr [[ARG_1:%.*]], i64 [[INDVARS_IV]]
+; CHECK-GENERIC-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARG_1:%.*]], i64 [[INDVARS_IV]]
; CHECK-GENERIC-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-GENERIC-NEXT: [[ADD21:%.*]] = add nsw i32 [[MUL16]], [[TMP2]]
; CHECK-GENERIC-NEXT: store i32 [[ADD21]], ptr [[ARRAYIDX20]], align 4
diff --git a/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll b/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll
index 5e0fc75a6bbb2..7f17fe284b803 100644
--- a/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll
+++ b/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll
@@ -71,28 +71,28 @@ define hidden void @compile_time_partial(ptr nocapture %a, ptr nocapture readonl
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC_3:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[B:%.*]], i32 [[I_07]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[B:%.*]], i32 [[I_07]]
; CHECK-NEXT: [[I:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[ADD:%.*]] = add i16 [[I]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i16, ptr [[A:%.*]], i32 [[I_07]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[A:%.*]], i32 [[I_07]]
; CHECK-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX2]], align 2
; CHECK-NEXT: [[INC:%.*]] = or disjoint i32 [[I_07]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i16, ptr [[B]], i32 [[INC]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[B]], i32 [[INC]]
; CHECK-NEXT: [[I_1:%.*]] = load i16, ptr [[ARRAYIDX_1]], align 2
; CHECK-NEXT: [[ADD_1:%.*]] = add i16 [[I_1]], 1
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i32 [[INC]]
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[A]], i32 [[INC]]
; CHECK-NEXT: store i16 [[ADD_1]], ptr [[ARRAYIDX2_1]], align 2
; CHECK-NEXT: [[INC_1:%.*]] = or disjoint i32 [[I_07]], 2
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i16, ptr [[B]], i32 [[INC_1]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[B]], i32 [[INC_1]]
; CHECK-NEXT: [[I_2:%.*]] = load i16, ptr [[ARRAYIDX_2]], align 2
; CHECK-NEXT: [[ADD_2:%.*]] = add i16 [[I_2]], 1
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i32 [[INC_1]]
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[A]], i32 [[INC_1]]
; CHECK-NEXT: store i16 [[ADD_2]], ptr [[ARRAYIDX2_2]], align 2
; CHECK-NEXT: [[INC_2:%.*]] = or disjoint i32 [[I_07]], 3
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i16, ptr [[B]], i32 [[INC_2]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[B]], i32 [[INC_2]]
; CHECK-NEXT: [[I_3:%.*]] = load i16, ptr [[ARRAYIDX_3]], align 2
; CHECK-NEXT: [[ADD_3:%.*]] = add i16 [[I_3]], 1
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i32 [[INC_2]]
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[A]], i32 [[INC_2]]
; CHECK-NEXT: store i16 [[ADD_3]], ptr [[ARRAYIDX2_3]], align 2
; CHECK-NEXT: [[INC_3]] = add nuw nsw i32 [[I_07]], 4
; CHECK-NEXT: [[EXITCOND_NOT_3:%.*]] = icmp eq i32 [[INC_3]], 1000
@@ -135,12 +135,12 @@ define hidden void @runtime(ptr nocapture %a, ptr nocapture readonly %b, ptr noc
; CHECK-NEXT: [[I_09_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INC_1:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]] ]
; CHECK-NEXT: [[LCMP_MOD1:%.*]] = trunc i32 [[N]] to i1
; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]])
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[I_09_UNR]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[I_09_UNR]]
; CHECK-NEXT: [[I_EPIL:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
-; CHECK-NEXT: [[ARRAYIDX1_EPIL:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_09_UNR]]
+; CHECK-NEXT: [[ARRAYIDX1_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i32 [[I_09_UNR]]
; CHECK-NEXT: [[I1_EPIL:%.*]] = load i32, ptr [[ARRAYIDX1_EPIL]], align 4
; CHECK-NEXT: [[MUL_EPIL:%.*]] = mul nsw i32 [[I1_EPIL]], [[I_EPIL]]
-; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_09_UNR]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[I_09_UNR]]
; CHECK-NEXT: store i32 [[MUL_EPIL]], ptr [[ARRAYIDX2_EPIL]], align 4
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
@@ -148,20 +148,20 @@ define hidden void @runtime(ptr nocapture %a, ptr nocapture readonly %b, ptr noc
; CHECK: for.body:
; CHECK-NEXT: [[I_09:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INC_1]], [[FOR_BODY]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[I_09]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[I_09]]
; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[I_09]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[C]], i32 [[I_09]]
; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I1]], [[I]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_09]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[I_09]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = or disjoint i32 [[I_09]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INC]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[INC]]
; CHECK-NEXT: [[I_1:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX1_1:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INC]]
+; CHECK-NEXT: [[ARRAYIDX1_1:%.*]] = getelementptr inbounds [4 x i8], ptr [[C]], i32 [[INC]]
; CHECK-NEXT: [[I1_1:%.*]] = load i32, ptr [[ARRAYIDX1_1]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[I1_1]], [[I_1]]
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INC]]
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[INC]]
; CHECK-NEXT: store i32 [[MUL_1]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[INC_1]] = add nuw i32 [[I_09]], 2
; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2
@@ -198,12 +198,12 @@ define hidden void @dont_unroll_call(ptr nocapture %a, ptr nocapture readonly %b
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[I_013]]
; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i32 [[I_013]]
; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I1]], [[I]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[I_013]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(1) @.str, i32 [[I_013]], i32 [[MUL]])
; CHECK-NEXT: [[INC]] = add nuw i32 [[I_013]], 1
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll b/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll
index 65ef3e40ec229..99276e44095e6 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll
@@ -32,14 +32,14 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT_LOOPEXIT:%.*]], label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[SUM_02]]
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_1:%.*]]
; CHECK: for.exiting_block.1:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_1:%.*]]
; CHECK: latch.1:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i8, ptr [[TMP13]], i64 4
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP4]], [[ADD]]
@@ -47,7 +47,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.2:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_2:%.*]]
; CHECK: latch.2:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i8, ptr [[TMP15]], i64 8
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP5]], [[ADD_1]]
@@ -55,7 +55,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.3:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_3:%.*]]
; CHECK: latch.3:
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr i8, ptr [[TMP17]], i64 12
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP6]], [[ADD_2]]
@@ -63,7 +63,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.4:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_4:%.*]]
; CHECK: latch.4:
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr i8, ptr [[TMP18]], i64 16
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP7]], [[ADD_3]]
@@ -71,7 +71,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.5:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_5:%.*]]
; CHECK: latch.5:
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr i8, ptr [[TMP12]], i64 20
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP8]], [[ADD_4]]
@@ -79,7 +79,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.6:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_6:%.*]]
; CHECK: latch.6:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr i8, ptr [[TMP14]], i64 24
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP9]], [[ADD_5]]
@@ -87,7 +87,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.7:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_7]]
; CHECK: latch.7:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr i8, ptr [[TMP16]], i64 28
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
; CHECK-NEXT: [[ADD_7]] = add nsw i32 [[TMP10]], [[ADD_6]]
@@ -113,7 +113,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_EPIL]], label [[OTHEREXIT_LOOPEXIT3:%.*]], label [[LATCH_EPIL]]
; CHECK: latch.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; CHECK-NEXT: [[ADD_EPIL]] = add nsw i32 [[TMP11]], [[SUM_02_EPIL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
@@ -337,7 +337,7 @@ define i32 @test2(ptr nocapture %a, i64 %n) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -550,7 +550,7 @@ define i32 @test3(ptr nocapture %a, i64 %n) !prof !{!"function_entry_count", i64
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -650,7 +650,7 @@ define i32 @test4(ptr nocapture %a, i64 %n) !prof !{!"function_entry_count", i64
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[INDVARS_IV]], 4096
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -757,14 +757,14 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT_LOOPEXIT:%.*]], label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[SUM_02]]
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_1:%.*]]
; CHECK: for.exiting_block.1:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_1:%.*]]
; CHECK: latch.1:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i8, ptr [[TMP13]], i64 4
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP4]], [[ADD]]
@@ -772,7 +772,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.2:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_2:%.*]]
; CHECK: latch.2:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i8, ptr [[TMP15]], i64 8
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP5]], [[ADD_1]]
@@ -780,7 +780,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.3:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_3:%.*]]
; CHECK: latch.3:
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr i8, ptr [[TMP17]], i64 12
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP6]], [[ADD_2]]
@@ -788,7 +788,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.4:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_4:%.*]]
; CHECK: latch.4:
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr i8, ptr [[TMP18]], i64 16
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP7]], [[ADD_3]]
@@ -796,7 +796,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.5:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_5:%.*]]
; CHECK: latch.5:
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr i8, ptr [[TMP12]], i64 20
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP8]], [[ADD_4]]
@@ -804,7 +804,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.6:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_6:%.*]]
; CHECK: latch.6:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr i8, ptr [[TMP14]], i64 24
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP9]], [[ADD_5]]
@@ -812,7 +812,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK: for.exiting_block.7:
; CHECK-NEXT: br i1 false, label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_7]]
; CHECK: latch.7:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr i8, ptr [[TMP16]], i64 28
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
; CHECK-NEXT: [[ADD_7]] = add nsw i32 [[TMP10]], [[ADD_6]]
@@ -838,7 +838,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) {
; CHECK-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_EPIL]], label [[OTHEREXIT_LOOPEXIT3:%.*]], label [[LATCH_EPIL]]
; CHECK: latch.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; CHECK-NEXT: [[ADD_EPIL]] = add nsw i32 [[TMP11]], [[SUM_02_EPIL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll
index 5f4bbf15f01af..3c9c3bd10a93c 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll
@@ -25,9 +25,9 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N)
; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]])
; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]]
; CHECK: for.body.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_EPIL_INIT]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV_EPIL_INIT]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_EPIL_INIT]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV_EPIL_INIT]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL]], align 4
; CHECK-NEXT: [[MUL_EPIL:%.*]] = mul nsw i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nsw i32 [[MUL_EPIL]], [[C_010_EPIL_INIT]]
@@ -35,9 +35,9 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N)
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA:%.*]], label [[FOR_BODY_EPIL_1:%.*]]
; CHECK: for.body.epil.1:
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[INDVARS_IV_EPIL_INIT]], 1
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL_1]], align 4
; CHECK-NEXT: [[MUL_EPIL_1:%.*]] = mul nsw i32 [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[ADD_EPIL_1:%.*]] = add nsw i32 [[MUL_EPIL_1]], [[ADD_EPIL]]
@@ -45,9 +45,9 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N)
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]], label [[FOR_BODY_EPIL_2:%.*]]
; CHECK: for.body.epil.2:
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_EPIL_INIT]], 2
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL_2]], align 4
; CHECK-NEXT: [[MUL_EPIL_2:%.*]] = mul nsw i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT: [[ADD_EPIL_2:%.*]] = add nsw i32 [[MUL_EPIL_2]], [[ADD_EPIL_1]]
@@ -65,30 +65,30 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N)
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH_NEW]] ], [ [[INDVARS_IV_NEXT_3]], [[FOR_BODY]] ]
; CHECK-NEXT: [[C_010:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_NEW]] ], [ [[ADD_3]], [[FOR_BODY]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[MUL]], [[C_010]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[TMP10]], [[TMP9]]
; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[MUL_1]], [[ADD]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = or disjoint i64 [[INDVARS_IV]], 2
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[TMP12]], [[TMP11]]
; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[MUL_2]], [[ADD_1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = or disjoint i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: [[MUL_3:%.*]] = mul nsw i32 [[TMP14]], [[TMP13]]
; CHECK-NEXT: [[ADD_3]] = add nsw i32 [[MUL_3]], [[ADD_2]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll b/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll
index ddfdb257ed49a..1f3949172b758 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll
@@ -23,7 +23,7 @@ define i64 @predicated_udiv_scalarized_operand(ptr %a, i64 %x) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE2:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[ENTRY]] ], [ [[TMP17:%.*]], [[PRED_UDIV_CONTINUE2]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i64> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i64 0
@@ -84,3 +84,5 @@ for.end:
%var7 = phi i64 [ %var6, %for.inc ]
ret i64 %var7
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; COST: {{.*}}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
index 3f32bbe1f0102..daa521c5c0623 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
@@ -19,12 +19,12 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[COND:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> align 2 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i16> poison)
; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP7]], ptr align 4 [[TMP8]], <vscale x 4 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -77,11 +77,11 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[COND:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[COND:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP5]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = fcmp ogt <vscale x 4 x double> [[WIDE_LOAD]], splat (double 4.000000e-01)
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP6]], <vscale x 4 x double> poison)
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP7]], <vscale x 4 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -134,14 +134,14 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_LOAD]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP9]], ptr align 4 [[TMP10]], <vscale x 4 x i1> [[TMP7]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
index f964447bb4acc..93ff1352d126f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
@@ -17,11 +17,11 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i64>, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], <vscale x 4 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], <vscale x 4 x i64> [[WIDE_LOAD]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -68,12 +68,12 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], <vscale x 4 x i64> [[TMP7]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x ptr> align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -120,7 +120,7 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> splat (i32 3), <vscale x 4 x ptr> align 4 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP6]])
@@ -172,7 +172,7 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i32> poison)
@@ -239,8 +239,8 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias
; CHECK-NEXT: [[WIDE_VEC1:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_VEC1]])
; CHECK-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC2]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP12]], i64 [[TMP3]]
; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER]], ptr [[TMP12]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER2]], ptr [[TMP14]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
index 337c097c85712..d46a1fb85beac 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
@@ -31,9 +31,9 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = trunc <vscale x 4 x i64> [[VEC_IND]] to <vscale x 4 x i1>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP10]], <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], ptr align 4 [[TMP11]], <vscale x 4 x i1> [[TMP9]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 9f2354ec28d79..c9194f47a12ea 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -36,14 +36,14 @@ define void @test_array_load2_store2(i32 %C, i32 %D) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr @AB, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr @AB, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add nsw <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <vscale x 4 x i32> [[TMP4]], [[BROADCAST_SPLAT2]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr @CD, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr @CD, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP7]])
; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP8]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
@@ -114,10 +114,10 @@ define void @test_array_load2_i16_store2(i32 %C, i32 %D) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr @AB_i16, <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8], ptr @AB_i16, <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> align 2 [[TMP6]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison)
; CHECK-NEXT: [[TMP7:%.*]] = or disjoint <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr @AB_i16, <vscale x 4 x i64> [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8], ptr @AB_i16, <vscale x 4 x i64> [[TMP7]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> align 2 [[TMP8]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison)
; CHECK-NEXT: [[TMP9:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP10:%.*]] = add nsw <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[TMP9]]
@@ -207,11 +207,11 @@ define void @test_array_load2_store2_i16(i32 noundef %C, i32 noundef %D) #1 {
; CHECK-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP10:%.*]] = add nsw <vscale x 4 x i32> [[TMP7]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP11:%.*]] = trunc <vscale x 4 x i32> [[TMP10]] to <vscale x 4 x i16>
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr @CD_i16, <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8], ptr @CD_i16, <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> [[TMP11]], <vscale x 4 x ptr> align 2 [[TMP12]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[TMP13:%.*]] = mul nsw <vscale x 4 x i32> [[TMP8]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i32> [[TMP13]] to <vscale x 4 x i16>
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i16, ptr @CD_i16, <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8], ptr @CD_i16, <vscale x 4 x i64> [[TMP9]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> [[TMP14]], <vscale x 4 x ptr> align 2 [[TMP15]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
@@ -267,21 +267,21 @@ define i32 @test_struct_load6(ptr %S) #1 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[S:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [24 x i8], ptr [[S:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP5]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [24 x i8], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds [24 x i8], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT6]], i64 8
; CHECK-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[DOTSPLIT7:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT7:%.*]] = getelementptr inbounds [24 x i8], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT7]], i64 12
; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds [24 x i8], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT8]], i64 16
; CHECK-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [24 x i8], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT9]], i64 20
; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
@@ -370,10 +370,10 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP8:%.*]] = sub nsw i64 2, [[TMP6]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP5]], i64 [[TMP8]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP9]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
@@ -382,10 +382,10 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP11]])
; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 4 x i32> [[REVERSE]], [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = sub nsw <vscale x 4 x i32> [[REVERSE1]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP18:%.*]] = sub nsw i64 2, [[TMP15]]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP14]], i64 [[TMP18]]
; CHECK-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP12]])
; CHECK-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP13]])
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[REVERSE2]], <vscale x 4 x i32> [[REVERSE3]])
@@ -452,7 +452,7 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP6:%.*]] = shl nsw <vscale x 4 x i32> [[TMP5]], splat (i32 1)
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[INDEX]], 9223372036854775804
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[TMP7]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP6]], ptr [[TMP8]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -521,7 +521,7 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP14:%.*]] = shl nsw <vscale x 4 x i32> [[TMP13]], splat (i32 1)
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[INDEX]], 9223372036854775804
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[TMP15]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP16]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -587,8 +587,8 @@ define void @load_gap_reverse(ptr noalias nocapture readonly %P1, ptr noalias no
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <vscale x 4 x i64> [[BROADCAST_SPLAT1]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [16 x i8], ptr [[P1:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P2:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 8
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> align 8 [[TMP6]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i64> poison)
; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], [[VEC_IND]]
@@ -643,13 +643,13 @@ define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <vscale x 4 x i32> [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 1
@@ -730,7 +730,7 @@ define void @int_float_struct(ptr nocapture readonly %p) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
@@ -811,9 +811,9 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x ptr> [[TMP12]], i64 0
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP14]], align 4
@@ -881,8 +881,8 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x ptr> [[TMP13]], i64 0
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP12]], align 4
@@ -958,9 +958,9 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT]], i64 4
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
@@ -1030,10 +1030,10 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[TMP12]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], <vscale x 4 x i64> [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, <vscale x 4 x ptr> [[DOTSPLIT3]], i64 4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
@@ -1119,7 +1119,7 @@ define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[P:%.*]] = extractelement <vscale x 4 x ptr> [[TMP13]], i64 0
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[BROADCAST_SPLAT2]], <vscale x 4 x i32> [[BROADCAST_SPLAT4]])
@@ -1199,9 +1199,9 @@ define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 -1)
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 -3)
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], <vscale x 4 x i64> [[TMP14]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT2]], <vscale x 4 x ptr> align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT4]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true))
@@ -1288,17 +1288,17 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) #1 {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP15]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP18:%.*]] = or disjoint <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i16, ptr [[A]], <vscale x 4 x i64> [[TMP18]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x i8], ptr [[A]], <vscale x 4 x i64> [[TMP18]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP20]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison), !alias.scope [[META34:![0-9]+]]
; CHECK-NEXT: [[TMP21:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i16, ptr [[A]], <vscale x 4 x i64> [[TMP19]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8], ptr [[A]], <vscale x 4 x i64> [[TMP19]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER4]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison), !alias.scope [[META34]]
; CHECK-NEXT: [[TMP23:%.*]] = call <vscale x 4 x i16> @llvm.vector.splice.right.nxv4i16(<vscale x 4 x i16> [[VECTOR_RECUR]], <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]], i32 1)
; CHECK-NEXT: [[TMP24:%.*]] = sext <vscale x 4 x i16> [[TMP23]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP25:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP26:%.*]] = mul nsw <vscale x 4 x i32> [[TMP24]], [[TMP21]]
; CHECK-NEXT: [[TMP27:%.*]] = mul nsw <vscale x 4 x i32> [[TMP26]], [[TMP25]]
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP27]], ptr [[TMP28]], align 4, !alias.scope [[META37:![0-9]+]], !noalias [[META34]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
@@ -1372,12 +1372,12 @@ define void @interleave_deinterleave_factor3(ptr writeonly noalias %dst, ptr rea
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_XYZ:%.*]], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [12 x i8], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_XYZ]], ptr [[B:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [12 x i8], ptr [[B:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP20]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <vscale x 4 x i32> [[TMP11]], [[TMP7]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_XYZ]], ptr [[DST:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [12 x i8], ptr [[DST:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, <vscale x 4 x ptr> [[TMP19]], i64 4
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
@@ -1460,14 +1460,14 @@ define void @interleave_deinterleave(ptr writeonly noalias %dst, ptr readonly %a
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_XYZT:%.*]], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [16 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 16 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave4.nxv16i32(<vscale x 16 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_XYZT]], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [16 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC8:%.*]] = load <vscale x 16 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave4.nxv16i32(<vscale x 16 x i32> [[WIDE_VEC8]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
@@ -1475,7 +1475,7 @@ define void @interleave_deinterleave(ptr writeonly noalias %dst, ptr readonly %a
; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 2
; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 3
; CHECK-NEXT: [[TMP20:%.*]] = add nsw <vscale x 4 x i32> [[TMP16]], [[TMP9]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_XYZT]], ptr [[DST:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [16 x i8], ptr [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP22:%.*]] = sub nsw <vscale x 4 x i32> [[TMP10]], [[TMP17]]
; CHECK-NEXT: [[TMP23:%.*]] = shl <vscale x 4 x i32> [[TMP11]], [[TMP18]]
; CHECK-NEXT: [[TMP24:%.*]] = ashr <vscale x 4 x i32> [[TMP12]], [[TMP19]]
@@ -1561,10 +1561,10 @@ define void @interleave_deinterleave_reverse(ptr noalias nocapture readonly %A,
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_XYZT:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [16 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-NEXT: [[TMP9:%.*]] = sub nsw i64 4, [[TMP6]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP5]], i64 [[TMP9]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 16 x i32>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave4.nxv16i32(<vscale x 16 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
@@ -1579,10 +1579,10 @@ define void @interleave_deinterleave_reverse(ptr noalias nocapture readonly %A,
; CHECK-NEXT: [[TMP18:%.*]] = sub nsw <vscale x 4 x i32> [[REVERSE3]], [[VEC_IND]]
; CHECK-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 4 x i32> [[REVERSE4]], [[VEC_IND]]
; CHECK-NEXT: [[TMP20:%.*]] = shl nuw nsw <vscale x 4 x i32> [[REVERSE5]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_XYZT]], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [16 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP22:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-NEXT: [[TMP25:%.*]] = sub nsw i64 4, [[TMP22]]
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP25]]
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP21]], i64 [[TMP25]]
; CHECK-NEXT: [[REVERSE6:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP17]])
; CHECK-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP18]])
; CHECK-NEXT: [[REVERSE8:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP19]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll
index 76cf7d4c2990b..a01e2cd88096b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll
@@ -6,7 +6,7 @@ define void @stride7_i32(ptr noalias nocapture %dst, i64 %n) #0 {
; CHECK: vector.body
; CHECK: %[[VEC_IND:.*]] = phi <vscale x 4 x i64> [ %{{.*}}, %vector.ph ], [ %{{.*}}, %vector.body ]
; CHECK-NEXT: %[[PTR_INDICES:.*]] = mul nuw nsw <vscale x 4 x i64> %[[VEC_IND]], splat (i64 7)
-; CHECK-NEXT: %[[PTRS:.*]] = getelementptr inbounds i32, ptr %dst, <vscale x 4 x i64> %[[PTR_INDICES]]
+; CHECK-NEXT: %[[PTRS:.*]] = getelementptr inbounds [4 x i8], ptr %dst, <vscale x 4 x i64> %[[PTR_INDICES]]
; CHECK-NEXT: %[[GLOAD:.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 %[[PTRS]]
; CHECK-NEXT: %[[VALS:.*]] = add nsw <vscale x 4 x i32> %[[GLOAD]],
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> %[[VALS]], <vscale x 4 x ptr> align 4 %[[PTRS]]
@@ -33,7 +33,7 @@ define void @stride7_f64(ptr noalias nocapture %dst, i64 %n) #0 {
; CHECK: vector.body
; CHECK: %[[VEC_IND:.*]] = phi <vscale x 2 x i64> [ %{{.*}}, %vector.ph ], [ %{{.*}}, %vector.body ]
; CHECK-NEXT: %[[PTR_INDICES:.*]] = mul nuw nsw <vscale x 2 x i64> %[[VEC_IND]], splat (i64 7)
-; CHECK-NEXT: %[[PTRS:.*]] = getelementptr inbounds double, ptr %dst, <vscale x 2 x i64> %[[PTR_INDICES]]
+; CHECK-NEXT: %[[PTRS:.*]] = getelementptr inbounds [8 x i8], ptr %dst, <vscale x 2 x i64> %[[PTR_INDICES]]
; CHECK-NEXT: %[[GLOAD:.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 %[[PTRS]],
; CHECK-NEXT: %[[VALS:.*]] = fadd <vscale x 2 x double> %[[GLOAD]],
; CHECK-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> %[[VALS]], <vscale x 2 x ptr> align 8 %[[PTRS]],
@@ -60,7 +60,7 @@ define void @cond_stride7_f64(ptr noalias nocapture %dst, ptr noalias nocapture
; CHECK-LABEL: @cond_stride7_f64(
; CHECK: vector.body
; CHECK: %[[MASK:.*]] = icmp ne <vscale x 2 x i64>
-; CHECK: %[[PTRS:.*]] = getelementptr inbounds double, ptr %dst, <vscale x 2 x i64> %{{.*}}
+; CHECK: %[[PTRS:.*]] = getelementptr inbounds [8 x i8], ptr %dst, <vscale x 2 x i64> %{{.*}}
; CHECK-NEXT: %[[GLOAD:.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 %[[PTRS]], <vscale x 2 x i1> %[[MASK]]
; CHECK-NEXT: %[[VALS:.*]] = fadd <vscale x 2 x double> %[[GLOAD]],
; CHECK-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> %[[VALS]], <vscale x 2 x ptr> align 8 %[[PTRS]], <vscale x 2 x i1> %[[MASK]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll
index 54c0dfdfe7366..272bf02ec4699 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll
@@ -6,7 +6,7 @@ define void @mloadstore_f32(ptr noalias nocapture %a, ptr noalias nocapture read
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 4 x float>, ptr
; CHECK-NEXT: %[[MASK:.*]] = fcmp ogt <vscale x 4 x float> %[[LOAD1]],
-; CHECK-NEXT: %[[GEPA:.*]] = getelementptr float, ptr %a,
+; CHECK-NEXT: %[[GEPA:.*]] = getelementptr [4 x i8], ptr %a,
; CHECK-NEXT: %[[LOAD2:.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 4 %[[GEPA]], <vscale x 4 x i1> %[[MASK]]
; CHECK-NEXT: %[[FADD:.*]] = fadd <vscale x 4 x float> %[[LOAD1]], %[[LOAD2]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %[[FADD]], ptr align 4 %[[GEPA]], <vscale x 4 x i1> %[[MASK]])
@@ -41,7 +41,7 @@ define void @mloadstore_i32(ptr noalias nocapture %a, ptr noalias nocapture read
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 4 x i32>, ptr
; CHECK-NEXT: %[[MASK:.*]] = icmp ne <vscale x 4 x i32> %[[LOAD1]],
-; CHECK-NEXT: %[[GEPA:.*]] = getelementptr i32, ptr %a,
+; CHECK-NEXT: %[[GEPA:.*]] = getelementptr [4 x i8], ptr %a,
; CHECK-NEXT: %[[LOAD2:.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 %[[GEPA]], <vscale x 4 x i1> %[[MASK]]
; CHECK-NEXT: %[[FADD:.*]] = add <vscale x 4 x i32> %[[LOAD1]], %[[LOAD2]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %[[FADD]], ptr align 4 %[[GEPA]], <vscale x 4 x i1> %[[MASK]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll
index 3b8625eb76711..9e053ee0d7fb8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll
@@ -22,10 +22,10 @@ define void @cannot_overflow_i32_induction_var(ptr noalias %dst, ptr readonly %s
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP0]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP1:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 42)
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP1]], ptr align 4 [[TMP2]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]])
@@ -82,10 +82,10 @@ define void @can_overflow_i64_induction_var(ptr noalias %dst, ptr readonly %src,
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP3]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 42)
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP4]], ptr align 4 [[TMP5]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
index f7ca39b8c920e..e1e980bbf79de 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
@@ -29,24 +29,24 @@ define void @vector_reverse_f64(i64 %N, ptr noalias %a, ptr noalias %b) #0{
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[N]], [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = sub i64 1, [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[TMP9]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP9]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = sub i64 0, [[TMP5]]
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 1, [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[TMP9]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[TMP14]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP9]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP14]], i64 [[TMP13]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, ptr [[TMP11]], align 8
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x double>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
; CHECK-NEXT: [[TMP17:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD1]], splat (double 1.000000e+00)
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP19:%.*]] = sub i64 1, [[TMP5]]
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds double, ptr [[TMP18]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP18]], i64 [[TMP19]]
; CHECK-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP5]]
; CHECK-NEXT: [[TMP22:%.*]] = sub i64 1, [[TMP5]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[TMP18]], i64 [[TMP21]]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds double, ptr [[TMP23]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP18]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP23]], i64 [[TMP22]]
; CHECK-NEXT: store <vscale x 8 x double> [[TMP16]], ptr [[TMP20]], align 8
; CHECK-NEXT: store <vscale x 8 x double> [[TMP17]], ptr [[TMP24]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
@@ -106,24 +106,24 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[N]], [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 1, [[TMP8]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP12]], i64 [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[TMP8]]
; CHECK-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP8]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP15]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP17]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP12]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP17]], i64 [[TMP16]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP14]], align 8
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i64>, ptr [[TMP18]], align 8
; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], splat (i64 1)
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD3]], splat (i64 1)
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP22:%.*]] = sub i64 1, [[TMP8]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP21]], i64 [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = sub i64 0, [[TMP8]]
; CHECK-NEXT: [[TMP25:%.*]] = sub i64 1, [[TMP8]]
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i64, ptr [[TMP26]], i64 [[TMP25]]
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP21]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP26]], i64 [[TMP25]]
; CHECK-NEXT: store <vscale x 8 x i64> [[TMP19]], ptr [[TMP23]], align 8
; CHECK-NEXT: store <vscale x 8 x i64> [[TMP20]], ptr [[TMP27]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll
index 8b6c7fe8f0fa5..87435219bcbdb 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll
@@ -13,12 +13,12 @@ define void @test_big_little_params(ptr readonly %a, ptr readonly %b, ptr noalia
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ splat (i1 true), [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP2]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i8> poison)
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @foo_vector(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i8> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[C]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP4]], ptr align 4 [[TMP5]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025)
@@ -57,12 +57,12 @@ define void @test_little_big_params(ptr readonly %a, ptr readonly %b, ptr noalia
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x float> @llvm.masked.load.nxv2f32.p0(ptr align 4 [[TMP2]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x float> poison)
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP3]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x double> @bar_vector(<vscale x 2 x float> [[WIDE_MASKED_LOAD]], <vscale x 2 x double> [[WIDE_MASKED_LOAD1]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[C]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP4]], ptr align 8 [[TMP5]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index dca0065eb854c..6969e15698311 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -47,14 +47,14 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 1
; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i32> [[TMP9]], splat (i32 1)
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <vscale x 4 x i32> [[TMP11]], splat (i32 1)
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP15]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP15]], i64 [[TMP3]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP15]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = add nsw <vscale x 4 x i32> [[TMP10]], splat (i32 1)
; CHECK-NEXT: [[TMP19:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], splat (i32 1)
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP20]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP20]], i64 [[TMP3]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP18]], ptr [[TMP20]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP19]], ptr [[TMP22]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
@@ -75,10 +75,10 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds nuw i8, ptr [[PTR_014]], i64 8
; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], 1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I_013]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP25]], 1
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I_013]]
; CHECK-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
@@ -146,12 +146,12 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2
; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[NEXT_GEP]], i64 [[TMP7]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; CHECK-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD6]], splat (i32 1)
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[NEXT_GEP5]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr [4 x i8], ptr [[NEXT_GEP5]], i64 [[TMP7]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP9]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP12]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll
index 209fa60b260aa..1aac78b88e9d2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll
@@ -25,10 +25,10 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP14]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP15]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -38,7 +38,7 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_PH]] ]
; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
@@ -48,14 +48,14 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX4]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX4]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x i32>, ptr [[TMP18]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD5]] to <vscale x 2 x i64>
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 2 x i64> [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 2 x i64> [[TMP19]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i32(<vscale x 2 x ptr> [[TMP20]], i32 1, <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP25]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
-; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[CMP_N7:%.*]] = icmp eq i64 [[N_MOD_VF2]], 0
; CHECK-NEXT: br i1 [[CMP_N7]], label [[FOR_EXIT]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-no-scalar-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-no-scalar-interleave.ll
index 013c218da2e16..2b3e124b952ef 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-no-scalar-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-no-scalar-interleave.ll
@@ -14,10 +14,10 @@ define void @simple_histogram_forced_scalar_interleave(ptr noalias %buckets, ptr
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll
index baf050c7facee..3d80b02ae585b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll
@@ -20,17 +20,17 @@ define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %othe
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEP_INDICES:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_INDICES:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[L_IDX:%.*]] = load i32, ptr [[GEP_INDICES]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[L_IDX]] to i64
-; CHECK-NEXT: [[GEP_BUCKET:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[GEP_BUCKET:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[L_BUCKET:%.*]] = load i32, ptr [[GEP_BUCKET]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[L_BUCKET]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[GEP_BUCKET]], align 4
-; CHECK-NEXT: [[IDX_ADDR:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IV]]
+; CHECK-NEXT: [[IDX_ADDR:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[IV]]
; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[IDX_ADDR]], align 4
-; CHECK-NEXT: [[GEP_OTHER:%.*]] = getelementptr inbounds nuw i32, ptr [[OTHER]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_OTHER:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[OTHER]], i64 [[IV]]
; CHECK-NEXT: [[L_OTHER:%.*]] = load i32, ptr [[GEP_OTHER]], align 4
; CHECK-NEXT: [[ADD_OTHER:%.*]] = add i32 [[L_OTHER]], [[IV_TRUNC]]
; CHECK-NEXT: store i32 [[ADD_OTHER]], ptr [[GEP_OTHER]], align 4
@@ -78,14 +78,14 @@ define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %othe
; NORMAL_DEP_LIMIT: vector.body:
; NORMAL_DEP_LIMIT-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; NORMAL_DEP_LIMIT-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP7]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; NORMAL_DEP_LIMIT-NEXT: [[GEP_INDICES:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
-; NORMAL_DEP_LIMIT-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP_INDICES]], align 4, !alias.scope [[META0:![0-9]+]]
+; NORMAL_DEP_LIMIT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[IV]]
+; NORMAL_DEP_LIMIT-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4, !alias.scope [[META0:![0-9]+]]
; NORMAL_DEP_LIMIT-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; NORMAL_DEP_LIMIT-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP11]]
+; NORMAL_DEP_LIMIT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP11]]
; NORMAL_DEP_LIMIT-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP12]], i32 1, <vscale x 4 x i1> splat (i1 true))
-; NORMAL_DEP_LIMIT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IV]]
-; NORMAL_DEP_LIMIT-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP13]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META5:![0-9]+]]
-; NORMAL_DEP_LIMIT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[OTHER]], i64 [[IV]]
+; NORMAL_DEP_LIMIT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[IV]]
+; NORMAL_DEP_LIMIT-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP10]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META5:![0-9]+]]
+; NORMAL_DEP_LIMIT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[OTHER]], i64 [[IV]]
; NORMAL_DEP_LIMIT-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
; NORMAL_DEP_LIMIT-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD10]], [[VEC_IND]]
; NORMAL_DEP_LIMIT-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP14]], align 4, !alias.scope [[META7]], !noalias [[META0]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index ee3144549bcbb..8816b76bbdaf0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -43,10 +43,10 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -92,10 +92,10 @@ define void @simple_histogram_inc_param(ptr noalias %buckets, ptr readonly %indi
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 [[INCVAL]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -141,10 +141,10 @@ define void @simple_histogram_sub(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = sext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 -1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -190,11 +190,11 @@ define void @conditional_histogram(ptr noalias %buckets, ptr readonly %indices,
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[CONDS]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[CONDS]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD1]], splat (i32 5100)
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 1, <vscale x 4 x i1> [[TMP13]])
@@ -257,7 +257,7 @@ define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i32>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i32> [[WIDE_LOAD]] to <vscale x 16 x i64>
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], <vscale x 16 x i64> [[TMP8]]
@@ -269,6 +269,45 @@ define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N)
; CHECK-NEXT: [[CMP_N1:%.*]] = icmp eq i64 [[N_VEC]], 0
; CHECK-NEXT: br i1 [[CMP_N1]], label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC]], [[TMP9]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11:![0-9]+]]
+; CHECK: vec.epilog.ph:
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC1]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP11]], 3
+; CHECK-NEXT: [[DOTNOT1:%.*]] = sub nsw i64 0, [[TMP12]]
+; CHECK-NEXT: [[N_VEC3:%.*]] = and i64 [[N]], [[DOTNOT1]]
+; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
+; CHECK: vec.epilog.vector.body:
+; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX4]]
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = zext <vscale x 8 x i32> [[WIDE_LOAD5]] to <vscale x 8 x i64>
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], <vscale x 8 x i64> [[TMP14]]
+; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv8p0.i8(<vscale x 8 x ptr> [[TMP15]], i8 1, <vscale x 8 x i1> splat (i1 true))
+; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP12]]
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: vec.epilog.middle.block:
+; CHECK-NEXT: [[CMP_N7:%.*]] = icmp eq i64 [[N]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[CMP_N7]], label [[FOR_EXIT]], label [[SCALAR_PH]]
+; CHECK: vec.epilog.scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC1]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[GEP_INDICES:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[IV1]]
+; CHECK-NEXT: [[L_IDX:%.*]] = load i32, ptr [[GEP_INDICES]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[L_IDX]] to i64
+; CHECK-NEXT: [[GEP_BUCKET:%.*]] = getelementptr inbounds nuw i8, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[L_BUCKET:%.*]] = load i8, ptr [[GEP_BUCKET]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i8 [[L_BUCKET]], 1
+; CHECK-NEXT: store i8 [[INC]], ptr [[GEP_BUCKET]], align 4
+; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
;
entry:
br label %for.body
@@ -298,16 +337,16 @@ define void @histogram_float(ptr noalias %buckets, ptr readonly %indices, i64 %N
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = fadd fast float [[TMP1]], 1.000000e+00
; CHECK-NEXT: store float [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -339,18 +378,18 @@ define void @histogram_varying_increment(ptr noalias %buckets, ptr readonly %ind
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[INCIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INCVALS]], i64 [[IV]]
+; CHECK-NEXT: [[INCIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INCVALS]], i64 [[IV]]
; CHECK-NEXT: [[INCVAL:%.*]] = load i32, ptr [[INCIDX]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], [[INCVAL]]
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -393,20 +432,20 @@ define void @simple_histogram_user_interleave(ptr noalias %buckets, ptr readonly
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[DOTIDX:%.*]] = shl nuw nsw i64 [[TMP4]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP8]], i64 [[DOTIDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP19]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP21]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -452,14 +491,14 @@ define void @histogram_array_3op_gep(i64 noundef %N) #0 {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr @idx_array, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr @idx_array, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr @data_array, <vscale x 4 x i64> [[TMP14]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr @data_array, <vscale x 4 x i64> [[TMP14]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP11]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -505,15 +544,15 @@ define void @histogram_array_4op_gep_nonzero_const_idx(i64 noundef %N, ptr reado
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = sext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds nuw i8, ptr [[DATA_STRUCT]], i64 8388608
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[DOTSPLIT]], <vscale x 4 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[DOTSPLIT]], <vscale x 4 x i64> [[TMP6]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP7]], i32 1, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -556,15 +595,15 @@ define void @simple_histogram_tailfold(ptr noalias %buckets, ptr readonly %indic
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]])
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK: for.exit:
@@ -622,17 +661,17 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr %
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP13]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP14]], i32 1, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP15]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -670,10 +709,10 @@ define void @simple_histogram_unsafe_alias(ptr %buckets, ptr %indices, i64 %N) #
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP12]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
@@ -719,13 +758,13 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[INDICES]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[INDICES]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP5]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[BUCKETS]], <vscale x 2 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[BUCKETS]], <vscale x 2 x i64> [[WIDE_LOAD]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> [[TMP6]], i64 1, <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -759,10 +798,10 @@ define void @histogram_generates_vectors_crash(ptr %data_array, ptr noalias %ind
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEP_INDICES:%.*]] = getelementptr [1048576 x i32], ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_INDICES:%.*]] = getelementptr [4194304 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[L_IDX:%.*]] = load i32, ptr [[GEP_INDICES]], align 4
; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[L_IDX]] to i64
-; CHECK-NEXT: [[GEP_BUCKET:%.*]] = getelementptr [1048576 x i32], ptr [[DATA_ARRAY]], i64 [[IDXPROM5]]
+; CHECK-NEXT: [[GEP_BUCKET:%.*]] = getelementptr [4194304 x i8], ptr [[DATA_ARRAY]], i64 [[IDXPROM5]]
; CHECK-NEXT: [[L_BUCKET:%.*]] = load i32, ptr [[GEP_BUCKET]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[L_BUCKET]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[GEP_BUCKET]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll b/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
index 1461fcb20e9e3..ed544eb2eb119 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
@@ -20,10 +20,10 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP5]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x double> @foo_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i64 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP6]], ptr align 8 [[TMP7]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]])
@@ -50,14 +50,14 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP8]], i64 [[TMP1]]
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr [8 x i8], ptr [[TMP8]], i64 [[TMP1]]
; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x double> poison)
; INTERLEAVE-NEXT: [[TMP10:%.*]] = call <vscale x 2 x double> @foo_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i64 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; INTERLEAVE-NEXT: [[TMP11:%.*]] = call <vscale x 2 x double> @foo_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD3]], i64 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
-; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
-; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr inbounds double, ptr [[TMP12]], i64 [[TMP1]]
+; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP12]], i64 [[TMP1]]
; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP10]], ptr align 8 [[TMP12]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP11]], ptr align 8 [[TMP13]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]]
@@ -103,10 +103,10 @@ define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i3
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP5]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x double> @bar_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i32 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP6]], ptr align 8 [[TMP7]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]])
@@ -133,14 +133,14 @@ define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i3
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP8]], i64 [[TMP1]]
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr [8 x i8], ptr [[TMP8]], i64 [[TMP1]]
; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x double> poison)
; INTERLEAVE-NEXT: [[TMP10:%.*]] = call <vscale x 2 x double> @bar_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i32 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; INTERLEAVE-NEXT: [[TMP11:%.*]] = call <vscale x 2 x double> @bar_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD3]], i32 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
-; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
-; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr inbounds double, ptr [[TMP12]], i64 [[TMP1]]
+; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP12]], i64 [[TMP1]]
; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP10]], ptr align 8 [[TMP12]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP11]], ptr align 8 [[TMP13]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]]
@@ -180,10 +180,10 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[DATA:%.*]] = load double, ptr [[GEPSRC]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call double @foo(double [[DATA]], i64 [[INDVARS_IV]]) #[[ATTR5:[0-9]+]]
-; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds nuw double, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[CALL]], ptr [[GEPDST]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
@@ -204,20 +204,20 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[PRED_STORE_CONTINUE4]] ]
; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; INTERLEAVE: pred.store.if:
-; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP2:%.*]] = load double, ptr [[TMP1]], align 8
; INTERLEAVE-NEXT: [[TMP3:%.*]] = call double @foo(double [[TMP2]], i64 [[INDEX]]) #[[ATTR5:[0-9]+]]
-; INTERLEAVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
; INTERLEAVE-NEXT: store double [[TMP3]], ptr [[TMP4]], align 8
; INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE]]
; INTERLEAVE: pred.store.continue:
; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK2]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
; INTERLEAVE: pred.store.if3:
; INTERLEAVE-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 1
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[SRC]], i64 [[TMP5]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[TMP5]]
; INTERLEAVE-NEXT: [[TMP7:%.*]] = load double, ptr [[TMP6]], align 8
; INTERLEAVE-NEXT: [[TMP8:%.*]] = call double @foo(double [[TMP7]], i64 [[TMP5]]) #[[ATTR5]]
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[TMP5]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[TMP5]]
; INTERLEAVE-NEXT: store double [[TMP8]], ptr [[TMP9]], align 8
; INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE4]]
; INTERLEAVE: pred.store.continue4:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll
index f0aa75aab1f91..4a4928c637a5c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll
@@ -33,7 +33,7 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias %cond, i64 %N)
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[N]], [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[COND:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [8 x i8], ptr [[COND:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 -24
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 -56
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP3]], align 8
@@ -42,7 +42,7 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias %cond, i64 %N)
; CHECK-NEXT: [[REVERSE2:%.*]] = shufflevector <4 x double> [[WIDE_LOAD1]], <4 x double> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP5:%.*]] = fcmp une <4 x double> [[REVERSE]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = fcmp une <4 x double> [[REVERSE2]], zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [8 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 -24
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP7]], i64 -56
; CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x i1> [[TMP5]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -69,12 +69,12 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias %cond, i64 %N)
; CHECK: for.body:
; CHECK-NEXT: [[I_08_IN:%.*]] = phi i64 [ [[I_08:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_08]] = add nsw i64 [[I_08_IN]], -1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[COND]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[COND]], i64 [[I_08]]
; CHECK-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[TOBOOL:%.*]] = fcmp une double [[TMP13]], 0.000000e+00
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: [[TMP14:%.*]] = load double, ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP14]], 1.000000e+00
; CHECK-NEXT: store double [[ADD]], ptr [[ARRAYIDX1]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
index bc9cf4fe93622..158d953a52f09 100644
--- a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
+++ b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
@@ -13,7 +13,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) {
; GFX9-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; GFX9-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; GFX9-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr addrspace(1) [[S:%.*]], i64 [[INDEX]]
+; GFX9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr addrspace(1) [[S:%.*]], i64 [[INDEX]]
; GFX9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(1) [[TMP0]], i64 4
; GFX9-NEXT: [[WIDE_LOAD:%.*]] = load <2 x half>, ptr addrspace(1) [[TMP0]], align 2
; GFX9-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x half>, ptr addrspace(1) [[TMP1]], align 2
@@ -38,7 +38,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) {
; VI-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VI-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; VI-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr addrspace(1) [[S:%.*]], i64 [[INDEX]]
+; VI-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr addrspace(1) [[S:%.*]], i64 [[INDEX]]
; VI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(1) [[TMP0]], i64 4
; VI-NEXT: [[WIDE_LOAD:%.*]] = load <2 x half>, ptr addrspace(1) [[TMP0]], align 2
; VI-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x half>, ptr addrspace(1) [[TMP1]], align 2
@@ -60,7 +60,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) {
; CI: for.body:
; CI-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CI-NEXT: [[Q_04:%.*]] = phi half [ 0xH0000, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw half, ptr addrspace(1) [[S:%.*]], i64 [[INDVARS_IV]]
+; CI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr addrspace(1) [[S:%.*]], i64 [[INDVARS_IV]]
; CI-NEXT: [[TMP0:%.*]] = load half, ptr addrspace(1) [[ARRAYIDX]], align 2
; CI-NEXT: [[ADD]] = fadd fast half [[Q_04]], [[TMP0]]
; CI-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
index 706012c155fdf..ac85e699ce3b6 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
@@ -14,7 +14,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
@@ -55,9 +55,9 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[VEC_IND]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
@@ -107,9 +107,9 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 1), [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[WIDE_MASKED_LOAD1]]
@@ -154,9 +154,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 -1), [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> splat (i32 -1)
@@ -201,9 +201,9 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
@@ -248,9 +248,9 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
@@ -295,9 +295,9 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = fadd fast <4 x float> [[TMP2]], [[WIDE_MASKED_LOAD1]]
@@ -342,9 +342,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ <float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[WIDE_MASKED_LOAD1]]
@@ -388,7 +388,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 1000), [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
@@ -402,7 +402,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 256, [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[V0:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[V0]] = call i32 @llvm.smin.i32(i32 [[RESULT_08]], i32 [[L0]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
@@ -439,7 +439,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 1000), [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
@@ -453,7 +453,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 256, [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[V0:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[V0]] = call i32 @llvm.umax.i32(i32 [[RESULT_08]], i32 [[L0]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
@@ -488,7 +488,7 @@ define float @reduction_fmax(ptr nocapture %A, ptr nocapture %B) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[V0:%.*]], [[FOR_BODY]] ], [ 1.000000e+03, [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[C0:%.*]] = fcmp ogt float [[RESULT_08]], [[L0]]
; CHECK-NEXT: [[V0]] = select i1 [[C0]], float [[RESULT_08]], float [[L0]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
index 27e87bc84dc86..f571a21da2089 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
@@ -17,7 +17,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
@@ -41,7 +41,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK: loop:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP5]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_07]], [[CONV]]
@@ -84,11 +84,11 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP12]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[Y:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i32 16
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4
@@ -114,9 +114,9 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK: loop:
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[R_09:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_010]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[X]], i32 [[I_010]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i32 [[I_010]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[Y]], i32 [[I_010]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[MUL]] to i64
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index d9c441446750f..5caf8a04077a6 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -13,7 +13,7 @@ define i64 @add_i64_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[X:%.*]], i32 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X:%.*]], i32 [[I_08]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD]] = add nsw i64 [[TMP0]], [[R_07]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -58,7 +58,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
@@ -76,7 +76,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i32 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP5]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_07]], [[CONV]]
@@ -123,7 +123,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
@@ -141,7 +141,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_07]], [[CONV]]
@@ -251,7 +251,7 @@ define i32 @add_i32_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP0]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
@@ -295,7 +295,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP0]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP1]], <8 x i32> zeroinitializer
@@ -389,7 +389,7 @@ define signext i16 @add_i16_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i16 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP0]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP2]] = add i16 [[VEC_PHI]], [[TMP1]]
@@ -519,9 +519,9 @@ define i64 @mla_i64_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK: for.body:
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[R_09:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[X:%.*]], i32 [[I_010]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X:%.*]], i32 [[I_010]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i64, ptr [[Y:%.*]], i32 [[I_010]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y:%.*]], i32 [[I_010]]
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[ADD]] = add nsw i64 [[MUL]], [[R_09]]
@@ -570,9 +570,9 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[Y:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
@@ -591,9 +591,9 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK: for.body:
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_09:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i32 [[I_010]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[X]], i32 [[I_010]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[Y]], i32 [[I_010]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[Y]], i32 [[I_010]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[MUL]] to i64
@@ -644,9 +644,9 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i16>, ptr [[TMP2]], align 2
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i16> [[WIDE_LOAD1]] to <8 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
@@ -666,10 +666,10 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK: for.body:
; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_011:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_012]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X]], i32 [[I_012]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i16, ptr [[Y]], i32 [[I_012]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y]], i32 [[I_012]]
; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP10]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
@@ -800,9 +800,9 @@ define i32 @mla_i32_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[Y:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP1]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[Y1:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP7]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
@@ -851,9 +851,9 @@ define i32 @mla_i16_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP0]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP2]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD1]] to <8 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
@@ -961,9 +961,9 @@ define signext i16 @mla_i16_i16(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i16 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP1]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[Y1:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP7]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i16> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> [[TMP2]], <8 x i16> zeroinitializer
@@ -1120,7 +1120,7 @@ define i32 @red_mla_ext_s8_s16_s32(ptr noalias nocapture readonly %A, ptr noalia
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 1 [[TMP0]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i8> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[TMP2]], <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD1]] to <8 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i8> [[WIDE_MASKED_LOAD]] to <8 x i32>
@@ -1181,10 +1181,10 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i16, ptr [[B1:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i16>, ptr [[TMP11]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[WIDE_LOAD1]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
@@ -1204,10 +1204,10 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_010:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[I_011]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i8], ptr [[A]], i32 [[I_011]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[B1]], i32 [[I_011]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x i8], ptr [[B]], i32 [[I_011]]
; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[TMP10]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
@@ -1355,10 +1355,10 @@ define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 {
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RED_PHI:%.*]] = phi i32 [ [[RED_2:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [4 x i8], ptr [[ARR]], i32 [[IV]]
; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr i8, ptr [[TMP11]], i32 4
; CHECK-NEXT: [[L_0:%.*]] = load i32, ptr [[GEP_0]], align 4
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i32 [[IV]]
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR]], i32 [[IV]]
; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_1]], align 4
; CHECK-NEXT: [[RED_1:%.*]] = add i32 [[L_0]], [[RED_PHI]]
; CHECK-NEXT: [[RED_2]] = add i32 [[RED_1]], [[L_1]]
@@ -1455,7 +1455,7 @@ define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <8 x i64> [[TMP1]], [[TMP1]]
@@ -1477,7 +1477,7 @@ define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_010:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_011]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X]], i32 [[I_011]]
; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
@@ -1523,7 +1523,7 @@ define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 no
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP4]]
@@ -1553,7 +1553,7 @@ define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 no
; CHECK-NEXT: [[I_019:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[T_018:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_017:%.*]] = phi i32 [ [[ADD6]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_019]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X]], i32 [[I_019]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
@@ -1601,20 +1601,20 @@ define i64 @interleave_doublereduct_i16_i64(ptr %x, ptr %y, i32 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I_025:%.*]] = phi i32 [ [[ADD13:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[T_024:%.*]] = phi i64 [ [[ADD12]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i32 [[I_025]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i32 [[I_025]]
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i32 [[I_025]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i32 [[I_025]]
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[T_024]], [[CONV3]]
; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_025]], 1
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[ADD4]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; CHECK-NEXT: [[CONV6:%.*]] = sext i16 [[TMP2]] to i32
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw i16, ptr [[Y]], i32 [[ADD4]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
; CHECK-NEXT: [[CONV9:%.*]] = sext i16 [[TMP3]] to i32
; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[CONV9]], [[CONV6]]
@@ -1673,7 +1673,7 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY1]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[FOR_BODY1]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[FOR_BODY1]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> [[WIDE_LOAD]], splat (i32 8)
; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
@@ -1702,7 +1702,7 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 {
; CHECK-NEXT: [[S_014:%.*]] = phi i64 [ [[ADD1]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[ADD4:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[T_012:%.*]] = phi i64 [ [[ADD5]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i32 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[X]], i32 [[I_013]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[TMP0]], 8
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[SHR]] to i64
@@ -1757,11 +1757,11 @@ define i64 @test_fir_q15(ptr %x, ptr %y, i32 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i16>, ptr [[TMP3]], align 2
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <16 x i16>, ptr [[TMP4]], align 2
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i16> [[WIDE_VEC2]], <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i16> [[WIDE_VEC2]], <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -1791,20 +1791,20 @@ define i64 @test_fir_q15(ptr %x, ptr %y, i32 %n) #0 {
; CHECK: for.body:
; CHECK-NEXT: [[I_025:%.*]] = phi i32 [ [[ADD13:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_024:%.*]] = phi i64 [ [[ADD12]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_025]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i8], ptr [[X]], i32 [[I_025]]
; CHECK-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[Y]], i32 [[I_025]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y]], i32 [[I_025]]
; CHECK-NEXT: [[TMP19:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP19]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[S_024]], [[CONV3]]
; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_025]], 1
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[ADD4]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x i8], ptr [[X]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP20:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; CHECK-NEXT: [[CONV6:%.*]] = sext i16 [[TMP20]] to i32
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[Y]], i32 [[ADD4]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
; CHECK-NEXT: [[CONV9:%.*]] = sext i16 [[TMP21]] to i32
; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[CONV9]], [[CONV6]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
index 6f7b45beeb6e7..45490e1b65e4b 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
@@ -18,7 +18,7 @@ define arm_aapcs_vfpcc i32 @minmaxval4(ptr nocapture readonly %x, ptr nocapture
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 2147483647), [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ splat (i32 -2147483648), [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI1]])
; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI]])
@@ -44,7 +44,7 @@ define arm_aapcs_vfpcc i32 @minmaxval4(ptr nocapture readonly %x, ptr nocapture
; CHECK-NEXT: [[I_029:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[MIN_028:%.*]] = phi i32 [ [[COND9]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[MAX_027:%.*]] = phi i32 [ [[COND]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_029]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[X]], i32 [[I_029]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[COND]] = call i32 @llvm.smax.i32(i32 [[TMP6]], i32 [[MAX_027]])
; CHECK-NEXT: [[COND9]] = call i32 @llvm.smin.i32(i32 [[TMP6]], i32 [[MIN_028]])
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
index 48a9bdc12a104..95d756194045b 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
@@ -40,7 +40,7 @@ define signext i32 @s173() #0 {
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP8]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr getelementptr inbounds nuw (i8, ptr @global_data, i64 128016), i64 [[INDEX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @global_data, i64 128016), i64 [[INDEX]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP9]], i64 16
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP9]], i64 32
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP9]], i64 48
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
index 8dcc211d22cc4..9923f898c44fc 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
@@ -10,7 +10,7 @@ define dso_local double @test(ptr %Arr) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[ARR:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARR:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast <2 x double> @__sind2(<2 x double> [[TMP1]])
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
index 172d77d849223..a87dace6f7402 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
@@ -9,7 +9,7 @@ define dso_local double @test(ptr %Arr) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[ARR:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARR:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
; CHECK-NEXT: [[TMP3:%.*]] = call fast <2 x double> @__sind2_P8(<2 x double> [[TMP2]])
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
index 1153d18656004..386ad85d2bd13 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
@@ -57,8 +57,8 @@ define void @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds ptr, ptr [[PTRPTR:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr ptr, ptr [[PTRPTR]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[PTRPTR:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [8 x i8], ptr [[PTRPTR]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP11]], i64 8
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP1]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP2]], align 8
@@ -66,7 +66,7 @@ define void @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i64 0
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP6]], i64 1
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <2 x i32> [[TMP8]], ptr [[TMP9]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
index e2329fe31cd56..938eda6c8b457 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
@@ -26,7 +26,7 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; SSE-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i32> [[WIDE_VEC2]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; SSE-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[STRIDED_VEC1]], [[STRIDED_VEC]]
; SSE-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC3]]
-; SSE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; SSE-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; SSE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16
; SSE-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4
; SSE-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP7]], align 4
@@ -72,7 +72,7 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; AVX1-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC3]]
; AVX1-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[STRIDED_VEC7]], [[STRIDED_VEC6]]
; AVX1-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[STRIDED_VEC10]], [[STRIDED_VEC9]]
-; AVX1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; AVX1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; AVX1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 16
; AVX1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 32
; AVX1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 48
@@ -122,7 +122,7 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; AVX2-NEXT: [[TMP11:%.*]] = add nsw <8 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC3]]
; AVX2-NEXT: [[TMP12:%.*]] = add nsw <8 x i32> [[STRIDED_VEC7]], [[STRIDED_VEC6]]
; AVX2-NEXT: [[TMP13:%.*]] = add nsw <8 x i32> [[STRIDED_VEC10]], [[STRIDED_VEC9]]
-; AVX2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; AVX2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; AVX2-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 32
; AVX2-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 64
; AVX2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 96
@@ -146,13 +146,13 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; ATOM: for.body:
; ATOM-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; ATOM-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1
-; ATOM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP0]]
+; ATOM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[TMP0]]
; ATOM-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; ATOM-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]]
+; ATOM-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP0]]
; ATOM-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 4
; ATOM-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
; ATOM-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], [[TMP1]]
-; ATOM-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; ATOM-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; ATOM-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX6]], align 4
; ATOM-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; ATOM-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
index d514ab6bc72b7..60ef7008f5c9c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -807,10 +807,10 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O1-NEXT: br label [[FOR_BODY:%.*]]
; O1: for.body:
; O1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O1-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O1-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
@@ -1164,10 +1164,10 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; Oz-NEXT: br label [[FOR_BODY:%.*]]
; Oz: for.body:
; Oz-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; Oz-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; Oz-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; Oz-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; Oz-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; Oz-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; Oz-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; Oz-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; Oz-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; Oz-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
@@ -1185,13 +1185,13 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O1VEC2-NEXT: br label [[VECTOR_BODY:%.*]]
; O1VEC2: vector.body:
; O1VEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDEX]]
+; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 4
; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; O1VEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; O1VEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDEX]]
+; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i64 4
; O1VEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4
; O1VEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4
@@ -1213,13 +1213,13 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; OzVEC2-NEXT: br label [[VECTOR_BODY:%.*]]
; OzVEC2: vector.body:
; OzVEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDEX]]
+; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 4
; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; OzVEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; OzVEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDEX]]
+; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i64 4
; OzVEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4
; OzVEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4
@@ -1237,10 +1237,10 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O3DIS-NEXT: br label [[FOR_BODY:%.*]]
; O3DIS: for.body:
; O3DIS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O3DIS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O3DIS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O3DIS-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O3DIS-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O3DIS-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O3DIS-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O3DIS-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O3DIS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O3DIS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
@@ -1274,10 +1274,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O1-NEXT: br label [[FOR_BODY:%.*]]
; O1: for.body:
; O1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O1-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O1-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1291,10 +1291,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O2-NEXT: br label [[FOR_BODY:%.*]]
; O2: for.body:
; O2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O2-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1308,10 +1308,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O3-NEXT: br label [[FOR_BODY:%.*]]
; O3: for.body:
; O3-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O3-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O3-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O3-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1390,10 +1390,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; Os-NEXT: br label [[FOR_BODY:%.*]]
; Os: for.body:
; Os-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; Os-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; Os-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; Os-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; Os-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; Os-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; Os-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; Os-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; Os-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; Os-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1407,10 +1407,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; Oz-NEXT: br label [[FOR_BODY:%.*]]
; Oz: for.body:
; Oz-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; Oz-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; Oz-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; Oz-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; Oz-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; Oz-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; Oz-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; Oz-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; Oz-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; Oz-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1424,10 +1424,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O1VEC2-NEXT: br label [[FOR_BODY:%.*]]
; O1VEC2: for.body:
; O1VEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O1VEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O1VEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O1VEC2-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O1VEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O1VEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O1VEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1441,10 +1441,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; OzVEC2-NEXT: br label [[FOR_BODY:%.*]]
; OzVEC2: for.body:
; OzVEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; OzVEC2-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; OzVEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; OzVEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; OzVEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
@@ -1458,10 +1458,10 @@ define i32 @disabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O3DIS-NEXT: br label [[FOR_BODY:%.*]]
; O3DIS: for.body:
; O3DIS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O3DIS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; O3DIS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; O3DIS-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; O3DIS-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[N:%.*]]
-; O3DIS-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; O3DIS-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; O3DIS-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; O3DIS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; O3DIS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 48
diff --git a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
index 66809ebb715f0..74c20175c86da 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -19,15 +19,15 @@ define void @loop(ptr nocapture %a, ptr nocapture %b) nounwind uwtable {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM3]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[IDXPROM3]]
; CHECK-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: store i32 [[TMP2]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
@@ -71,18 +71,18 @@ define void @parallel_loop(ptr nocapture %a, ptr nocapture %b) nounwind uwtable
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i64 0
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i64 1
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i64 2
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i64 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP2]], i64 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i64 12
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]]
@@ -92,15 +92,15 @@ define void @parallel_loop(ptr nocapture %a, ptr nocapture %b) nounwind uwtable
; CHECK-NEXT: [[TMP14:%.*]] = sext i32 [[TMP10]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[TMP12]] to i64
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP13]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP15]]
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]]
-; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP17]], align 4, !llvm.access.group [[ACC_GRP1:![0-9]+]]
-; CHECK-NEXT: store i32 [[TMP21]], ptr [[TMP18]], align 4, !llvm.access.group [[ACC_GRP1]]
-; CHECK-NEXT: store i32 [[TMP22]], ptr [[TMP19]], align 4, !llvm.access.group [[ACC_GRP1]]
-; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4, !llvm.access.group [[ACC_GRP1]]
-; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP16]]
+; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP20]], align 4, !llvm.access.group [[ACC_GRP1:![0-9]+]]
+; CHECK-NEXT: store i32 [[TMP21]], ptr [[TMP24]], align 4, !llvm.access.group [[ACC_GRP1]]
+; CHECK-NEXT: store i32 [[TMP22]], ptr [[TMP28]], align 4, !llvm.access.group [[ACC_GRP1]]
+; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP30]], align 4, !llvm.access.group [[ACC_GRP1]]
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP27]], i64 4
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4, !llvm.access.group [[ACC_GRP0]]
; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD1]], ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]]
@@ -148,20 +148,20 @@ define void @mixed_metadata(ptr nocapture %a, ptr nocapture %b) nounwind uwtable
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP6]]
; CHECK-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM3]]
-; CHECK-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[IDXPROM3]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: store i32 [[TMP2]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP6]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP6]]
; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP3]], 512
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index 93cf59c019d5f..856374dc0f684 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -28,12 +28,12 @@ define void @example1() optsize {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr @c, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr @a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr @a, i64 [[INDEX]]
; CHECK-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -85,14 +85,14 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: store i32 [[X:%.*]], ptr [[TMP5]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; CHECK-NEXT: store i32 [[X]], ptr [[TMP8]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -100,7 +100,7 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; CHECK: pred.store.if3:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 8
; CHECK-NEXT: store i32 [[X]], ptr [[TMP11]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
@@ -108,7 +108,7 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
; CHECK: pred.store.if5:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 12
; CHECK-NEXT: store i32 [[X]], ptr [[TMP14]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
@@ -146,11 +146,11 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP18]], i64 0
; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]]
; CHECK: pred.store.if18:
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr @c, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr @a, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8], ptr @a, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP25:%.*]] = and i32 [[TMP23]], [[TMP21]]
; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
@@ -159,11 +159,11 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]]
; CHECK: pred.store.if20:
; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 1
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP27]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[TMP27]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i8], ptr @c, i64 [[TMP27]]
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP30]], align 4
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr @a, i64 [[TMP27]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr @a, i64 [[TMP27]]
; CHECK-NEXT: [[TMP33:%.*]] = and i32 [[TMP31]], [[TMP29]]
; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]]
@@ -172,11 +172,11 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: br i1 [[TMP34]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]]
; CHECK: pred.store.if22:
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP35]]
+; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[TMP35]]
; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[TMP35]]
+; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [4 x i8], ptr @c, i64 [[TMP35]]
; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4
-; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr @a, i64 [[TMP35]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [4 x i8], ptr @a, i64 [[TMP35]]
; CHECK-NEXT: [[TMP41:%.*]] = and i32 [[TMP39]], [[TMP37]]
; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]]
@@ -185,11 +185,11 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26]]
; CHECK: pred.store.if24:
; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], 3
-; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP43]]
+; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[TMP43]]
; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP44]], align 4
-; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[TMP43]]
+; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [4 x i8], ptr @c, i64 [[TMP43]]
; CHECK-NEXT: [[TMP47:%.*]] = load i32, ptr [[TMP46]], align 4
-; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i32, ptr @a, i64 [[TMP43]]
+; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [4 x i8], ptr @a, i64 [[TMP43]]
; CHECK-NEXT: [[TMP49:%.*]] = and i32 [[TMP47]], [[TMP45]]
; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
index cfc588a7f5d89..3b8d2c623d13c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
@@ -25,7 +25,7 @@ define dso_local void @test1(ptr noalias nocapture %points, ptr noalias nocaptur
; DISABLED_MASKED_STRIDED: vector.body:
; DISABLED_MASKED_STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; DISABLED_MASKED_STRIDED-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[INDEX]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; DISABLED_MASKED_STRIDED-NEXT: [[TMP10:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: [[TMP11:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 1
@@ -36,15 +36,15 @@ define dso_local void @test1(ptr noalias nocapture %points, ptr noalias nocaptur
; DISABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i64 1
; DISABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP1]], i64 2
; DISABLED_MASKED_STRIDED-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP1]], i64 3
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS:%.*]], i64 [[TMP2]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP4]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP6]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP8]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS:%.*]], i64 [[TMP2]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP4]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP6]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP8]]
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP10]], ptr [[TMP3]], align 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP11]], ptr [[TMP5]], align 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP12]], ptr [[TMP7]], align 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP13]], ptr [[TMP9]], align 2
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i16>, ptr [[TMP14]], align 2
; DISABLED_MASKED_STRIDED-NEXT: [[TMP24:%.*]] = extractelement <4 x i16> [[WIDE_LOAD1]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: [[TMP25:%.*]] = extractelement <4 x i16> [[WIDE_LOAD1]], i64 1
@@ -55,10 +55,10 @@ define dso_local void @test1(ptr noalias nocapture %points, ptr noalias nocaptur
; DISABLED_MASKED_STRIDED-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP15]], i64 1
; DISABLED_MASKED_STRIDED-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP15]], i64 2
; DISABLED_MASKED_STRIDED-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP15]], i64 3
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP16]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP18]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP20]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP22]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP16]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP18]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP20]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP22]]
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP24]], ptr [[TMP17]], align 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP25]], ptr [[TMP19]], align 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP26]], ptr [[TMP21]], align 2
@@ -75,11 +75,11 @@ define dso_local void @test1(ptr noalias nocapture %points, ptr noalias nocaptur
; ENABLED_MASKED_STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; ENABLED_MASKED_STRIDED: vector.body:
; ENABLED_MASKED_STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[INDEX]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; ENABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = shl nsw i64 [[INDEX]], 3
; ENABLED_MASKED_STRIDED-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[POINTS:%.*]], i64 [[TMP1]]
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2
; ENABLED_MASKED_STRIDED-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i16> [[WIDE_LOAD]], <4 x i16> [[WIDE_LOAD1]], <16 x i32> <i32 0, i32 4, i32 poison, i32 poison, i32 1, i32 5, i32 poison, i32 poison, i32 2, i32 6, i32 poison, i32 poison, i32 3, i32 7, i32 poison, i32 poison>
; ENABLED_MASKED_STRIDED-NEXT: call void @llvm.masked.store.v16i16.p0(<16 x i16> [[INTERLEAVED_VEC]], ptr align 2 [[GEP]], <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false>)
@@ -139,14 +139,14 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE15:%.*]] ]
; DISABLED_MASKED_STRIDED-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE15]] ]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[INDEX]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[TMP1]], <4 x i1> [[TMP0]], <4 x i16> poison)
; DISABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = shl nsw <4 x i64> [[VEC_IND]], splat (i64 2)
; DISABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP2]], i64 0
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[POINTS:%.*]], i64 [[TMP4]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS:%.*]], i64 [[TMP4]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP6]], ptr [[TMP5]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -155,7 +155,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if1:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP2]], i64 1
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP8]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP8]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP10:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD]], i64 1
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP10]], ptr [[TMP9]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -164,7 +164,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if3:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP2]], i64 2
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP13:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP12]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP12]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP14:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD]], i64 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP14]], ptr [[TMP13]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE4]]
@@ -173,19 +173,19 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if5:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP2]], i64 3
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP16]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP16]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP18:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD]], i64 3
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP18]], ptr [[TMP17]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE6]]
; DISABLED_MASKED_STRIDED: pred.store.continue6:
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_LOAD7:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[TMP19]], <4 x i1> [[TMP0]], <4 x i16> poison)
; DISABLED_MASKED_STRIDED-NEXT: [[TMP20:%.*]] = or disjoint <4 x i64> [[TMP2]], splat (i64 1)
; DISABLED_MASKED_STRIDED-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if8:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP20]], i64 0
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP23:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP22]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP22]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP24:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD7]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE9]]
@@ -194,7 +194,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if10:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP26:%.*]] = extractelement <4 x i64> [[TMP20]], i64 1
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP26]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP26]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP28:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD7]], i64 1
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP28]], ptr [[TMP27]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE11]]
@@ -203,7 +203,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP29]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if12:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP30:%.*]] = extractelement <4 x i64> [[TMP20]], i64 2
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP31:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP30]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP30]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP32:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD7]], i64 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP32]], ptr [[TMP31]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE13]]
@@ -212,7 +212,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP33]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15]]
; DISABLED_MASKED_STRIDED: pred.store.if14:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP34:%.*]] = extractelement <4 x i64> [[TMP20]], i64 3
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP34]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x i8], ptr [[POINTS]], i64 [[TMP34]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP36:%.*]] = extractelement <4 x i16> [[WIDE_MASKED_LOAD7]], i64 3
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP36]], ptr [[TMP35]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE15]]
@@ -244,11 +244,11 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; ENABLED_MASKED_STRIDED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer
; ENABLED_MASKED_STRIDED-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1, i64 2, i64 3>
; ENABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[INDEX]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[TMP1]], <4 x i1> [[TMP0]], <4 x i16> poison)
; ENABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = shl nsw i64 [[INDEX]], 3
; ENABLED_MASKED_STRIDED-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[POINTS:%.*]], i64 [[TMP2]]
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x i8], ptr [[Y:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[TMP3]], <4 x i1> [[TMP0]], <4 x i16> poison)
; ENABLED_MASKED_STRIDED-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i16> [[WIDE_MASKED_LOAD]], <4 x i16> [[WIDE_MASKED_LOAD3]], <16 x i32> <i32 0, i32 4, i32 poison, i32 poison, i32 1, i32 5, i32 poison, i32 poison, i32 2, i32 6, i32 poison, i32 poison, i32 3, i32 7, i32 poison, i32 poison>
; ENABLED_MASKED_STRIDED-NEXT: [[INTERLEAVED_MASK:%.*]] = shufflevector <4 x i1> [[TMP0]], <4 x i1> poison, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
@@ -311,7 +311,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; DISABLED_MASKED_STRIDED: vector.body:
; DISABLED_MASKED_STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; DISABLED_MASKED_STRIDED-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[INDEX]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; DISABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = icmp sgt <4 x i16> [[WIDE_LOAD]], zeroinitializer
; DISABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = mul nuw nsw <4 x i64> [[VEC_IND]], splat (i64 3)
@@ -319,7 +319,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP2]], i64 0
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS:%.*]], i64 [[TMP4]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS:%.*]], i64 [[TMP4]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP6]], ptr [[TMP5]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -328,7 +328,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if1:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP2]], i64 1
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP8]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP8]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP10:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 1
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP10]], ptr [[TMP9]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -337,7 +337,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if3:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP2]], i64 2
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP12]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP12]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP14:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 2
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP14]], ptr [[TMP13]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE4]]
@@ -346,7 +346,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
; DISABLED_MASKED_STRIDED: pred.store.if5:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP2]], i64 3
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP16]]
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP16]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP18:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 3
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP18]], ptr [[TMP17]], align 2
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE6]]
@@ -364,7 +364,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; ENABLED_MASKED_STRIDED: vector.body:
; ENABLED_MASKED_STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; ENABLED_MASKED_STRIDED-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[INDEX]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8], ptr [[X:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; ENABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = icmp sgt <4 x i16> [[WIDE_LOAD]], zeroinitializer
; ENABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = mul nuw nsw <4 x i64> [[VEC_IND]], splat (i64 3)
@@ -372,7 +372,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; ENABLED_MASKED_STRIDED-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; ENABLED_MASKED_STRIDED: pred.store.if:
; ENABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP2]], i64 0
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS:%.*]], i64 [[TMP4]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS:%.*]], i64 [[TMP4]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 0
; ENABLED_MASKED_STRIDED-NEXT: store i16 [[TMP6]], ptr [[TMP5]], align 2
; ENABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -381,7 +381,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; ENABLED_MASKED_STRIDED-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
; ENABLED_MASKED_STRIDED: pred.store.if1:
; ENABLED_MASKED_STRIDED-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP2]], i64 1
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP8]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP8]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP10:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 1
; ENABLED_MASKED_STRIDED-NEXT: store i16 [[TMP10]], ptr [[TMP9]], align 2
; ENABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -390,7 +390,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; ENABLED_MASKED_STRIDED-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; ENABLED_MASKED_STRIDED: pred.store.if3:
; ENABLED_MASKED_STRIDED-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP2]], i64 2
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP12]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP12]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP14:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 2
; ENABLED_MASKED_STRIDED-NEXT: store i16 [[TMP14]], ptr [[TMP13]], align 2
; ENABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE4]]
@@ -399,7 +399,7 @@ define dso_local void @test(ptr noalias nocapture %points, ptr noalias nocapture
; ENABLED_MASKED_STRIDED-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
; ENABLED_MASKED_STRIDED: pred.store.if5:
; ENABLED_MASKED_STRIDED-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP2]], i64 3
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i16, ptr [[POINTS]], i64 [[TMP16]]
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[POINTS]], i64 [[TMP16]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP18:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 3
; ENABLED_MASKED_STRIDED-NEXT: store i16 [[TMP18]], ptr [[TMP17]], align 2
; ENABLED_MASKED_STRIDED-NEXT: br label [[PRED_STORE_CONTINUE6]]
diff --git a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll
index 784ccd22f98d7..cf6d95949b06a 100644
--- a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll
+++ b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll
@@ -24,10 +24,10 @@ define void @foo(ptr nocapture %A) {
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i64> [[STEP_ADD]], splat (i64 8)
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[TMP1]], i64 1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: store i32 4, ptr [[TMP5]], align 4
; CHECK-NEXT: store i32 4, ptr [[TMP7]], align 4
; CHECK-NEXT: store i32 4, ptr [[TMP9]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll b/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
index 93ac34003d4b7..80b035f9f0c56 100644
--- a/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
+++ b/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
@@ -11,10 +11,10 @@ define void @inv_store_last_lane(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = shl nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP2]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -28,10 +28,10 @@ define void @inv_store_last_lane(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP5]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
@@ -72,10 +72,10 @@ define float @ret_last_lane(ptr noalias nocapture %a, ptr noalias nocapture read
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = fmul <4 x float> [[WIDE_LOAD]], splat (float 2.000000e+00)
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP2]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -89,10 +89,10 @@ define float @ret_last_lane(ptr noalias nocapture %a, ptr noalias nocapture read
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP5]], 2.000000e+00
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll
index f56699a45320d..210d43bc893cf 100644
--- a/llvm/test/Transforms/LoopVectorize/float-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll
@@ -45,7 +45,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP4]], align 4
; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fsub fast <4 x float> [[VEC_IND]], [[DOTSPLAT5]]
@@ -61,7 +61,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL1: for.body:
; VEC4_INTERL1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[X_05:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL1-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -99,7 +99,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[STEP_ADD:%.*]] = fsub fast <4 x float> [[VEC_IND]], [[DOTSPLAT5]]
-; VEC4_INTERL2-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP4]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP4]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD]], ptr [[TMP5]], align 4
@@ -117,7 +117,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL2: for.body:
; VEC4_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[X_05:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL2-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -150,8 +150,8 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[FPINC]], [[DOTCAST2]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fsub fast float [[INIT]], [[TMP3]]
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = fsub fast float [[OFFSET_IDX]], [[FPINC]]
-; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP9]], i64 4
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], ptr [[TMP8]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP4]], ptr [[TMP6]], align 4
@@ -168,7 +168,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC1_INTERL2: for.body:
; VEC1_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[X_05:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC1_INTERL2-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -207,7 +207,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC2_INTERL1_PRED_STORE: vector.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND:%.*]] = phi <2 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[VEC_IND]], ptr [[TMP4]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND_NEXT]] = fsub fast <2 x float> [[VEC_IND]], [[DOTSPLAT5]]
@@ -219,7 +219,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC2_INTERL1_PRED_STORE: for.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[X_05:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[INIT]], [[FOR_BODY_LR_PH]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -290,7 +290,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP4]], align 4
; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fsub reassoc <4 x float> [[VEC_IND]], [[DOTSPLAT5]]
@@ -306,7 +306,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL1: for.body:
; VEC4_INTERL1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[X_05:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL1-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -344,7 +344,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[STEP_ADD:%.*]] = fsub reassoc <4 x float> [[VEC_IND]], [[DOTSPLAT5]]
-; VEC4_INTERL2-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP4]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP4]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD]], ptr [[TMP5]], align 4
@@ -362,7 +362,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL2: for.body:
; VEC4_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[X_05:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL2-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -395,8 +395,8 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul reassoc float [[FPINC]], [[DOTCAST2]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fsub reassoc float [[INIT]], [[TMP3]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fsub reassoc float [[OFFSET_IDX]], [[FPINC]]
-; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP10]], i64 4
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], ptr [[TMP7]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP6]], ptr [[TMP8]], align 4
@@ -413,7 +413,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC1_INTERL2: for.body:
; VEC1_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[X_05:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC1_INTERL2-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -452,7 +452,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC2_INTERL1_PRED_STORE: vector.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND:%.*]] = phi <2 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[VEC_IND]], ptr [[TMP4]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND_NEXT]] = fsub reassoc <2 x float> [[VEC_IND]], [[DOTSPLAT5]]
@@ -464,7 +464,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC2_INTERL1_PRED_STORE: for.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[X_05:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[INIT]], [[FOR_BODY_LR_PH]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[X_05]], ptr [[ARRAYIDX]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -530,7 +530,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP2]], align 4
; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fadd fast <4 x float> [[VEC_IND]], splat (float 2.000000e+00)
@@ -546,7 +546,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL1: for.body:
; VEC4_INTERL1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC4_INTERL1-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -579,7 +579,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[STEP_ADD:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float 2.000000e+00)
-; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP2]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD]], ptr [[TMP3]], align 4
@@ -597,7 +597,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL2: for.body:
; VEC4_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC4_INTERL2-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -629,8 +629,8 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[DOTCAST2]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[INIT]], [[TMP3]]
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = fadd fast float [[OFFSET_IDX]], 5.000000e-01
-; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP9]], i64 4
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], ptr [[TMP8]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP4]], ptr [[TMP6]], align 4
@@ -647,7 +647,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC1_INTERL2: for.body:
; VEC1_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC1_INTERL2-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -679,7 +679,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC2_INTERL1_PRED_STORE: vector.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND:%.*]] = phi <2 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[VEC_IND]], ptr [[TMP2]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND_NEXT]] = fadd fast <2 x float> [[VEC_IND]], splat (float 1.000000e+00)
@@ -691,7 +691,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC2_INTERL1_PRED_STORE: for.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[INIT]], [[FOR_BODY_PREHEADER]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -774,14 +774,14 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 0x3FB99999A0000000, float 0xBFD99999A0000000, float 0xBFECCCCCC0000000, float 0xBFF6666660000000>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[VEC_IND9:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT10:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND9]], ptr [[TMP6]], align 4
; VEC4_INTERL1-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[VEC_IND9]], [[DOTSPLAT]]
; VEC4_INTERL1-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float -5.000000e-01)
; VEC4_INTERL1-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[TMP8]], [[TMP7]]
-; VEC4_INTERL1-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[TMP9]], ptr [[TMP10]], align 4
-; VEC4_INTERL1-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[TMP8]], ptr [[TMP11]], align 4
; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fadd fast <4 x float> [[VEC_IND]], splat (float -2.000000e+00)
@@ -800,14 +800,14 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[Y_012:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[CONV1:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[X_011:%.*]] = phi float [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[X_011]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL1-NEXT: [[ADD]] = fadd fast float [[X_011]], [[TMP0]]
; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[Y_012]], -5.000000e-01
; VEC4_INTERL1-NEXT: [[ADD2:%.*]] = fadd fast float [[CONV1]], [[ADD]]
-; VEC4_INTERL1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[ADD2]], ptr [[ARRAYIDX4]], align 4
-; VEC4_INTERL1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -850,7 +850,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL2-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 0x3FB99999A0000000, float 0xBFD99999A0000000, float 0xBFECCCCCC0000000, float 0xBFF6666660000000>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[VEC_IND10:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT13:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[STEP_ADD11:%.*]] = fadd fast <4 x float> [[VEC_IND10]], [[DOTSPLAT5]]
-; VEC4_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND10]], ptr [[TMP6]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD11]], ptr [[TMP7]], align 4
@@ -860,11 +860,11 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float -2.500000e+00)
; VEC4_INTERL2-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[TMP10]], [[TMP8]]
; VEC4_INTERL2-NEXT: [[TMP13:%.*]] = fadd fast <4 x float> [[TMP11]], [[TMP9]]
-; VEC4_INTERL2-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP14]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[TMP12]], ptr [[TMP14]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[TMP13]], ptr [[TMP15]], align 4
-; VEC4_INTERL2-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP16]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[TMP10]], ptr [[TMP16]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[TMP11]], ptr [[TMP17]], align 4
@@ -885,14 +885,14 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[Y_012:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[CONV1:%.*]], [[FOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[X_011:%.*]] = phi float [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[X_011]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL2-NEXT: [[ADD]] = fadd fast float [[X_011]], [[TMP0]]
; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[Y_012]], -5.000000e-01
; VEC4_INTERL2-NEXT: [[ADD2:%.*]] = fadd fast float [[CONV1]], [[ADD]]
-; VEC4_INTERL2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[ADD2]], ptr [[ARRAYIDX4]], align 4
-; VEC4_INTERL2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -930,8 +930,8 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fmul fast float [[TMP0]], [[DOTCAST6]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX7:%.*]] = fadd fast float [[INIT]], [[TMP6]]
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fadd fast float [[OFFSET_IDX7]], [[TMP0]]
-; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP4]]
+; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP4]]
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX7]], ptr [[TMP8]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP7]], ptr [[TMP9]], align 4
; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = fadd fast float [[OFFSET_IDX7]], [[TMP0]]
@@ -940,12 +940,12 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC1_INTERL2-NEXT: [[TMP13:%.*]] = fadd fast float [[TMP5]], 0xBFECCCCCC0000000
; VEC1_INTERL2-NEXT: [[TMP14:%.*]] = fadd fast float [[TMP12]], [[TMP10]]
; VEC1_INTERL2-NEXT: [[TMP15:%.*]] = fadd fast float [[TMP13]], [[TMP11]]
-; VEC1_INTERL2-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP4]]
+; VEC1_INTERL2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP4]]
; VEC1_INTERL2-NEXT: store float [[TMP14]], ptr [[TMP16]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP15]], ptr [[TMP17]], align 4
-; VEC1_INTERL2-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[TMP4]]
+; VEC1_INTERL2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8], ptr [[C]], i64 [[TMP4]]
; VEC1_INTERL2-NEXT: store float [[TMP12]], ptr [[TMP18]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP13]], ptr [[TMP19]], align 4
; VEC1_INTERL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
@@ -963,14 +963,14 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC1_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[Y_012:%.*]] = phi float [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[CONV1:%.*]], [[FOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[X_011:%.*]] = phi float [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[X_011]], ptr [[ARRAYIDX]], align 4
; VEC1_INTERL2-NEXT: [[ADD]] = fadd fast float [[X_011]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[Y_012]], -5.000000e-01
; VEC1_INTERL2-NEXT: [[ADD2:%.*]] = fadd fast float [[CONV1]], [[ADD]]
-; VEC1_INTERL2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[ADD2]], ptr [[ARRAYIDX4]], align 4
-; VEC1_INTERL2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -1014,14 +1014,14 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND:%.*]] = phi <2 x float> [ <float 0x3FB99999A0000000, float 0xBFD99999A0000000>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND9:%.*]] = phi <2 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT10:%.*]], [[VECTOR_BODY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[VEC_IND9]], ptr [[TMP6]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP7:%.*]] = fadd fast <2 x float> [[VEC_IND9]], [[DOTSPLAT]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP8:%.*]] = fadd fast <2 x float> [[VEC_IND]], splat (float -5.000000e-01)
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP9:%.*]] = fadd fast <2 x float> [[TMP8]], [[TMP7]]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[TMP9]], ptr [[TMP10]], align 4
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[C:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[TMP8]], ptr [[TMP11]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND_NEXT]] = fadd fast <2 x float> [[VEC_IND]], splat (float -1.000000e+00)
@@ -1035,14 +1035,14 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[Y_012:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0x3FB99999A0000000, [[FOR_BODY_LR_PH]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[X_011:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[IND_END3]], [[MIDDLE_BLOCK]] ], [ [[INIT]], [[FOR_BODY_LR_PH]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[X_011]], ptr [[ARRAYIDX]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fadd fast float [[X_011]], [[TMP0]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[Y_012]], -5.000000e-01
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD2:%.*]] = fadd fast float [[CONV1]], [[ADD]]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[ADD2]], ptr [[ARRAYIDX4]], align 4
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x i8], ptr [[C]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -1112,7 +1112,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 1.000000e+00, float 1.500000e+00, float 2.000000e+00, float 2.500000e+00>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP2]], align 4
; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fadd fast <4 x float> [[VEC_IND]], splat (float 2.000000e+00)
@@ -1128,7 +1128,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC4_INTERL1: for.body:
; VEC4_INTERL1-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC4_INTERL1-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL1-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -1158,7 +1158,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 1.000000e+00, float 1.500000e+00, float 2.000000e+00, float 2.500000e+00>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC4_INTERL2-NEXT: [[STEP_ADD:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float 2.000000e+00)
-; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND]], ptr [[TMP2]], align 4
; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD]], ptr [[TMP3]], align 4
@@ -1176,7 +1176,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC4_INTERL2: for.body:
; VEC4_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC4_INTERL2-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC4_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC4_INTERL2-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -1208,8 +1208,8 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[DOTCAST2]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[TMP3]], 1.000000e+00
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = fadd fast float [[TMP3]], 1.500000e+00
-; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP9]], i64 4
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], ptr [[TMP8]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP4]], ptr [[TMP6]], align 4
@@ -1226,7 +1226,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC1_INTERL2: for.body:
; VEC1_INTERL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC1_INTERL2-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC1_INTERL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC1_INTERL2-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -1255,7 +1255,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC2_INTERL1_PRED_STORE: vector.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND:%.*]] = phi <2 x float> [ <float 1.000000e+00, float 1.500000e+00>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> [[VEC_IND]], ptr [[TMP2]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC2_INTERL1_PRED_STORE-NEXT: [[VEC_IND_NEXT]] = fadd fast <2 x float> [[VEC_IND]], splat (float 1.000000e+00)
@@ -1267,7 +1267,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC2_INTERL1_PRED_STORE: for.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[FOR_BODY_PREHEADER]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -1316,20 +1316,20 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE7:%.*]] ]
; VEC4_INTERL1-NEXT: [[DOTCAST2:%.*]] = sitofp i64 [[INDEX]] to float
-; VEC4_INTERL1-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], zeroinitializer
; VEC4_INTERL1-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; VEC4_INTERL1-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC4_INTERL1: pred.store.if:
-; VEC4_INTERL1-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store float [[DOTCAST2]], ptr [[TMP3]], align 4
; VEC4_INTERL1-NEXT: br label [[PRED_STORE_CONTINUE]]
; VEC4_INTERL1: pred.store.continue:
; VEC4_INTERL1-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
; VEC4_INTERL1-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3:%.*]]
; VEC4_INTERL1: pred.store.if2:
-; VEC4_INTERL1-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i64 4
; VEC4_INTERL1-NEXT: [[TMP7:%.*]] = fadd fast float [[DOTCAST2]], 1.000000e+00
; VEC4_INTERL1-NEXT: store float [[TMP7]], ptr [[TMP6]], align 4
@@ -1338,7 +1338,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL1-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
; VEC4_INTERL1-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
; VEC4_INTERL1: pred.store.if4:
-; VEC4_INTERL1-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP9:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP9]], i64 8
; VEC4_INTERL1-NEXT: [[TMP11:%.*]] = fadd fast float [[DOTCAST2]], 2.000000e+00
; VEC4_INTERL1-NEXT: store float [[TMP11]], ptr [[TMP10]], align 4
@@ -1347,7 +1347,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL1-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
; VEC4_INTERL1-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7]]
; VEC4_INTERL1: pred.store.if6:
-; VEC4_INTERL1-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL1-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 12
; VEC4_INTERL1-NEXT: [[TMP15:%.*]] = fadd fast float [[DOTCAST2]], 3.000000e+00
; VEC4_INTERL1-NEXT: store float [[TMP15]], ptr [[TMP14]], align 4
@@ -1366,7 +1366,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL1: for.body:
; VEC4_INTERL1-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC4_INTERL1-NEXT: [[J:%.*]] = phi float [ [[J_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; VEC4_INTERL1-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[I]]
+; VEC4_INTERL1-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I]]
; VEC4_INTERL1-NEXT: [[VAR1:%.*]] = load float, ptr [[VAR0]], align 4
; VEC4_INTERL1-NEXT: [[VAR2:%.*]] = fcmp fast oeq float [[VAR1]], 0.000000e+00
; VEC4_INTERL1-NEXT: br i1 [[VAR2]], label [[IF_PRED:%.*]], label [[FOR_INC]]
@@ -1393,7 +1393,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2: vector.body:
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE16:%.*]] ]
; VEC4_INTERL2-NEXT: [[DOTCAST2:%.*]] = sitofp i64 [[INDEX]] to float
-; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; VEC4_INTERL2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; VEC4_INTERL2-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
@@ -1402,14 +1402,14 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; VEC4_INTERL2-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC4_INTERL2: pred.store.if:
-; VEC4_INTERL2-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: store float [[DOTCAST2]], ptr [[TMP35]], align 4
; VEC4_INTERL2-NEXT: br label [[PRED_STORE_CONTINUE]]
; VEC4_INTERL2: pred.store.continue:
; VEC4_INTERL2-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
; VEC4_INTERL2-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; VEC4_INTERL2: pred.store.if3:
-; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = fadd fast float [[DOTCAST2]], 1.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP9]], ptr [[TMP8]], align 4
@@ -1418,7 +1418,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
; VEC4_INTERL2-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
; VEC4_INTERL2: pred.store.if5:
-; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP11]], i64 8
; VEC4_INTERL2-NEXT: [[TMP13:%.*]] = fadd fast float [[DOTCAST2]], 2.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP13]], ptr [[TMP12]], align 4
@@ -1427,7 +1427,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
; VEC4_INTERL2-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; VEC4_INTERL2: pred.store.if7:
-; VEC4_INTERL2-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP15:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP15]], i64 12
; VEC4_INTERL2-NEXT: [[TMP17:%.*]] = fadd fast float [[DOTCAST2]], 3.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP17]], ptr [[TMP16]], align 4
@@ -1436,7 +1436,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP4]], i64 0
; VEC4_INTERL2-NEXT: br i1 [[TMP18]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
; VEC4_INTERL2: pred.store.if9:
-; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i64 16
; VEC4_INTERL2-NEXT: [[TMP21:%.*]] = fadd fast float [[DOTCAST2]], 4.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP21]], ptr [[TMP20]], align 4
@@ -1445,7 +1445,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP22:%.*]] = extractelement <4 x i1> [[TMP4]], i64 1
; VEC4_INTERL2-NEXT: br i1 [[TMP22]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
; VEC4_INTERL2: pred.store.if11:
-; VEC4_INTERL2-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP23:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP23]], i64 20
; VEC4_INTERL2-NEXT: [[TMP25:%.*]] = fadd fast float [[DOTCAST2]], 5.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP25]], ptr [[TMP24]], align 4
@@ -1454,7 +1454,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP4]], i64 2
; VEC4_INTERL2-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14:%.*]]
; VEC4_INTERL2: pred.store.if13:
-; VEC4_INTERL2-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP27:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP27]], i64 24
; VEC4_INTERL2-NEXT: [[TMP29:%.*]] = fadd fast float [[DOTCAST2]], 6.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP29]], ptr [[TMP28]], align 4
@@ -1463,7 +1463,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP30:%.*]] = extractelement <4 x i1> [[TMP4]], i64 3
; VEC4_INTERL2-NEXT: br i1 [[TMP30]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16]]
; VEC4_INTERL2: pred.store.if15:
-; VEC4_INTERL2-NEXT: [[TMP31:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP31:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[TMP31]], i64 28
; VEC4_INTERL2-NEXT: [[TMP33:%.*]] = fadd fast float [[DOTCAST2]], 7.000000e+00
; VEC4_INTERL2-NEXT: store float [[TMP33]], ptr [[TMP32]], align 4
@@ -1482,7 +1482,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC4_INTERL2: for.body:
; VEC4_INTERL2-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC4_INTERL2-NEXT: [[J:%.*]] = phi float [ [[J_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL17]], [[SCALAR_PH]] ]
-; VEC4_INTERL2-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[I]]
+; VEC4_INTERL2-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I]]
; VEC4_INTERL2-NEXT: [[VAR1:%.*]] = load float, ptr [[VAR0]], align 4
; VEC4_INTERL2-NEXT: [[VAR2:%.*]] = fcmp fast oeq float [[VAR1]], 0.000000e+00
; VEC4_INTERL2-NEXT: br i1 [[VAR2]], label [[IF_PRED:%.*]], label [[FOR_INC]]
@@ -1509,8 +1509,8 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; VEC1_INTERL2-NEXT: [[DOTCAST1:%.*]] = sitofp i64 [[INDEX]] to float
-; VEC1_INTERL2-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 4
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP0]], align 4
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4
@@ -1540,7 +1540,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC1_INTERL2: for.body:
; VEC1_INTERL2-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC1_INTERL2-NEXT: [[J:%.*]] = phi float [ [[J_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ]
-; VEC1_INTERL2-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[I]]
+; VEC1_INTERL2-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I]]
; VEC1_INTERL2-NEXT: [[VAR1:%.*]] = load float, ptr [[VAR0]], align 4
; VEC1_INTERL2-NEXT: [[VAR2:%.*]] = fcmp fast oeq float [[VAR1]], 0.000000e+00
; VEC1_INTERL2-NEXT: br i1 [[VAR2]], label [[IF_PRED:%.*]], label [[FOR_INC]]
@@ -1567,20 +1567,20 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC2_INTERL1_PRED_STORE: vector.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTCAST2:%.*]] = sitofp i64 [[INDEX]] to float
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = fcmp fast oeq <2 x float> [[WIDE_LOAD]], zeroinitializer
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i64 0
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC2_INTERL1_PRED_STORE: pred.store.if:
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[DOTCAST2]], ptr [[TMP3]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: br label [[PRED_STORE_CONTINUE]]
; VEC2_INTERL1_PRED_STORE: pred.store.continue:
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP1]], i64 1
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
; VEC2_INTERL1_PRED_STORE: pred.store.if2:
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i64 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP7:%.*]] = fadd fast float [[DOTCAST2]], 1.000000e+00
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[TMP7]], ptr [[TMP6]], align 4
@@ -1595,7 +1595,7 @@ define void @non_primary_iv_float_scalar(ptr %A, i64 %N) {
; VEC2_INTERL1_PRED_STORE: for.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[J:%.*]] = phi float [ [[J_NEXT:%.*]], [[FOR_INC]] ], [ [[DOTCAST]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[VAR0:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[VAR0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[VAR1:%.*]] = load float, ptr [[VAR0]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[VAR2:%.*]] = fcmp fast oeq float [[VAR1]], 0.000000e+00
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[VAR2]], label [[IF_PRED:%.*]], label [[FOR_INC]]
@@ -1643,7 +1643,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC4_INTERL1-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL1-NEXT: [[TMP0:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr null, i64 [[INDEX]]
; VEC4_INTERL1-NEXT: store <4 x float> poison, ptr [[TMP0]], align 8
; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
@@ -1660,7 +1660,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC4_INTERL2: vector.body:
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC4_INTERL2-NEXT: [[TMP0:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr null, i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16
; VEC4_INTERL2-NEXT: store <4 x float> poison, ptr [[TMP0]], align 8
; VEC4_INTERL2-NEXT: store <4 x float> zeroinitializer, ptr [[TMP1]], align 8
@@ -1682,8 +1682,8 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC1_INTERL2-NEXT: [[DOTCAST:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP0:%.*]] = call reassoc float @llvm.copysign.f32(float 0.000000e+00, float [[DOTCAST]])
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd reassoc float [[TMP0]], 0.000000e+00
-; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
-; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr null, i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr null, i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP2]], i64 4
; VEC1_INTERL2-NEXT: store float poison, ptr [[TMP1]], align 8
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], ptr [[TMP4]], align 8
@@ -1700,7 +1700,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC2_INTERL1_PRED_STORE-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC2_INTERL1_PRED_STORE: vector.body:
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP0:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr null, i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> poison, ptr [[TMP0]], align 8
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
diff --git a/llvm/test/Transforms/LoopVectorize/forked-pointers.ll b/llvm/test/Transforms/LoopVectorize/forked-pointers.ll
index 52bab3c51a328..f75a7c8f6dacc 100644
--- a/llvm/test/Transforms/LoopVectorize/forked-pointers.ll
+++ b/llvm/test/Transforms/LoopVectorize/forked-pointers.ll
@@ -43,7 +43,7 @@ define dso_local void @forked_ptrs_different_base_same_offset(ptr nocapture read
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[PREDS]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[PREDS]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP7]], <4 x ptr> [[BROADCAST_SPLAT]], <4 x ptr> [[BROADCAST_SPLAT9]]
@@ -51,12 +51,12 @@ define dso_local void @forked_ptrs_different_base_same_offset(ptr nocapture read
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x ptr> [[TMP8]], i64 1
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x ptr> [[TMP8]], i64 2
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x ptr> [[TMP8]], i64 3
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP11]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP9]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr [4 x i8], ptr [[TMP11]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP29]], i64 4
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr [4 x i8], ptr [[TMP13]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP30]], i64 8
-; CHECK-NEXT: [[TMP31:%.*]] = getelementptr float, ptr [[TMP15]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr [4 x i8], ptr [[TMP15]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP31]], i64 12
; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[TMP10]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP12]], align 4
@@ -66,7 +66,7 @@ define dso_local void @forked_ptrs_different_base_same_offset(ptr nocapture read
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x float> [[TMP21]], float [[TMP18]], i64 1
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x float> [[TMP22]], float [[TMP19]], i64 2
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x float> [[TMP23]], float [[TMP20]], i64 3
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[DEST]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[DEST]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x float> [[TMP24]], ptr [[TMP25]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
@@ -79,13 +79,13 @@ define dso_local void @forked_ptrs_different_base_same_offset(ptr nocapture read
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[PREDS]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PREDS]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1_NOT:%.*]] = icmp eq i32 [[TMP27]], 0
; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[CMP1_NOT]], ptr [[BASE2]], ptr [[BASE1]]
-; CHECK-NEXT: [[DOTSINK_IN:%.*]] = getelementptr inbounds nuw float, ptr [[SPEC_SELECT]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[DOTSINK_IN:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[SPEC_SELECT]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[DOTSINK:%.*]] = load float, ptr [[DOTSINK_IN]], align 4
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw float, ptr [[DEST]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[DEST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store float [[DOTSINK]], ptr [[TMP28]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
@@ -133,7 +133,7 @@ define dso_local void @forked_ptrs_same_base_different_offset(ptr nocapture read
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[PREDS:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[PREDS:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -141,9 +141,9 @@ define dso_local void @forked_ptrs_same_base_different_offset(ptr nocapture read
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[OFFSET_0:%.*]] = select i1 [[CMP1_NOT]], i32 [[ADD]], i32 [[TMP1]]
; CHECK-NEXT: [[IDXPROM213:%.*]] = zext i32 [[OFFSET_0]] to i64
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[BASE:%.*]], i64 [[IDXPROM213]]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BASE:%.*]], i64 [[IDXPROM213]]
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw float, ptr [[DEST:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[DEST:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store float [[TMP2]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
diff --git a/llvm/test/Transforms/LoopVectorize/gep_with_bitcast.ll b/llvm/test/Transforms/LoopVectorize/gep_with_bitcast.ll
index a6c9aadaac36c..2e4e97385385e 100644
--- a/llvm/test/Transforms/LoopVectorize/gep_with_bitcast.ll
+++ b/llvm/test/Transforms/LoopVectorize/gep_with_bitcast.ll
@@ -13,7 +13,7 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
; CHECK-LABEL: @foo
; CHECK: vector.body
; CHECK: %[[IV:.+]] = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECK: %[[v0:.+]] = getelementptr inbounds ptr, ptr %in, i64 %[[IV]]
+; CHECK: %[[v0:.+]] = getelementptr inbounds [8 x i8], ptr %in, i64 %[[IV]]
; CHECK: %wide.load = load <4 x i64>, ptr %[[v0]], align 8
; CHECK: icmp eq <4 x i64> %wide.load, zeroinitializer
; CHECK: br i1
diff --git a/llvm/test/Transforms/LoopVectorize/histograms.ll b/llvm/test/Transforms/LoopVectorize/histograms.ll
index 5bb8722d734f4..5850ac3195c39 100644
--- a/llvm/test/Transforms/LoopVectorize/histograms.ll
+++ b/llvm/test/Transforms/LoopVectorize/histograms.ll
@@ -12,13 +12,13 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[WIDE_LOAD]] to <2 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP1]], i64 1
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP3]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[TMP5]], i64 1
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.v2p0.i32(<2 x ptr> [[TMP7]], i32 1, <2 x i1> splat (i1 true))
@@ -33,10 +33,10 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV1]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[INDICES]], i64 [[IV1]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP12]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
index 51255b2e35707..6333dd3a094c3 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -28,9 +28,9 @@ define void @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META3]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], splat (i32 19)
@@ -50,9 +50,9 @@ define void @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END14:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_END14]]
@@ -135,9 +135,9 @@ define void @multi_variable_if_nest(ptr nocapture %A, ptr nocapture %B, i32 %n)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META12:![0-9]+]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META12]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], splat (i32 19)
@@ -162,9 +162,9 @@ define void @multi_variable_if_nest(ptr nocapture %A, ptr nocapture %B, i32 %n)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END14:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]]
; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_END14]]
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion.ll b/llvm/test/Transforms/LoopVectorize/if-conversion.ll
index 350c267445e54..2c54fe3eb104d 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion.ll
@@ -54,9 +54,9 @@ define void @function0(ptr nocapture %a, ptr nocapture %b, i32 %start, i32 %end)
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDEX]], [[TMP0]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4, !alias.scope [[META3]]
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD4]]
; CHECK-NEXT: [[TMP15:%.*]] = mul <4 x i32> [[WIDE_LOAD]], splat (i32 5)
@@ -74,9 +74,9 @@ define void @function0(ptr nocapture %a, ptr nocapture %b, i32 %start, i32 %end)
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[IF_END:.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP18]], [[TMP19]]
; CHECK-NEXT: br i1 [[CMP5]], label %[[IF_THEN:.*]], label %[[IF_END]]
@@ -157,7 +157,7 @@ define i32 @reduction_func(ptr nocapture %A, i32 %n) nounwind uwtable readonly s
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], splat (i32 30)
; CHECK-NEXT: [[TMP3:%.*]] = add <4 x i32> [[VEC_PHI]], splat (i32 2)
@@ -177,7 +177,7 @@ define i32 @reduction_func(ptr nocapture %A, i32 %n) nounwind uwtable readonly s
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; CHECK-NEXT: [[SUM_011:%.*]] = phi i32 [ [[SUM_1:%.*]], %[[FOR_INC]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP7]], 30
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_INC]]
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index bdf984e0956a8..2aa81038c0207 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -67,7 +67,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 190, i32 191>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP3]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i32> [[VEC_IND]], splat (i32 2)
@@ -83,7 +83,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; IND: for.body:
; IND-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; IND-NEXT: [[COUNT_09:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; IND-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; IND-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; IND-NEXT: store i32 [[COUNT_09]], ptr [[ARRAYIDX2]], align 4
; IND-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1
; IND-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -109,7 +109,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 190, i32 191>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
-; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP3]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP3]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP4]], align 4
@@ -127,7 +127,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; UNROLL: for.body:
; UNROLL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; UNROLL-NEXT: [[COUNT_09:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; UNROLL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; UNROLL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; UNROLL-NEXT: store i32 [[COUNT_09]], ptr [[ARRAYIDX2]], align 4
; UNROLL-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1
; UNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -198,7 +198,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 190, i32 191, i32 192, i32 193>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP3]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP3]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP4]], align 4
@@ -216,7 +216,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; INTERLEAVE-NEXT: [[COUNT_09:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; INTERLEAVE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; INTERLEAVE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; INTERLEAVE-NEXT: store i32 [[COUNT_09]], ptr [[ARRAYIDX2]], align 4
; INTERLEAVE-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1
; INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -339,11 +339,11 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; IND-NEXT: br label [[VECTOR_BODY:%.*]]
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; IND-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[TMP5]], i64 [[OFFSET]]
+; IND-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; IND-NEXT: [[TMP6:%.*]] = getelementptr [4 x i8], ptr [[TMP5]], i64 [[OFFSET]]
; IND-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP6]], align 4, !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
-; IND-NEXT: [[TMP7:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; IND-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[TMP7]], i64 [[OFFSET2]]
+; IND-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; IND-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[TMP7]], i64 [[OFFSET2]]
; IND-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x float>, ptr [[TMP8]], align 4, !alias.scope [[META7]]
; IND-NEXT: [[TMP9:%.*]] = fmul fast <2 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD4]]
; IND-NEXT: [[TMP10:%.*]] = fadd fast <2 x float> [[WIDE_LOAD]], [[TMP9]]
@@ -359,11 +359,11 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; IND-NEXT: br label [[FOR_BODY:%.*]]
; IND: for.body:
; IND-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IND-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; IND-NEXT: [[ARR_IDX:%.*]] = getelementptr float, ptr [[TMP12]], i64 [[OFFSET]]
+; IND-NEXT: [[TMP12:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; IND-NEXT: [[ARR_IDX:%.*]] = getelementptr [4 x i8], ptr [[TMP12]], i64 [[OFFSET]]
; IND-NEXT: [[L1:%.*]] = load float, ptr [[ARR_IDX]], align 4
-; IND-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; IND-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[OFFSET2]]
+; IND-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; IND-NEXT: [[ARR_IDX2:%.*]] = getelementptr [4 x i8], ptr [[TMP13]], i64 [[OFFSET2]]
; IND-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
; IND-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; IND-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
@@ -399,13 +399,13 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[TMP5]], i64 [[OFFSET]]
+; UNROLL-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr [4 x i8], ptr [[TMP5]], i64 [[OFFSET]]
; UNROLL-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 8
; UNROLL-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP6]], align 4, !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
; UNROLL-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x float>, ptr [[TMP7]], align 4, !alias.scope [[META4]], !noalias [[META7]]
-; UNROLL-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; UNROLL-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[OFFSET2]]
+; UNROLL-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP9:%.*]] = getelementptr [4 x i8], ptr [[TMP8]], i64 [[OFFSET2]]
; UNROLL-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP9]], i64 8
; UNROLL-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x float>, ptr [[TMP9]], align 4, !alias.scope [[META7]]
; UNROLL-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x float>, ptr [[TMP10]], align 4, !alias.scope [[META7]]
@@ -426,11 +426,11 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; UNROLL-NEXT: br label [[FOR_BODY:%.*]]
; UNROLL: for.body:
; UNROLL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; UNROLL-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; UNROLL-NEXT: [[ARR_IDX:%.*]] = getelementptr float, ptr [[TMP16]], i64 [[OFFSET]]
+; UNROLL-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; UNROLL-NEXT: [[ARR_IDX:%.*]] = getelementptr [4 x i8], ptr [[TMP16]], i64 [[OFFSET]]
; UNROLL-NEXT: [[L1:%.*]] = load float, ptr [[ARR_IDX]], align 4
-; UNROLL-NEXT: [[TMP17:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; UNROLL-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP17]], i64 [[OFFSET2]]
+; UNROLL-NEXT: [[TMP17:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; UNROLL-NEXT: [[ARR_IDX2:%.*]] = getelementptr [4 x i8], ptr [[TMP17]], i64 [[OFFSET2]]
; UNROLL-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
; UNROLL-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; UNROLL-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
@@ -534,13 +534,13 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[TMP5]], i64 [[OFFSET]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr [4 x i8], ptr [[TMP5]], i64 [[OFFSET]]
; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16
; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP6]], align 4, !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
; INTERLEAVE-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP7]], align 4, !alias.scope [[META4]], !noalias [[META7]]
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[OFFSET2]]
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr [4 x i8], ptr [[TMP8]], i64 [[OFFSET2]]
; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP9]], i64 16
; INTERLEAVE-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !alias.scope [[META7]]
; INTERLEAVE-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP10]], align 4, !alias.scope [[META7]]
@@ -561,11 +561,11 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; INTERLEAVE-NEXT: br label [[FOR_BODY:%.*]]
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; INTERLEAVE-NEXT: [[ARR_IDX:%.*]] = getelementptr float, ptr [[TMP16]], i64 [[OFFSET]]
+; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[ARR_IDX:%.*]] = getelementptr [4 x i8], ptr [[TMP16]], i64 [[OFFSET]]
; INTERLEAVE-NEXT: [[L1:%.*]] = load float, ptr [[ARR_IDX]], align 4
-; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; INTERLEAVE-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP17]], i64 [[OFFSET2]]
+; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[ARR_IDX2:%.*]] = getelementptr [4 x i8], ptr [[TMP17]], i64 [[OFFSET2]]
; INTERLEAVE-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
; INTERLEAVE-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; INTERLEAVE-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
@@ -659,7 +659,7 @@ define i64 @scalarize_induction_variable_01(ptr %a, i64 %n) {
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; IND-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 8
; IND-NEXT: [[TMP1]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
@@ -676,7 +676,7 @@ define i64 @scalarize_induction_variable_01(ptr %a, i64 %n) {
; IND: for.body:
; IND-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; IND-NEXT: [[SUM:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; IND-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[I]]
+; IND-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; IND-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8
; IND-NEXT: [[TMP6]] = add i64 [[TMP5]], [[SUM]]
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -698,7 +698,7 @@ define i64 @scalarize_induction_variable_01(ptr %a, i64 %n) {
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
; UNROLL-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 8
; UNROLL-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
@@ -719,7 +719,7 @@ define i64 @scalarize_induction_variable_01(ptr %a, i64 %n) {
; UNROLL: for.body:
; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[SUM:%.*]] = phi i64 [ [[TMP8:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[I]]
+; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; UNROLL-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 8
; UNROLL-NEXT: [[TMP8]] = add i64 [[TMP7]], [[SUM]]
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -785,7 +785,7 @@ define i64 @scalarize_induction_variable_01(ptr %a, i64 %n) {
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 32
; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8
; INTERLEAVE-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
@@ -806,7 +806,7 @@ define i64 @scalarize_induction_variable_01(ptr %a, i64 %n) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[SUM:%.*]] = phi i64 [ [[TMP8:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[I]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; INTERLEAVE-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 8
; INTERLEAVE-NEXT: [[TMP8]] = add i64 [[TMP7]], [[SUM]]
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -925,14 +925,14 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; IND-NEXT: [[VEC_PHI:%.*]] = phi <2 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
; IND-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
-; IND-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
-; IND-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP3]]
+; IND-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; IND-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP3]]
; IND-NEXT: [[TMP6:%.*]] = load float, ptr [[TMP4]], align 4
; IND-NEXT: [[TMP7:%.*]] = load float, ptr [[TMP5]], align 4
; IND-NEXT: [[TMP8:%.*]] = insertelement <2 x float> poison, float [[TMP6]], i64 0
; IND-NEXT: [[TMP9:%.*]] = insertelement <2 x float> [[TMP8]], float [[TMP7]], i64 1
-; IND-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
-; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP3]]
+; IND-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP3]]
; IND-NEXT: [[TMP12:%.*]] = load float, ptr [[TMP10]], align 4
; IND-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP11]], align 4
; IND-NEXT: [[TMP14:%.*]] = insertelement <2 x float> poison, float [[TMP12]], i64 0
@@ -954,9 +954,9 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; IND: for.body:
; IND-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
; IND-NEXT: [[S:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP27:%.*]], [[FOR_BODY]] ]
-; IND-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I]]
+; IND-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
; IND-NEXT: [[TMP22:%.*]] = load float, ptr [[TMP21]], align 4
-; IND-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[I]]
+; IND-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
; IND-NEXT: [[TMP24:%.*]] = load float, ptr [[TMP23]], align 4
; IND-NEXT: [[TMP25:%.*]] = fadd fast float [[S]], 1.000000e+00
; IND-NEXT: [[TMP26:%.*]] = fadd fast float [[TMP25]], [[TMP22]]
@@ -988,10 +988,10 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; UNROLL-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
; UNROLL-NEXT: [[TMP4:%.*]] = or disjoint i64 [[OFFSET_IDX]], 16
; UNROLL-NEXT: [[TMP5:%.*]] = or disjoint i64 [[OFFSET_IDX]], 24
-; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
-; UNROLL-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP3]]
-; UNROLL-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP4]]
-; UNROLL-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP5]]
+; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; UNROLL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP3]]
+; UNROLL-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP4]]
+; UNROLL-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP5]]
; UNROLL-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP6]], align 4
; UNROLL-NEXT: [[TMP11:%.*]] = load float, ptr [[TMP7]], align 4
; UNROLL-NEXT: [[TMP12:%.*]] = insertelement <2 x float> poison, float [[TMP10]], i64 0
@@ -1000,10 +1000,10 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; UNROLL-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP9]], align 4
; UNROLL-NEXT: [[TMP16:%.*]] = insertelement <2 x float> poison, float [[TMP14]], i64 0
; UNROLL-NEXT: [[TMP17:%.*]] = insertelement <2 x float> [[TMP16]], float [[TMP15]], i64 1
-; UNROLL-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
-; UNROLL-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP3]]
-; UNROLL-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP4]]
-; UNROLL-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP5]]
+; UNROLL-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; UNROLL-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP3]]
+; UNROLL-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP4]]
+; UNROLL-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP5]]
; UNROLL-NEXT: [[TMP22:%.*]] = load float, ptr [[TMP18]], align 4
; UNROLL-NEXT: [[TMP23:%.*]] = load float, ptr [[TMP19]], align 4
; UNROLL-NEXT: [[TMP24:%.*]] = insertelement <2 x float> poison, float [[TMP22]], i64 0
@@ -1033,9 +1033,9 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; UNROLL: for.body:
; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
; UNROLL-NEXT: [[S:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP44:%.*]], [[FOR_BODY]] ]
-; UNROLL-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I]]
+; UNROLL-NEXT: [[TMP38:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
; UNROLL-NEXT: [[TMP39:%.*]] = load float, ptr [[TMP38]], align 4
-; UNROLL-NEXT: [[TMP40:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[I]]
+; UNROLL-NEXT: [[TMP40:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
; UNROLL-NEXT: [[TMP41:%.*]] = load float, ptr [[TMP40]], align 4
; UNROLL-NEXT: [[TMP42:%.*]] = fadd fast float [[S]], 1.000000e+00
; UNROLL-NEXT: [[TMP43:%.*]] = fadd fast float [[TMP42]], [[TMP39]]
@@ -1148,14 +1148,14 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; INTERLEAVE-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
; INTERLEAVE-NEXT: [[TMP5:%.*]] = or disjoint i64 [[OFFSET_IDX]], 32
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP5]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP5]]
; INTERLEAVE-NEXT: [[WIDE_VEC:%.*]] = load <32 x float>, ptr [[TMP6]], align 4
; INTERLEAVE-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <32 x float> [[WIDE_VEC]], <32 x float> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
; INTERLEAVE-NEXT: [[WIDE_VEC2:%.*]] = load <32 x float>, ptr [[TMP7]], align 4
; INTERLEAVE-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <32 x float> [[WIDE_VEC2]], <32 x float> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
-; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP5]]
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP5]]
; INTERLEAVE-NEXT: [[WIDE_VEC4:%.*]] = load <32 x float>, ptr [[TMP8]], align 4
; INTERLEAVE-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <32 x float> [[WIDE_VEC4]], <32 x float> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
; INTERLEAVE-NEXT: [[WIDE_VEC6:%.*]] = load <32 x float>, ptr [[TMP9]], align 4
@@ -1180,9 +1180,9 @@ define float @scalarize_induction_variable_02(ptr %a, ptr %b, i64 %n) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
; INTERLEAVE-NEXT: [[S:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[FOR_BODY]] ]
-; INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I]]
+; INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
; INTERLEAVE-NEXT: [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
-; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[I]]
+; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
; INTERLEAVE-NEXT: [[TMP21:%.*]] = load float, ptr [[TMP20]], align 4
; INTERLEAVE-NEXT: [[TMP22:%.*]] = fadd fast float [[S]], 1.000000e+00
; INTERLEAVE-NEXT: [[TMP23:%.*]] = fadd fast float [[TMP22]], [[TMP19]]
@@ -1283,9 +1283,9 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; IND-NEXT: br label [[VECTOR_BODY:%.*]]
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; IND-NEXT: [[TMP13:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP13:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; IND-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP13]], i64 12
; IND-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 8
; IND-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 8
@@ -1307,7 +1307,7 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; IND-NEXT: br label [[FOR_BODY:%.*]]
; IND: for.body:
; IND-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; IND-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; IND-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[I]]
; IND-NEXT: [[F:%.*]] = getelementptr inbounds nuw i8, ptr [[F_SPLIT]], i64 4
; IND-NEXT: [[TMP11:%.*]] = load i32, ptr [[F]], align 8
; IND-NEXT: [[TMP12:%.*]] = xor i32 [[TMP11]], [[Y]]
@@ -1330,13 +1330,13 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP1]], i64 12
-; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP24]], i64 20
-; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP25]], i64 28
; UNROLL-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP3]], align 8
; UNROLL-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP4]], align 8
@@ -1367,7 +1367,7 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; UNROLL-NEXT: br label [[FOR_BODY:%.*]]
; UNROLL: for.body:
; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; UNROLL-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[I]]
; UNROLL-NEXT: [[F:%.*]] = getelementptr inbounds nuw i8, ptr [[F_SPLIT]], i64 4
; UNROLL-NEXT: [[TMP22:%.*]] = load i32, ptr [[F]], align 8
; UNROLL-NEXT: [[TMP23:%.*]] = xor i32 [[TMP22]], [[Y]]
@@ -1452,21 +1452,21 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP3]], i64 12
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP5]], i64 20
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP7]], i64 28
-; INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP30]], i64 36
-; INTERLEAVE-NEXT: [[TMP31:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP31:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP31]], i64 44
-; INTERLEAVE-NEXT: [[TMP32:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP32:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP32]], i64 52
-; INTERLEAVE-NEXT: [[TMP33:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP33:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP33]], i64 60
; INTERLEAVE-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP9]], align 8
; INTERLEAVE-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -1500,7 +1500,7 @@ define void @scalarize_induction_variable_03(ptr %p, i32 %y, i64 %n) {
; INTERLEAVE-NEXT: br label [[FOR_BODY:%.*]]
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; INTERLEAVE-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; INTERLEAVE-NEXT: [[F_SPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; INTERLEAVE-NEXT: [[F:%.*]] = getelementptr inbounds nuw i8, ptr [[F_SPLIT]], i64 4
; INTERLEAVE-NEXT: [[TMP28:%.*]] = load i32, ptr [[F]], align 8
; INTERLEAVE-NEXT: [[TMP29:%.*]] = xor i32 [[TMP28]], [[Y]]
@@ -1631,13 +1631,13 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; IND-NEXT: [[TMP10:%.*]] = shl nsw <2 x i64> [[VEC_IND]], splat (i64 2)
; IND-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[TMP10]], i64 0
; IND-NEXT: [[TMP13:%.*]] = extractelement <2 x i64> [[TMP10]], i64 1
-; IND-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP11]]
-; IND-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP13]]
+; IND-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP11]]
+; IND-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP13]]
; IND-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP12]], align 1, !alias.scope [[META17:![0-9]+]]
; IND-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP24]], align 1, !alias.scope [[META17]]
-; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; IND-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; IND-NEXT: [[TMP17:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP17:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; IND-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP17]], i64 12
; IND-NEXT: store i32 [[TMP14]], ptr [[TMP16]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; IND-NEXT: store i32 [[TMP15]], ptr [[TMP18]], align 1, !alias.scope [[META20]], !noalias [[META17]]
@@ -1656,7 +1656,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; IND-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[I]], 4
; IND-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[DOTIDX]]
; IND-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 1
-; IND-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; IND-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[I]]
; IND-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 4
; IND-NEXT: store i32 [[TMP21]], ptr [[TMP22]], align 1
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -1700,25 +1700,25 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL-NEXT: [[TMP13:%.*]] = add <2 x i64> [[STEP_ADD]], splat (i64 8)
; UNROLL-NEXT: [[TMP20:%.*]] = extractelement <2 x i64> [[TMP13]], i64 0
; UNROLL-NEXT: [[TMP35:%.*]] = extractelement <2 x i64> [[TMP13]], i64 1
-; UNROLL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]]
-; UNROLL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
-; UNROLL-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
-; UNROLL-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP35]]
-; UNROLL-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP17]], align 1, !alias.scope [[META17:![0-9]+]]
-; UNROLL-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP19]], align 1, !alias.scope [[META17]]
-; UNROLL-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP21]], align 1, !alias.scope [[META17]]
-; UNROLL-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP36]], align 1, !alias.scope [[META17]]
-; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP16]]
+; UNROLL-NEXT: [[TMP36:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP18]]
+; UNROLL-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP20]]
+; UNROLL-NEXT: [[TMP37:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP35]]
+; UNROLL-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP15]], align 1, !alias.scope [[META17:![0-9]+]]
+; UNROLL-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP36]], align 1, !alias.scope [[META17]]
+; UNROLL-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 1, !alias.scope [[META17]]
+; UNROLL-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP37]], align 1, !alias.scope [[META17]]
+; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP24]], i64 12
-; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i64 20
-; UNROLL-NEXT: [[TMP28:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP28:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[TMP28]], i64 28
-; UNROLL-NEXT: store i32 [[TMP37]], ptr [[TMP23]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
+; UNROLL-NEXT: store i32 [[TMP19]], ptr [[TMP23]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; UNROLL-NEXT: store i32 [[TMP38]], ptr [[TMP25]], align 1, !alias.scope [[META20]], !noalias [[META17]]
-; UNROLL-NEXT: store i32 [[TMP39]], ptr [[TMP27]], align 1, !alias.scope [[META20]], !noalias [[META17]]
+; UNROLL-NEXT: store i32 [[TMP21]], ptr [[TMP27]], align 1, !alias.scope [[META20]], !noalias [[META17]]
; UNROLL-NEXT: store i32 [[TMP22]], ptr [[TMP29]], align 1, !alias.scope [[META20]], !noalias [[META17]]
; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; UNROLL-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
@@ -1735,7 +1735,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[I]], 4
; UNROLL-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[DOTIDX]]
; UNROLL-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 1
-; UNROLL-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; UNROLL-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[I]]
; UNROLL-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT6]], i64 4
; UNROLL-NEXT: store i32 [[TMP32]], ptr [[TMP33]], align 1
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -1868,21 +1868,21 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; INTERLEAVE-NEXT: [[TMP33:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 4
; INTERLEAVE-NEXT: [[TMP34:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 8
; INTERLEAVE-NEXT: [[TMP35:%.*]] = extractelement <16 x i32> [[WIDE_VEC3]], i64 12
-; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP15]], i64 12
-; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[TMP17]], i64 20
-; INTERLEAVE-NEXT: [[TMP43:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP43:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP43]], i64 28
-; INTERLEAVE-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP14]]
+; INTERLEAVE-NEXT: [[DOTSPLIT9:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP14]]
; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT9]], i64 4
-; INTERLEAVE-NEXT: [[TMP22:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP22:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[TMP22]], i64 44
-; INTERLEAVE-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP24:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP24]], i64 52
-; INTERLEAVE-NEXT: [[TMP26:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP26:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i64 60
; INTERLEAVE-NEXT: store i32 [[TMP28]], ptr [[TMP41]], align 1, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; INTERLEAVE-NEXT: store i32 [[TMP29]], ptr [[TMP16]], align 1, !alias.scope [[META20]], !noalias [[META17]]
@@ -1905,7 +1905,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; INTERLEAVE-NEXT: [[DOTIDX6:%.*]] = shl nsw i64 [[I]], 4
; INTERLEAVE-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[DOTIDX6]]
; INTERLEAVE-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP37]], align 1
-; INTERLEAVE-NEXT: [[DOTSPLIT14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; INTERLEAVE-NEXT: [[DOTSPLIT14:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; INTERLEAVE-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT14]], i64 4
; INTERLEAVE-NEXT: store i32 [[TMP38]], ptr [[TMP39]], align 1
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -2027,7 +2027,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE2:%.*]] ]
; IND-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[PRED_UDIV_CONTINUE2]] ]
; IND-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
+; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP0]]
; IND-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP1]], align 4
; IND-NEXT: br i1 [[C:%.*]], label [[PRED_UDIV_IF:%.*]], label [[PRED_UDIV_CONTINUE:%.*]]
; IND: pred.udiv.if:
@@ -2062,7 +2062,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; IND-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
; IND-NEXT: [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[VAR4:%.*]], [[IF_END]] ]
; IND-NEXT: [[TMP16:%.*]] = zext nneg i32 [[I]] to i64
-; IND-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP16]]
+; IND-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP16]]
; IND-NEXT: [[VAR1:%.*]] = load i32, ptr [[VAR0]], align 4
; IND-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
; IND: if.then:
@@ -2091,7 +2091,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; UNROLL-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[PRED_UDIV_CONTINUE8]] ]
; UNROLL-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[PRED_UDIV_CONTINUE8]] ]
; UNROLL-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
+; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP0]]
; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 8
; UNROLL-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP1]], align 4
; UNROLL-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4
@@ -2149,7 +2149,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; UNROLL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
; UNROLL-NEXT: [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[VAR4:%.*]], [[IF_END]] ]
; UNROLL-NEXT: [[TMP26:%.*]] = zext nneg i32 [[I]] to i64
-; UNROLL-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP26]]
+; UNROLL-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP26]]
; UNROLL-NEXT: [[VAR1:%.*]] = load i32, ptr [[VAR0]], align 4
; UNROLL-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
; UNROLL: if.then:
@@ -2266,7 +2266,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP42:%.*]], [[PRED_UDIV_CONTINUE16]] ]
; INTERLEAVE-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP43:%.*]], [[PRED_UDIV_CONTINUE16]] ]
; INTERLEAVE-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP0]]
; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; INTERLEAVE-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
@@ -2360,7 +2360,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; INTERLEAVE-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
; INTERLEAVE-NEXT: [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[VAR4:%.*]], [[IF_END]] ]
; INTERLEAVE-NEXT: [[TMP46:%.*]] = zext nneg i32 [[I]] to i64
-; INTERLEAVE-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP46]]
+; INTERLEAVE-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP46]]
; INTERLEAVE-NEXT: [[VAR1:%.*]] = load i32, ptr [[VAR0]], align 4
; INTERLEAVE-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
; INTERLEAVE: if.then:
@@ -2481,9 +2481,9 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; IND-NEXT: [[TMP5:%.*]] = trunc <2 x i32> [[TMP4]] to <2 x i16>
; IND-NEXT: [[TMP8:%.*]] = extractelement <2 x i16> [[TMP5]], i64 0
; IND-NEXT: [[TMP9:%.*]] = extractelement <2 x i16> [[TMP5]], i64 1
-; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; IND-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 2
-; IND-NEXT: [[TMP16:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; IND-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; IND-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP16]], i64 6
; IND-NEXT: store i16 [[TMP8]], ptr [[TMP6]], align 2
; IND-NEXT: store i16 [[TMP9]], ptr [[TMP7]], align 2
@@ -2502,7 +2502,7 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; IND-NEXT: [[TMP11:%.*]] = trunc i64 [[I]] to i32
; IND-NEXT: [[TMP12:%.*]] = add i32 [[A]], [[TMP11]]
; IND-NEXT: [[TMP13:%.*]] = trunc i32 [[TMP12]] to i16
-; IND-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]]
+; IND-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[P]], i64 [[I]]
; IND-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 2
; IND-NEXT: store i16 [[TMP13]], ptr [[TMP14]], align 2
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -2536,13 +2536,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; UNROLL-NEXT: [[TMP9:%.*]] = trunc <2 x i32> [[TMP7]] to <2 x i16>
; UNROLL-NEXT: [[TMP16:%.*]] = extractelement <2 x i16> [[TMP9]], i64 0
; UNROLL-NEXT: [[TMP17:%.*]] = extractelement <2 x i16> [[TMP9]], i64 1
-; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 2
-; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP24:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP24]], i64 6
-; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP25:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP25]], i64 10
-; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP26:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP26]], i64 14
; UNROLL-NEXT: store i16 [[TMP14]], ptr [[TMP10]], align 2
; UNROLL-NEXT: store i16 [[TMP15]], ptr [[TMP11]], align 2
@@ -2563,7 +2563,7 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; UNROLL-NEXT: [[TMP19:%.*]] = trunc i64 [[I]] to i32
; UNROLL-NEXT: [[TMP20:%.*]] = add i32 [[A]], [[TMP19]]
; UNROLL-NEXT: [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
-; UNROLL-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]]
+; UNROLL-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[P]], i64 [[I]]
; UNROLL-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 2
; UNROLL-NEXT: store i16 [[TMP21]], ptr [[TMP22]], align 2
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -2662,21 +2662,21 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; INTERLEAVE-NEXT: [[TMP27:%.*]] = extractelement <4 x i16> [[TMP13]], i64 1
; INTERLEAVE-NEXT: [[TMP28:%.*]] = extractelement <4 x i16> [[TMP13]], i64 2
; INTERLEAVE-NEXT: [[TMP29:%.*]] = extractelement <4 x i16> [[TMP13]], i64 3
-; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I16:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [4 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 2
-; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP8]], i64 6
-; INTERLEAVE-NEXT: [[TMP36:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP36:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP36]], i64 10
-; INTERLEAVE-NEXT: [[TMP37:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP37:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP37]], i64 14
-; INTERLEAVE-NEXT: [[TMP38:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP38:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP38]], i64 18
-; INTERLEAVE-NEXT: [[TMP39:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP39:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP39]], i64 22
-; INTERLEAVE-NEXT: [[TMP40:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP40:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP40]], i64 26
-; INTERLEAVE-NEXT: [[TMP41:%.*]] = getelementptr [[PAIR_I16]], ptr [[P]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP41:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP41]], i64 30
; INTERLEAVE-NEXT: store i16 [[TMP22]], ptr [[TMP14]], align 2
; INTERLEAVE-NEXT: store i16 [[TMP23]], ptr [[TMP15]], align 2
@@ -2701,7 +2701,7 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; INTERLEAVE-NEXT: [[TMP31:%.*]] = trunc i64 [[I]] to i32
; INTERLEAVE-NEXT: [[TMP32:%.*]] = add i32 [[A]], [[TMP31]]
; INTERLEAVE-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
-; INTERLEAVE-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds nuw [[PAIR_I16]], ptr [[P]], i64 [[I]]
+; INTERLEAVE-NEXT: [[DOTSPLIT8:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[P]], i64 [[I]]
; INTERLEAVE-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT8]], i64 2
; INTERLEAVE-NEXT: store i16 [[TMP33]], ptr [[TMP34]], align 2
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -3372,7 +3372,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; IND-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
-; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
+; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP10]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
@@ -3391,7 +3391,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[IDX_B:%.*]] = phi i32 [ [[IDX_B_INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; IND-NEXT: [[SPHI:%.*]] = phi i32 [ [[IDX_INC_EXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; IND-NEXT: [[TMP13:%.*]] = sext i8 [[IDX]] to i64
-; IND-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP13]]
+; IND-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP13]]
; IND-NEXT: store i32 [[SPHI]], ptr [[PTR]], align 4
; IND-NEXT: [[IDX_INC]] = add i8 [[IDX]], 1
; IND-NEXT: [[IDX_INC_EXT]] = zext i8 [[IDX_INC]] to i32
@@ -3439,7 +3439,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; UNROLL-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
-; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
+; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP10]]
; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP11]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP12]], align 4
@@ -3460,7 +3460,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[IDX_B:%.*]] = phi i32 [ [[IDX_B_INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[SPHI:%.*]] = phi i32 [ [[IDX_INC_EXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[TMP14:%.*]] = sext i8 [[IDX]] to i64
-; UNROLL-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
+; UNROLL-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP14]]
; UNROLL-NEXT: store i32 [[SPHI]], ptr [[PTR]], align 4
; UNROLL-NEXT: [[IDX_INC]] = add i8 [[IDX]], 1
; UNROLL-NEXT: [[IDX_INC_EXT]] = zext i8 [[IDX_INC]] to i32
@@ -3579,7 +3579,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; INTERLEAVE-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
-; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
+; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP10]]
; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP11]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP12]], align 4
@@ -3600,7 +3600,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[IDX_B:%.*]] = phi i32 [ [[IDX_B_INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[SPHI:%.*]] = phi i32 [ [[IDX_INC_EXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[TMP14:%.*]] = sext i8 [[IDX]] to i64
-; INTERLEAVE-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
+; INTERLEAVE-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP14]]
; INTERLEAVE-NEXT: store i32 [[SPHI]], ptr [[PTR]], align 4
; INTERLEAVE-NEXT: [[IDX_INC]] = add i8 [[IDX]], 1
; INTERLEAVE-NEXT: [[IDX_INC_EXT]] = zext i8 [[IDX_INC]] to i32
@@ -3751,7 +3751,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; IND-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
-; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
+; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP10]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 8)
@@ -3770,7 +3770,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[SPHI:%.*]] = phi i32 [ [[MUL:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
; IND-NEXT: [[IDX_B:%.*]] = phi i32 [ [[IDX_B_INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; IND-NEXT: [[TMP13:%.*]] = sext i8 [[IDX]] to i64
-; IND-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP13]]
+; IND-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP13]]
; IND-NEXT: store i32 [[SPHI]], ptr [[PTR]], align 4
; IND-NEXT: [[IDX_INC]] = add i8 [[IDX]], 1
; IND-NEXT: [[IDX_INC_EXT:%.*]] = zext i8 [[IDX_INC]] to i32
@@ -3821,7 +3821,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; UNROLL-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
-; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
+; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP10]]
; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP11]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP12]], align 4
@@ -3842,7 +3842,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[SPHI:%.*]] = phi i32 [ [[MUL:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[IDX_B:%.*]] = phi i32 [ [[IDX_B_INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[TMP14:%.*]] = sext i8 [[IDX]] to i64
-; UNROLL-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
+; UNROLL-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP14]]
; UNROLL-NEXT: store i32 [[SPHI]], ptr [[PTR]], align 4
; UNROLL-NEXT: [[IDX_INC]] = add i8 [[IDX]], 1
; UNROLL-NEXT: [[IDX_INC_EXT:%.*]] = zext i8 [[IDX_INC]] to i32
@@ -3967,7 +3967,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; INTERLEAVE-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
-; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
+; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP10]]
; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP11]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP12]], align 4
@@ -3988,7 +3988,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[SPHI:%.*]] = phi i32 [ [[MUL:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[IDX_B:%.*]] = phi i32 [ [[IDX_B_INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[TMP14:%.*]] = sext i8 [[IDX]] to i64
-; INTERLEAVE-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
+; INTERLEAVE-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP14]]
; INTERLEAVE-NEXT: store i32 [[SPHI]], ptr [[PTR]], align 4
; INTERLEAVE-NEXT: [[IDX_INC]] = add i8 [[IDX]], 1
; INTERLEAVE-NEXT: [[IDX_INC_EXT:%.*]] = zext i8 [[IDX_INC]] to i32
@@ -4078,7 +4078,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
+; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP0]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP1]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i32> [[VEC_IND]], splat (i32 2)
@@ -4093,7 +4093,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; IND: for.body:
; IND-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; IND-NEXT: [[TMP3:%.*]] = sext i32 [[INDVARS_IV]] to i64
-; IND-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP3]]
+; IND-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP3]]
; IND-NEXT: store i32 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 4
; IND-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 1
; IND-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], [[K]]
@@ -4113,7 +4113,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
; UNROLL-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
+; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP0]]
; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP1]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP2]], align 4
@@ -4130,7 +4130,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; UNROLL: for.body:
; UNROLL-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[TMP4:%.*]] = sext i32 [[INDVARS_IV]] to i64
-; UNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]]
+; UNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP4]]
; UNROLL-NEXT: store i32 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 4
; UNROLL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 1
; UNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], [[K]]
@@ -4186,7 +4186,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; INTERLEAVE-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP0]]
; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP1]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP2]], align 4
@@ -4203,7 +4203,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[TMP4:%.*]] = sext i32 [[INDVARS_IV]] to i64
-; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]]
+; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP4]]
; INTERLEAVE-NEXT: store i32 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 4
; INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 1
; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], [[K]]
@@ -4515,7 +4515,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]]
; IND-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64
-; IND-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]]
+; IND-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP2]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i32> [[VEC_IND]], splat (i32 2)
@@ -4530,7 +4530,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; IND: for.body:
; IND-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; IND-NEXT: [[TMP4:%.*]] = sext i32 [[INDVARS_IV]] to i64
-; IND-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]]
+; IND-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP4]]
; IND-NEXT: store i32 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 4
; IND-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 1
; IND-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], [[K]]
@@ -4556,7 +4556,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]]
; UNROLL-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64
-; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]]
+; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP2]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP3]], align 4
@@ -4573,7 +4573,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; UNROLL: for.body:
; UNROLL-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[TMP5:%.*]] = sext i32 [[INDVARS_IV]] to i64
-; UNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP5]]
+; UNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP5]]
; UNROLL-NEXT: store i32 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 4
; UNROLL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 1
; UNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], [[K]]
@@ -4641,7 +4641,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]]
; INTERLEAVE-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64
-; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]]
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP2]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP3]], align 4
@@ -4658,7 +4658,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[TMP5:%.*]] = sext i32 [[INDVARS_IV]] to i64
-; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP5]]
+; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP5]]
; INTERLEAVE-NEXT: store i32 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 4
; INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 1
; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], [[K]]
@@ -4733,7 +4733,7 @@ define void @non_primary_iv_trunc(ptr %a, i64 %n) {
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 2>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP0]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 4)
@@ -4749,7 +4749,7 @@ define void @non_primary_iv_trunc(ptr %a, i64 %n) {
; IND: for.body:
; IND-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; IND-NEXT: [[J:%.*]] = phi i64 [ [[J_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; IND-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[I]]
+; IND-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I]]
; IND-NEXT: [[VAR1:%.*]] = trunc i64 [[J]] to i32
; IND-NEXT: store i32 [[VAR1]], ptr [[VAR0]], align 4
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -4772,7 +4772,7 @@ define void @non_primary_iv_trunc(ptr %a, i64 %n) {
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 2>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 4)
-; UNROLL-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP0]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP1]], align 4
@@ -4790,7 +4790,7 @@ define void @non_primary_iv_trunc(ptr %a, i64 %n) {
; UNROLL: for.body:
; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[J:%.*]] = phi i64 [ [[J_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; UNROLL-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[I]]
+; UNROLL-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I]]
; UNROLL-NEXT: [[VAR1:%.*]] = trunc i64 [[J]] to i32
; UNROLL-NEXT: store i32 [[VAR1]], ptr [[VAR0]], align 4
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -4855,7 +4855,7 @@ define void @non_primary_iv_trunc(ptr %a, i64 %n) {
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 8)
-; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP0]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP1]], align 4
@@ -4873,7 +4873,7 @@ define void @non_primary_iv_trunc(ptr %a, i64 %n) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[J:%.*]] = phi i64 [ [[J_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; INTERLEAVE-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[I]]
+; INTERLEAVE-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I]]
; INTERLEAVE-NEXT: [[VAR1:%.*]] = trunc i64 [[J]] to i32
; INTERLEAVE-NEXT: store i32 [[VAR1]], ptr [[VAR0]], align 4
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -5981,7 +5981,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; IND-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[VEC_IND:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND]] = phi <2 x i32> [ [[TMP15]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[TMP17:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[VEC_IND]], <2 x i32> <i32 1, i32 2>
-; IND-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[INDEX]]
+; IND-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR:%.*]], i64 [[INDEX]]
; IND-NEXT: store <2 x i32> [[TMP17]], ptr [[TMP18]], align 4
; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add nsw <2 x i32> [[VEC_IND]], [[DOTSPLAT3]]
@@ -6002,7 +6002,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; IND-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; IND-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; IND-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
-; IND-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
+; IND-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR]], i64 [[IV_1]]
; IND-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; IND-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; IND-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
@@ -6052,7 +6052,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL-NEXT: [[STEP_ADD]] = add <2 x i32> [[VEC_IND]], [[TMP16]]
; UNROLL-NEXT: [[TMP18:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[VEC_IND]], <2 x i32> <i32 1, i32 2>
; UNROLL-NEXT: [[TMP19:%.*]] = shufflevector <2 x i32> [[VEC_IND]], <2 x i32> [[STEP_ADD]], <2 x i32> <i32 1, i32 2>
-; UNROLL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[INDEX]]
+; UNROLL-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR:%.*]], i64 [[INDEX]]
; UNROLL-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP20]], i64 8
; UNROLL-NEXT: store <2 x i32> [[TMP18]], ptr [[TMP20]], align 4
; UNROLL-NEXT: store <2 x i32> [[TMP19]], ptr [[TMP21]], align 4
@@ -6075,7 +6075,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; UNROLL-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; UNROLL-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
-; UNROLL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
+; UNROLL-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR]], i64 [[IV_1]]
; UNROLL-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; UNROLL-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; UNROLL-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
@@ -6201,7 +6201,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; INTERLEAVE-NEXT: [[STEP_ADD]] = add <4 x i32> [[VEC_IND]], [[TMP16]]
; INTERLEAVE-NEXT: [[TMP18:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[VEC_IND]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; INTERLEAVE-NEXT: [[TMP19:%.*]] = shufflevector <4 x i32> [[VEC_IND]], <4 x i32> [[STEP_ADD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR:%.*]], i64 [[INDEX]]
; INTERLEAVE-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP20]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4
@@ -6224,7 +6224,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; INTERLEAVE-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; INTERLEAVE-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
-; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
+; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i8], ptr [[PTR]], i64 [[IV_1]]
; INTERLEAVE-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; INTERLEAVE-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-2.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-2.ll
index b8eb5311b4bb2..738a8be782108 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-2.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-2.ll
@@ -43,7 +43,7 @@ define void @_Z4testPfS_m(ptr noalias nocapture %out, ptr noalias nocapture read
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[IN]], i64 [[DOTIDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[OUT]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x i32> [[STRIDED_VEC]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -62,7 +62,7 @@ define void @_Z4testPfS_m(ptr noalias nocapture %out, ptr noalias nocapture read
; CHECK-NEXT: [[ARRAYIDX_IDX:%.*]] = shl i64 [[OUT_OFFSET_08]], 3
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[IN]], i64 [[ARRAYIDX_IDX]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[OUT_OFFSET_08]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[OUT]], i64 [[OUT_OFFSET_08]]
; CHECK-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[INC]] = add nuw i64 [[OUT_OFFSET_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE]]
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
index 16a56f3a38ac9..3bd712132637f 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
@@ -27,7 +27,7 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
@@ -35,7 +35,7 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i64 [[TMP4]], ptr [[TMP3]], align 8
@@ -44,7 +44,7 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [16 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i64 24
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 2
; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP7]], align 8
@@ -60,7 +60,7 @@ define void @interleaved_with_cond_store_0(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P_1_SPLIT]], i64 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], [[X]]
@@ -123,10 +123,10 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [16 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
-; CHECK-NEXT: [[DOTSPLIT5:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
+; CHECK-NEXT: [[DOTSPLIT5:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT5]], i64 8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
@@ -134,7 +134,7 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i64 0
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP6]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -142,7 +142,7 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP4]], i64 1
; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 2
; CHECK-NEXT: store i64 [[TMP10]], ptr [[TMP9]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -162,8 +162,8 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P_1_SPLIT]], i64 8
; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[TMP14]], [[X]]
@@ -230,9 +230,9 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [16 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[TMP0]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
@@ -242,7 +242,7 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i64 0
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 8
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP6]], align 8
@@ -251,7 +251,7 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP4]], i64 1
; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
+; CHECK-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 8
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 2
; CHECK-NEXT: store i64 [[TMP10]], ptr [[TMP9]], align 8
@@ -267,8 +267,8 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_1_SPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P_1_SPLIT]], i64 8
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: store i64 [[X]], ptr [[P_0]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index caaf7bbd687b1..acc58112db0a0 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -34,13 +34,13 @@ define void @test_array_load2_store2(i32 %C, i32 %D) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr @AB, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr @AB, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <4 x i32> [[STRIDED_VEC1]], [[BROADCAST_SPLAT3]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr @CD, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr @CD, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
; CHECK-NEXT: store <8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
@@ -109,7 +109,7 @@ define void @test_struct_array_load3_store3() {
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], splat (i32 1)
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ST3:%.*]], ptr @S, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [12 x i8], ptr @S, i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[STRIDED_VEC2]], splat (i32 2)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC3]], splat (i32 3)
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -184,7 +184,7 @@ define i32 @test_struct_load4(ptr nocapture readonly %S) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[S:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [16 x i8], ptr [[S:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
@@ -255,7 +255,7 @@ define void @test_struct_store4(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [16 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = shl nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 3)
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 4)
@@ -329,7 +329,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 1023, i32 1022, i32 1021, i32 1020>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 -24
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -338,7 +338,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[REVERSE2:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[REVERSE]], [[VEC_IND]]
; CHECK-NEXT: [[TMP3:%.*]] = sub nsw <4 x i32> [[REVERSE2]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 -24
; CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[REVERSE4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -402,7 +402,7 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[TMP1:%.*]] = shl nsw <4 x i32> [[STRIDED_VEC]], splat (i32 1)
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[INDEX]], 9223372036854775804
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[TMP2]]
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 508
@@ -415,11 +415,11 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1016, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP]], 1
; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP1]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1022
@@ -478,7 +478,7 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[TMP6:%.*]] = shl nsw <4 x i32> [[STRIDED_VEC]], splat (i32 1)
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[INDEX]], 9223372036854775804
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[TMP7]]
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -492,11 +492,11 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP]], 1
; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP1]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], [[N]]
@@ -561,17 +561,17 @@ define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture %
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP3]], i64 1
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i64> [[TMP3]], i64 2
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP3]], i64 3
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP2]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [16 x i8], ptr [[P1:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [16 x i8], ptr [[P1]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [16 x i8], ptr [[P1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [16 x i8], ptr [[P1]], i64 [[TMP2]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [16 x i8], ptr [[P2:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 8
-; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP0]]
+; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [16 x i8], ptr [[P2]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT1]], i64 8
-; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP1]]
+; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [16 x i8], ptr [[P2]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 8
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP2]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [16 x i8], ptr [[P2]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 8
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
@@ -642,12 +642,12 @@ define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[STRIDED_VEC1]], [[STRIDED_VEC]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC3]]
@@ -793,7 +793,7 @@ define void @int_float_struct(ptr nocapture readonly %A) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -870,17 +870,17 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) {
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT1]], i64 4
-; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 4
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 4
; CHECK-NEXT: store i32 [[Z:%.*]], ptr [[TMP5]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[TMP6]], align 4
@@ -905,8 +905,8 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_Y]], align 4
@@ -960,14 +960,14 @@ define i32 @PR27626_1(ptr %p, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP4]], i64 12
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP19]], i64 20
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP20]], i64 28
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <8 x i32> [[WIDE_VEC]], i64 0
@@ -994,8 +994,8 @@ define i32 @PR27626_1(ptr %p, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP18:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[TMP17]], ptr [[P_I_Y]], align 4
@@ -1054,18 +1054,18 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[DOTSPLIT1:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT1]], i64 4
-; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[DOTSPLIT2:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT2]], i64 4
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 4
; CHECK-NEXT: store i32 [[Z:%.*]], ptr [[TMP5]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[TMP6]], align 4
@@ -1090,9 +1090,9 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_MINUS_1_X:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
-; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[P_I_MINUS_1_X]], align 4
@@ -1154,16 +1154,16 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP2]], i64 1
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP2]], i64 2
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i64> [[TMP2]], i64 3
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT]], i64 4
-; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[TMP5]]
+; CHECK-NEXT: [[DOTSPLIT3:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT3]], i64 4
-; CHECK-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[TMP7]]
+; CHECK-NEXT: [[DOTSPLIT4:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT4]], i64 4
-; CHECK-NEXT: [[DOTSPLIT5:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[TMP9]]
+; CHECK-NEXT: [[DOTSPLIT5:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT5]], i64 4
-; CHECK-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds nuw [[PAIR_I32]], ptr [[P]], i64 [[TMP11]]
+; CHECK-NEXT: [[DOTSPLIT6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[P]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTSPLIT6]], i64 4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <8 x i32> [[WIDE_VEC]], i64 0
@@ -1191,10 +1191,10 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP22:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[P_I_Y_SPLIT:%.*]] = getelementptr inbounds [8 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds nuw i8, ptr [[P_I_Y_SPLIT]], i64 4
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr [[PAIR_I32]], ptr [[P]], i64 [[I]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr [8 x i8], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_PLUS_1_Y:%.*]] = getelementptr i8, ptr [[TMP23]], i64 12
; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[TMP20]], ptr [[P_I_PLUS_1_Y]], align 4
@@ -1262,12 +1262,12 @@ define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP7]], i64 16
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP11]], i64 24
; CHECK-NEXT: store i32 [[X:%.*]], ptr [[TMP14]], align 4
; CHECK-NEXT: store i32 [[X]], ptr [[TMP8]], align 4
@@ -1286,8 +1286,8 @@ define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[A_I_PLUS_1:%.*]] = getelementptr i8, ptr [[TMP15]], i64 4
; CHECK-NEXT: store i32 [[Y]], ptr [[TMP12]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[A_I_PLUS_1]], align 4
@@ -1356,22 +1356,22 @@ define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) {
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP8]], i64 1
; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i64> [[TMP8]], i64 2
; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i64> [[TMP8]], i64 3
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP12]], i64 12
-; CHECK-NEXT: [[TMP33:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP33:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP33]], i64 20
-; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP34:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP34]], i64 28
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP35]], i64 36
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP26]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP28]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP26]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP28]]
; CHECK-NEXT: store i32 [[X:%.*]], ptr [[TMP15]], align 4
; CHECK-NEXT: store i32 [[X]], ptr [[TMP17]], align 4
; CHECK-NEXT: store i32 [[X]], ptr [[TMP19]], align 4
@@ -1396,10 +1396,10 @@ define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[A_I:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
-; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[A_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[A_I_MINUS_1:%.*]] = getelementptr i8, ptr [[TMP31]], i64 -4
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[A_I_MINUS_3:%.*]] = getelementptr i8, ptr [[TMP32]], i64 -12
; CHECK-NEXT: store i32 [[X]], ptr [[A_I_MINUS_1]], align 4
; CHECK-NEXT: store i32 [[Y]], ptr [[A_I_MINUS_3]], align 4
@@ -1481,8 +1481,8 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[TMP11:%.*]] = sext <4 x i16> [[STRIDED_VEC4]] to <4 x i32>
; CHECK-NEXT: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = mul nsw <4 x i32> [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP14]], align 4, !alias.scope [[META30:![0-9]+]], !noalias [[META27]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP19]], align 4, !alias.scope [[META30:![0-9]+]], !noalias [[META27]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1502,16 +1502,16 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP16]] to i32
; CHECK-NEXT: [[I1]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[IV2]] = add nuw nsw i64 [[IV]], 2
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr [2 x i8], ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[TMP18]], i64 2
; CHECK-NEXT: [[LOAD1:%.*]] = load i16, ptr [[GEP1]], align 4
; CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD1]] to i32
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[IV2]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [2 x i8], ptr [[A]], i64 [[IV2]]
; CHECK-NEXT: [[LOAD2]] = load i16, ptr [[GEP2]], align 4
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[LOAD2]] to i32
; CHECK-NEXT: [[MUL01:%.*]] = mul nsw i32 [[CONV]], [[CONV1]]
; CHECK-NEXT: [[MUL012:%.*]] = mul nsw i32 [[MUL01]], [[CONV2]]
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: store i32 [[MUL012]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[END]], label [[LOOP]], !llvm.loop [[LOOP33:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
index a02fddc4cf72d..cea9a145c1b61 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
@@ -41,7 +41,7 @@ define void @inv_val_store_to_inv_address_conditional_diff_values_ic(ptr %a, i64
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 4, !alias.scope [[META0]], !noalias [[META3]]
@@ -59,7 +59,7 @@ define void @inv_val_store_to_inv_address_conditional_diff_values_ic(ptr %a, i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr [[I1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I2]], [[K]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[I1]], align 4
@@ -139,8 +139,8 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META12:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META12:![0-9]+]]
; CHECK-NEXT: store i32 [[K]], ptr [[A]], align 4, !alias.scope [[META12]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -153,7 +153,7 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[I1]], align 4
; CHECK-NEXT: br i1 [[CMP]], label [[COND_STORE:%.*]], label [[COND_STORE_K:%.*]]
; CHECK: cond_store:
@@ -223,7 +223,7 @@ define i32 @variant_val_store_to_inv_address(ptr %a, i64 %n, ptr %b, i32 %k) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META16:![0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i64 3
; CHECK-NEXT: store i32 [[TMP2]], ptr [[A]], align 4, !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
@@ -243,7 +243,7 @@ define i32 @variant_val_store_to_inv_address(ptr %a, i64 %n, ptr %b, i32 %k) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[I3:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr [[I1]], align 8
; CHECK-NEXT: store i32 [[I2]], ptr [[A]], align 4
; CHECK-NEXT: [[I3]] = add i32 [[I0]], [[I2]]
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
index eea22374ade30..bf26813afc7be 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
@@ -37,7 +37,7 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b)
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[TMP2]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[A]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
@@ -56,7 +56,7 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b)
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[I3:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr [[I1]], align 8
; CHECK-NEXT: [[I3]] = add i32 [[I0]], [[I2]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[A]], align 4
@@ -112,7 +112,7 @@ define void @inv_val_store_to_inv_address(ptr %a, i64 %n, ptr %b) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[A]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META12:![0-9]+]]
; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 4, !alias.scope [[META12]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
@@ -126,7 +126,7 @@ define void @inv_val_store_to_inv_address(ptr %a, i64 %n, ptr %b) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[A]], align 4
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[I1]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
@@ -189,7 +189,7 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE10:%.*]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META16:![0-9]+]], !noalias [[META19:![0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 4, !alias.scope [[META16]], !noalias [[META19]]
@@ -228,7 +228,7 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr [[I1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I2]], [[K]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[I1]], align 4
@@ -282,7 +282,7 @@ define void @inv_val_store_to_inv_address_conditional_diff_values(ptr %a, i64 %n
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[I]]
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B:%.*]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr [[I1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I2]], [[K:%.*]]
; CHECK-NEXT: store i32 [[NTRUNC]], ptr [[I1]], align 4
@@ -356,7 +356,7 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: [[CMP218:%.*]] = icmp ult i32 [[J_022]], [[ITR]]
; CHECK-NEXT: br i1 [[CMP218]], label [[FOR_BODY3_LR_PH:%.*]], label [[FOR_INC8]]
; CHECK: for.body3.lr.ph:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR1]], i64 [[INDVARS_IV23]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[VAR1]], i64 [[INDVARS_IV23]]
; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[J_022]] to i64
; CHECK-NEXT: [[ARRAYIDX5_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = xor i32 [[J_022]], -1
@@ -382,12 +382,12 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], 8589934588
; CHECK-NEXT: [[IND_END:%.*]] = add nuw nsw i64 [[N_VEC]], [[TMP4]]
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX5_PROMOTED]], i64 0
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i32, ptr [[VAR2]], i64 [[TMP4]]
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr [4 x i8], ptr [[VAR2]], i64 [[TMP4]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP15]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[INVARIANT_GEP]], i64 [[INDEX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i8], ptr [[INVARIANT_GEP]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[GEP]], align 4, !alias.scope [[META23:![0-9]+]]
; CHECK-NEXT: [[TMP16:%.*]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP17]] = add <4 x i32> [[TMP16]], splat (i32 1)
@@ -407,7 +407,7 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK: for.body3:
; CHECK-NEXT: [[TMP20:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP22:%.*]], [[FOR_BODY3]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY3]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VAR2]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[VAR2]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
; CHECK-NEXT: [[TMP22]] = add nsw i32 [[ADD]], 1
@@ -487,14 +487,14 @@ define void @multiple_uniform_stores_conditional(ptr nocapture %var1, ptr nocapt
; CHECK-NEXT: [[CMP218:%.*]] = icmp ult i32 [[J_022]], [[ITR]]
; CHECK-NEXT: br i1 [[CMP218]], label [[FOR_BODY3_LR_PH:%.*]], label [[FOR_INC8]]
; CHECK: for.body3.lr.ph:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR1:%.*]], i64 [[INDVARS_IV23]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[VAR1:%.*]], i64 [[INDVARS_IV23]]
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[J_022]] to i64
; CHECK-NEXT: [[ARRAYIDX5_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: br label [[FOR_BODY3:%.*]]
; CHECK: for.body3:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[ARRAYIDX5_PROMOTED]], [[FOR_BODY3_LR_PH]] ], [ [[TMP5:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP0]], [[FOR_BODY3_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR2:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[VAR2:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[ADD]], 42
@@ -578,7 +578,7 @@ for.end10: ; preds = %for.inc8, %entry
define void @unsafe_dep_uniform_load_store(i32 %arg, i32 %arg1, i64 %arg2, ptr %arg3, i32 %arg4, i64 %arg5) {
; CHECK-LABEL: @unsafe_dep_uniform_load_store(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I6:%.*]] = getelementptr inbounds i16, ptr [[ARG3:%.*]], i64 [[ARG5:%.*]]
+; CHECK-NEXT: [[I6:%.*]] = getelementptr inbounds [2 x i8], ptr [[ARG3:%.*]], i64 [[ARG5:%.*]]
; CHECK-NEXT: br label [[BB7:%.*]]
; CHECK: bb7:
; CHECK-NEXT: [[I121:%.*]] = phi i32 [ [[ARG4:%.*]], [[BB:%.*]] ], [ [[I12:%.*]], [[BB7]] ]
@@ -591,12 +591,12 @@ define void @unsafe_dep_uniform_load_store(i32 %arg, i32 %arg1, i64 %arg2, ptr %
; CHECK-NEXT: [[I15:%.*]] = trunc i64 [[I8]] to i32
; CHECK-NEXT: [[I16:%.*]] = add i32 [[ARG:%.*]], [[I15]]
; CHECK-NEXT: [[I17:%.*]] = zext i32 [[I16]] to i64
-; CHECK-NEXT: [[I18:%.*]] = getelementptr inbounds nuw i16, ptr [[I6]], i64 [[I17]]
+; CHECK-NEXT: [[I18:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[I6]], i64 [[I17]]
; CHECK-NEXT: store i16 [[I14]], ptr [[I18]], align 2
; CHECK-NEXT: [[I19:%.*]] = add i32 [[I13]], [[I9]]
; CHECK-NEXT: [[I20:%.*]] = trunc i32 [[I19]] to i16
; CHECK-NEXT: [[I21:%.*]] = and i16 [[I20]], 255
-; CHECK-NEXT: [[I22:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG3]], i64 [[I17]]
+; CHECK-NEXT: [[I22:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[ARG3]], i64 [[I17]]
; CHECK-NEXT: store i16 [[I21]], ptr [[I22]], align 2
; CHECK-NEXT: [[I23]] = add nsw i32 [[I9]], 1
; CHECK-NEXT: [[I24]] = add nuw nsw i64 [[I8]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/loop-scalars.ll b/llvm/test/Transforms/LoopVectorize/loop-scalars.ll
index 010890e30f4eb..24e7cbbed8518 100644
--- a/llvm/test/Transforms/LoopVectorize/loop-scalars.ll
+++ b/llvm/test/Transforms/LoopVectorize/loop-scalars.ll
@@ -17,8 +17,8 @@ define void @vector_gep(ptr %a, ptr %b, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], <2 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], <2 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <2 x ptr> [[TMP0]], ptr [[TMP1]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i64> [[VEC_IND]], splat (i64 2)
@@ -32,8 +32,8 @@ define void @vector_gep(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
-; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds nuw ptr, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: store ptr [[VAR0]], ptr [[VAR1]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
@@ -75,10 +75,10 @@ define void @scalar_store(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: store ptr [[TMP4]], ptr [[TMP6]], align 8
; CHECK-NEXT: store ptr [[TMP5]], ptr [[TMP7]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
@@ -92,8 +92,8 @@ define void @scalar_store(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
-; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: store ptr [[VAR0]], ptr [[VAR1]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 2
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
@@ -135,10 +135,10 @@ define void @expansion(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: store ptr [[TMP4]], ptr [[TMP6]], align 8
; CHECK-NEXT: store ptr [[TMP5]], ptr [[TMP7]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
@@ -152,8 +152,8 @@ define void @expansion(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[I]]
-; CHECK-NEXT: [[VAR3:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[VAR3:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: store ptr [[VAR0]], ptr [[VAR3]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 2
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
@@ -189,7 +189,7 @@ define void @no_gep_or_bitcast(ptr noalias %a, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x ptr>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x ptr> [[WIDE_LOAD]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x ptr> [[WIDE_LOAD]], i64 1
@@ -206,7 +206,7 @@ define void @no_gep_or_bitcast(ptr noalias %a, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw ptr, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[VAR1:%.*]] = load ptr, ptr [[VAR0]], align 8
; CHECK-NEXT: store i32 0, ptr [[VAR1]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/non-const-n.ll b/llvm/test/Transforms/LoopVectorize/non-const-n.ll
index 30f76f8734145..03f6bed18b7e4 100644
--- a/llvm/test/Transforms/LoopVectorize/non-const-n.ll
+++ b/llvm/test/Transforms/LoopVectorize/non-const-n.ll
@@ -19,12 +19,12 @@ define void @example1(i32 %n) nounwind uwtable ssp {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr @c, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr @a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr @a, i64 [[INDEX]]
; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX]], [[TMP1]]
diff --git a/llvm/test/Transforms/LoopVectorize/phi-cost.ll b/llvm/test/Transforms/LoopVectorize/phi-cost.ll
index 7b5d0b69639fa..876e0411dff6a 100644
--- a/llvm/test/Transforms/LoopVectorize/phi-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/phi-cost.ll
@@ -20,9 +20,9 @@ define void @phi_two_incoming_values(ptr noalias %a, ptr noalias %b, i64 %n) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = zext <2 x i1> [[TMP4]] to <2 x i32>
; CHECK-NEXT: [[PREDPHI:%.*]] = add <2 x i32> [[WIDE_LOAD]], [[TMP5]]
@@ -38,9 +38,9 @@ define void @phi_two_incoming_values(ptr noalias %a, ptr noalias %b, i64 %n) {
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[IF_END:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP2]], 0
; CHECK-NEXT: br i1 [[TMP4]], label %[[IF_THEN:.*]], label %[[IF_END]]
; CHECK: [[IF_THEN]]:
@@ -97,9 +97,9 @@ define void @phi_three_incoming_values(ptr noalias %a, ptr noalias %b, i64 %n) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], splat (i32 19)
@@ -119,9 +119,9 @@ define void @phi_three_incoming_values(ptr noalias %a, ptr noalias %b, i64 %n) {
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[IF_END:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[I]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt i32 [[TMP3]], [[TMP5]]
; CHECK-NEXT: br i1 [[TMP6]], label %[[IF_THEN:.*]], label %[[IF_END]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index 481095225af6a..0a2a5e4e89dcd 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -13,13 +13,13 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = fcmp une <4 x float> [[WIDE_LOAD]], splat (float 5.000000e+00)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x float> poison, float [[TMP4]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -28,7 +28,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 4
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP10]], i64 1
@@ -38,7 +38,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP14]], i64 8
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> [[TMP12]], float [[TMP16]], i64 2
@@ -48,7 +48,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i64 12
; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x float> [[TMP18]], float [[TMP22]], i64 3
@@ -70,12 +70,12 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP28]], 5.000000e+00
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[FADD:%.*]] = fadd fast float [[RDX]], [[TMP29]]
; CHECK-NEXT: br label [[FOR_INC]]
@@ -127,13 +127,13 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = fcmp une <4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x float> poison, float [[TMP4]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -142,7 +142,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 4
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP10]], i64 1
@@ -152,7 +152,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP14]], i64 8
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> [[TMP12]], float [[TMP16]], i64 2
@@ -162,7 +162,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i64 12
; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x float> [[TMP18]], float [[TMP22]], i64 3
@@ -185,12 +185,12 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP28]], 3.000000e+00
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[FSEL:%.*]] = call fast float @llvm.minnum.f32(float [[RDX]], float [[TMP29]])
; CHECK-NEXT: br label [[FOR_INC]]
@@ -245,13 +245,13 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 7, [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> poison, i32 [[TMP4]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -260,7 +260,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 4
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP10]], i64 1
@@ -270,7 +270,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP14]], i64 8
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP16]], i64 2
@@ -280,7 +280,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i64 12
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP22]], i64 3
@@ -303,12 +303,12 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP29]], [[COND]]
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[AND:%.*]] = and i32 [[TMP30]], [[RDX]]
; CHECK-NEXT: br label [[FOR_INC]]
@@ -360,16 +360,16 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 5, [[VECTOR_PH]] ], [ [[TMP46:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> poison, i32 [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -380,10 +380,10 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP14]], i64 1
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP17]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -394,10 +394,10 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP22:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP24]], i64 2
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP22]]
; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
; CHECK-NEXT: [[TMP28:%.*]] = insertelement <4 x i32> [[TMP20]], i32 [[TMP27]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -408,10 +408,10 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: br i1 [[TMP31]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP32:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP32]]
+; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP32]]
; CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP34]], i64 3
-; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP32]]
+; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP32]]
; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP37]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -437,15 +437,15 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[RES:%.*]], [[FOR_INC]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP48]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP49]], [[RDX]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[TMP50]]
; CHECK-NEXT: br label [[FOR_INC]]
@@ -510,13 +510,13 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP4]], i64 0
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i64> poison, i64 [[TMP7]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -525,7 +525,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP4]], i64 1
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP9]], i64 [[TMP12]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -534,7 +534,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP4]], i64 2
; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP17:%.*]] = load i64, ptr [[TMP16]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP17]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -543,7 +543,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP4]], i64 3
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP22]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -554,7 +554,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[TMP26]], i64 0
; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]]
; CHECK: pred.load.if7:
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 4
; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i64> poison, i64 [[TMP29]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE8]]
@@ -563,7 +563,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP32:%.*]] = extractelement <4 x i1> [[TMP26]], i64 1
; CHECK-NEXT: br i1 [[TMP32]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]]
; CHECK: pred.load.if9:
-; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP34:%.*]] = load i64, ptr [[TMP33]], align 4
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i64> [[TMP31]], i64 [[TMP34]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE10]]
@@ -572,7 +572,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP37:%.*]] = extractelement <4 x i1> [[TMP26]], i64 2
; CHECK-NEXT: br i1 [[TMP37]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]]
; CHECK: pred.load.if11:
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP39:%.*]] = load i64, ptr [[TMP38]], align 4
; CHECK-NEXT: [[TMP40:%.*]] = insertelement <4 x i64> [[TMP36]], i64 [[TMP39]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE12]]
@@ -581,7 +581,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[TMP26]], i64 3
; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14]]
; CHECK: pred.load.if13:
-; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP44:%.*]] = load i64, ptr [[TMP43]], align 4
; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i64> [[TMP41]], i64 [[TMP44]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE14]]
@@ -604,18 +604,18 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[RDX:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP51:%.*]] = load i64, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i64 [[TMP51]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP52:%.*]] = load i64, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[AND1:%.*]] = and i64 [[RDX]], [[TMP52]]
; CHECK-NEXT: [[TOBOOL2:%.*]] = icmp eq i64 [[TMP52]], 3
; CHECK-NEXT: br i1 [[TOBOOL2]], label [[IF_THEN_2:%.*]], label [[FOR_INC]]
; CHECK: if.then.2:
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[TMP53:%.*]] = load i64, ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[RDX]], [[TMP53]]
; CHECK-NEXT: br label [[FOR_INC]]
@@ -678,13 +678,13 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> poison, i32 [[TMP4]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -693,7 +693,7 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 4
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP10]], i64 1
@@ -703,7 +703,7 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP14]], i64 8
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP16]], i64 2
@@ -713,7 +713,7 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i64 12
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP22]], i64 3
@@ -722,7 +722,7 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP24:%.*]] = phi <4 x i32> [ [[TMP18]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP23]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP25:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP24]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[PREDPHI:%.*]] = add <4 x i32> [[VEC_PHI]], [[TMP25]]
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP26]], align 4
; CHECK-NEXT: [[TMP27]] = add <4 x i32> [[WIDE_LOAD7]], [[PREDPHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
@@ -739,18 +739,18 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK: for.body:
; CHECK-NEXT: [[RDX1:%.*]] = phi i32 [ [[ADD2:%.*]], [[IF_END:%.*]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[IF_END]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP30]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[IV]]
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP31]], [[RDX1]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ADD]], [[IF_THEN]] ], [ [[RDX1]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[IV]]
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD2]] = add nsw i32 [[TMP32]], [[RES]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -809,13 +809,13 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP4]], i64 0
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> poison, float [[TMP7]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -824,7 +824,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP4]], i64 1
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP12]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -833,7 +833,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP4]], i64 2
; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[TMP16]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = insertelement <4 x float> [[TMP14]], float [[TMP17]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -842,7 +842,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP4]], i64 3
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x float> [[TMP19]], float [[TMP22]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -854,7 +854,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[TMP26]], i64 0
; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]]
; CHECK: pred.load.if7:
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[TMP28]], align 4
; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x float> poison, float [[TMP29]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE8]]
@@ -863,7 +863,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP32:%.*]] = extractelement <4 x i1> [[TMP26]], i64 1
; CHECK-NEXT: br i1 [[TMP32]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]]
; CHECK: pred.load.if9:
-; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[TMP33]], align 4
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x float> [[TMP31]], float [[TMP34]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE10]]
@@ -872,7 +872,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP37:%.*]] = extractelement <4 x i1> [[TMP26]], i64 2
; CHECK-NEXT: br i1 [[TMP37]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]]
; CHECK: pred.load.if11:
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP39:%.*]] = load float, ptr [[TMP38]], align 4
; CHECK-NEXT: [[TMP40:%.*]] = insertelement <4 x float> [[TMP36]], float [[TMP39]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE12]]
@@ -881,7 +881,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[TMP26]], i64 3
; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14]]
; CHECK: pred.load.if13:
-; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP44:%.*]] = load float, ptr [[TMP43]], align 4
; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x float> [[TMP41]], float [[TMP44]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE14]]
@@ -903,12 +903,12 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK: for.body:
; CHECK-NEXT: [[RDX1:%.*]] = phi float [ [[RES:%.*]], [[FOR_INC:%.*]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast oeq float [[TMP50]], 3.000000e+00
; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[IV]]
; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP51]], [[RDX1]]
; CHECK-NEXT: br label [[IF_END]]
@@ -917,7 +917,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[CMP5:%.*]] = fcmp fast oeq float [[TMP50]], 7.000000e+00
; CHECK-NEXT: br i1 [[CMP5]], label [[IF_THEN6:%.*]], label [[FOR_INC]]
; CHECK: if.then6:
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[IV]]
; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[ARRAYIDX7]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = fadd fast float [[TMP52]], [[RDX2]]
; CHECK-NEXT: br label [[FOR_INC]]
@@ -984,16 +984,16 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE7:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_LOAD_CONTINUE7]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -1002,7 +1002,7 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_LOAD_IF2:%.*]], label [[PRED_LOAD_CONTINUE3:%.*]]
; CHECK: pred.load.if2:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 4
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP12]], i64 1
@@ -1012,7 +1012,7 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_LOAD_IF4:%.*]], label [[PRED_LOAD_CONTINUE5:%.*]]
; CHECK: pred.load.if4:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP16]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP18]], i64 2
@@ -1022,7 +1022,7 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_LOAD_IF6:%.*]], label [[PRED_LOAD_CONTINUE7]]
; CHECK: pred.load.if6:
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[TMP22]], i64 12
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP20]], i32 [[TMP24]], i64 3
@@ -1045,15 +1045,15 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK: for.body:
; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[RES:%.*]], [[FOR_INC:%.*]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[IV]]
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP30]], [[RDX]]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP31]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[IV]]
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP32]], [[ADD1]]
; CHECK-NEXT: br label [[FOR_INC]]
@@ -1112,16 +1112,16 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE7:%.*]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[PRED_LOAD_CONTINUE7]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -1130,7 +1130,7 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_LOAD_IF2:%.*]], label [[PRED_LOAD_CONTINUE3:%.*]]
; CHECK: pred.load.if2:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 4
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP12]], i64 1
@@ -1140,7 +1140,7 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_LOAD_IF4:%.*]], label [[PRED_LOAD_CONTINUE5:%.*]]
; CHECK: pred.load.if4:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP16]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP18]], i64 2
@@ -1150,7 +1150,7 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_LOAD_IF6:%.*]], label [[PRED_LOAD_CONTINUE7]]
; CHECK: pred.load.if6:
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr [4 x i8], ptr [[SRC2]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[TMP22]], i64 12
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP20]], i32 [[TMP24]], i64 3
@@ -1174,15 +1174,15 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
; CHECK: for.body:
; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[ADD3:%.*]], [[IF_END:%.*]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[IF_END]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC1]], i64 [[IV]]
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP31]], [[RDX]]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP32]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC2]], i64 [[IV]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP33]], [[ADD1]]
; CHECK-NEXT: br label [[IF_END]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll
index 73ddddc69a7c7..02ab7085415c4 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll
@@ -11,7 +11,7 @@ define i32 @reduction_smin(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP1]], i32 [[VEC_PHI]])
@@ -53,7 +53,7 @@ define i32 @reduction_smin_select_ops_flipped(ptr nocapture %A, ptr nocapture %B
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 [[VEC_PHI]])
@@ -95,7 +95,7 @@ define i32 @reduction_smin_intrinsic(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 1000), [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
@@ -138,7 +138,7 @@ define i32 @reduction_umax(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP1]], i32 [[VEC_PHI]])
@@ -180,7 +180,7 @@ define i32 @reduction_umax_select_ops_flipped(ptr nocapture %A, ptr nocapture %B
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 [[VEC_PHI]])
@@ -222,7 +222,7 @@ define i32 @reduction_umax_intrinsic(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 1000), [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
index 1b9dcadbbfc39..dcb5b3914a3ce 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
@@ -17,7 +17,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -26,7 +26,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -36,7 +36,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -46,7 +46,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i64 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
@@ -99,10 +99,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -113,10 +113,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if2:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -127,10 +127,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]]
; CHECK: pred.load.if4:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -141,10 +141,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8]]
; CHECK: pred.load.if6:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE8]]
@@ -208,7 +208,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -217,7 +217,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -227,7 +227,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -237,7 +237,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i64 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
@@ -294,10 +294,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -308,10 +308,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if2:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -322,10 +322,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]]
; CHECK: pred.load.if4:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -336,10 +336,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8]]
; CHECK: pred.load.if6:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE8]]
@@ -404,10 +404,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B1:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> poison, i32 [[TMP12]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -418,10 +418,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if2:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[B1]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP22]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -432,10 +432,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]]
; CHECK: pred.load.if4:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B1]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP28]], align 4
; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP32]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -446,10 +446,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8]]
; CHECK: pred.load.if6:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[B1]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP38]], align 4
; CHECK-NEXT: [[TMP49:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP48]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE8]]
@@ -511,10 +511,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -525,10 +525,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -539,10 +539,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -553,10 +553,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -614,10 +614,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -628,10 +628,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -642,10 +642,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -656,10 +656,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -717,10 +717,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -731,10 +731,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -745,10 +745,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -759,10 +759,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -818,10 +818,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -832,10 +832,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -846,10 +846,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -860,10 +860,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -919,10 +919,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> poison, float [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> poison, float [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -933,10 +933,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x float> [[TMP8]], float [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -947,10 +947,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x float> [[TMP18]], float [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x float> [[TMP19]], float [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -961,10 +961,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP28]], float [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x float> [[TMP29]], float [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -1020,10 +1020,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> poison, float [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> poison, float [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -1034,10 +1034,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x float> [[TMP8]], float [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -1048,10 +1048,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x float> [[TMP18]], float [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x float> [[TMP19]], float [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -1062,10 +1062,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP28]], float [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x float> [[TMP29]], float [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -1123,7 +1123,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -1132,7 +1132,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -1142,7 +1142,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -1152,7 +1152,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i64 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
@@ -1205,7 +1205,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -1214,7 +1214,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -1224,7 +1224,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -1234,7 +1234,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i64 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
@@ -1284,9 +1284,9 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ [[TMP0]], [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00)
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
index b55c563162e13..0c4543647f300 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
@@ -15,7 +15,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 32
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 48
@@ -74,8 +74,8 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [8 x i8], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[Q:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 32
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP0]], i64 64
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP0]], i64 96
@@ -164,7 +164,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -173,7 +173,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]]
; CHECK: pred.load.if4:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 4
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP12]], i64 1
@@ -183,7 +183,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]]
; CHECK: pred.load.if6:
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP16]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP18]], i64 2
@@ -193,7 +193,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]]
; CHECK: pred.load.if8:
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[TMP22]], i64 12
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP20]], i32 [[TMP24]], i64 3
@@ -203,7 +203,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]]
; CHECK: pred.load.if10:
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[TMP28]], i64 16
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> poison, i32 [[TMP30]], i64 0
@@ -213,7 +213,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP33:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
; CHECK-NEXT: br i1 [[TMP33]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; CHECK: pred.load.if12:
-; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP34:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i8, ptr [[TMP34]], i64 20
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP36]], i64 1
@@ -223,7 +223,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP39:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
; CHECK-NEXT: br i1 [[TMP39]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]]
; CHECK: pred.load.if14:
-; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP41:%.*]] = getelementptr i8, ptr [[TMP40]], i64 24
; CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[TMP41]], align 4
; CHECK-NEXT: [[TMP43:%.*]] = insertelement <4 x i32> [[TMP38]], i32 [[TMP42]], i64 2
@@ -233,7 +233,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP45:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
; CHECK-NEXT: br i1 [[TMP45]], label [[PRED_LOAD_IF19:%.*]], label [[PRED_LOAD_CONTINUE20:%.*]]
; CHECK: pred.load.if16:
-; CHECK-NEXT: [[TMP46:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP46:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr i8, ptr [[TMP46]], i64 28
; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP47]], align 4
; CHECK-NEXT: [[TMP49:%.*]] = insertelement <4 x i32> [[TMP44]], i32 [[TMP48]], i64 3
@@ -243,7 +243,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP51:%.*]] = extractelement <4 x i1> [[TMP2]], i64 0
; CHECK-NEXT: br i1 [[TMP51]], label [[PRED_LOAD_IF21:%.*]], label [[PRED_LOAD_CONTINUE22:%.*]]
; CHECK: pred.load.if18:
-; CHECK-NEXT: [[TMP52:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP53:%.*]] = getelementptr i8, ptr [[TMP52]], i64 32
; CHECK-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4
; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i32> poison, i32 [[TMP54]], i64 0
@@ -253,7 +253,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP57:%.*]] = extractelement <4 x i1> [[TMP2]], i64 1
; CHECK-NEXT: br i1 [[TMP57]], label [[PRED_LOAD_IF23:%.*]], label [[PRED_LOAD_CONTINUE24:%.*]]
; CHECK: pred.load.if20:
-; CHECK-NEXT: [[TMP58:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP58:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i8, ptr [[TMP58]], i64 36
; CHECK-NEXT: [[TMP60:%.*]] = load i32, ptr [[TMP59]], align 4
; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i32> [[TMP56]], i32 [[TMP60]], i64 1
@@ -263,7 +263,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP63:%.*]] = extractelement <4 x i1> [[TMP2]], i64 2
; CHECK-NEXT: br i1 [[TMP63]], label [[PRED_LOAD_IF25:%.*]], label [[PRED_LOAD_CONTINUE26:%.*]]
; CHECK: pred.load.if22:
-; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP64:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP65:%.*]] = getelementptr i8, ptr [[TMP64]], i64 40
; CHECK-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4
; CHECK-NEXT: [[TMP67:%.*]] = insertelement <4 x i32> [[TMP62]], i32 [[TMP66]], i64 2
@@ -273,7 +273,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP69:%.*]] = extractelement <4 x i1> [[TMP2]], i64 3
; CHECK-NEXT: br i1 [[TMP69]], label [[PRED_LOAD_IF27:%.*]], label [[PRED_LOAD_CONTINUE28:%.*]]
; CHECK: pred.load.if24:
-; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP70:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i8, ptr [[TMP70]], i64 44
; CHECK-NEXT: [[TMP72:%.*]] = load i32, ptr [[TMP71]], align 4
; CHECK-NEXT: [[TMP73:%.*]] = insertelement <4 x i32> [[TMP68]], i32 [[TMP72]], i64 3
@@ -283,7 +283,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP75:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; CHECK-NEXT: br i1 [[TMP75]], label [[PRED_LOAD_IF29:%.*]], label [[PRED_LOAD_CONTINUE30:%.*]]
; CHECK: pred.load.if26:
-; CHECK-NEXT: [[TMP76:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP76:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP77:%.*]] = getelementptr i8, ptr [[TMP76]], i64 48
; CHECK-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP77]], align 4
; CHECK-NEXT: [[TMP79:%.*]] = insertelement <4 x i32> poison, i32 [[TMP78]], i64 0
@@ -293,7 +293,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP81:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
; CHECK-NEXT: br i1 [[TMP81]], label [[PRED_LOAD_IF31:%.*]], label [[PRED_LOAD_CONTINUE32:%.*]]
; CHECK: pred.load.if28:
-; CHECK-NEXT: [[TMP82:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP82:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[TMP82]], i64 52
; CHECK-NEXT: [[TMP84:%.*]] = load i32, ptr [[TMP83]], align 4
; CHECK-NEXT: [[TMP85:%.*]] = insertelement <4 x i32> [[TMP80]], i32 [[TMP84]], i64 1
@@ -303,7 +303,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP87:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
; CHECK-NEXT: br i1 [[TMP87]], label [[PRED_LOAD_IF33:%.*]], label [[PRED_LOAD_CONTINUE34:%.*]]
; CHECK: pred.load.if30:
-; CHECK-NEXT: [[TMP88:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP88:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP89:%.*]] = getelementptr i8, ptr [[TMP88]], i64 56
; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP89]], align 4
; CHECK-NEXT: [[TMP91:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP90]], i64 2
@@ -313,7 +313,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP93:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
; CHECK-NEXT: br i1 [[TMP93]], label [[PRED_LOAD_IF35:%.*]], label [[PRED_LOAD_CONTINUE36]]
; CHECK: pred.load.if32:
-; CHECK-NEXT: [[TMP94:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP94:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP95:%.*]] = getelementptr i8, ptr [[TMP94]], i64 60
; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP95]], align 4
; CHECK-NEXT: [[TMP97:%.*]] = insertelement <4 x i32> [[TMP92]], i32 [[TMP96]], i64 3
@@ -398,7 +398,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP8]], i64 0
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> poison, i32 [[TMP14]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -407,7 +407,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP8]], i64 1
; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]]
; CHECK: pred.load.if6:
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP18]], i64 4
; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i32> [[TMP16]], i32 [[TMP20]], i64 1
@@ -417,7 +417,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[TMP8]], i64 2
; CHECK-NEXT: br i1 [[TMP23]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]]
; CHECK: pred.load.if8:
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP24]], i64 8
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP22]], i32 [[TMP26]], i64 2
@@ -427,7 +427,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x i1> [[TMP8]], i64 3
; CHECK-NEXT: br i1 [[TMP29]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]]
; CHECK: pred.load.if10:
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP30]], i64 12
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4
; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP32]], i64 3
@@ -437,7 +437,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP35:%.*]] = extractelement <4 x i1> [[TMP9]], i64 0
; CHECK-NEXT: br i1 [[TMP35]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; CHECK: pred.load.if12:
-; CHECK-NEXT: [[TMP36:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP36:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP37:%.*]] = getelementptr i8, ptr [[TMP36]], i64 16
; CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP37]], align 4
; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i32> poison, i32 [[TMP38]], i64 0
@@ -447,7 +447,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP41:%.*]] = extractelement <4 x i1> [[TMP9]], i64 1
; CHECK-NEXT: br i1 [[TMP41]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]]
; CHECK: pred.load.if14:
-; CHECK-NEXT: [[TMP42:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP42:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP42]], i64 20
; CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4
; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i32> [[TMP40]], i32 [[TMP44]], i64 1
@@ -457,7 +457,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i1> [[TMP9]], i64 2
; CHECK-NEXT: br i1 [[TMP47]], label [[PRED_LOAD_IF19:%.*]], label [[PRED_LOAD_CONTINUE20:%.*]]
; CHECK: pred.load.if16:
-; CHECK-NEXT: [[TMP48:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP48:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[TMP48]], i64 24
; CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP49]], align 4
; CHECK-NEXT: [[TMP51:%.*]] = insertelement <4 x i32> [[TMP46]], i32 [[TMP50]], i64 2
@@ -467,7 +467,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP53:%.*]] = extractelement <4 x i1> [[TMP9]], i64 3
; CHECK-NEXT: br i1 [[TMP53]], label [[PRED_LOAD_IF21:%.*]], label [[PRED_LOAD_CONTINUE22:%.*]]
; CHECK: pred.load.if18:
-; CHECK-NEXT: [[TMP54:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP54:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP55:%.*]] = getelementptr i8, ptr [[TMP54]], i64 28
; CHECK-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4
; CHECK-NEXT: [[TMP57:%.*]] = insertelement <4 x i32> [[TMP52]], i32 [[TMP56]], i64 3
@@ -477,7 +477,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP59:%.*]] = extractelement <4 x i1> [[TMP10]], i64 0
; CHECK-NEXT: br i1 [[TMP59]], label [[PRED_LOAD_IF23:%.*]], label [[PRED_LOAD_CONTINUE24:%.*]]
; CHECK: pred.load.if20:
-; CHECK-NEXT: [[TMP60:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP60:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP61:%.*]] = getelementptr i8, ptr [[TMP60]], i64 32
; CHECK-NEXT: [[TMP62:%.*]] = load i32, ptr [[TMP61]], align 4
; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i32> poison, i32 [[TMP62]], i64 0
@@ -487,7 +487,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP65:%.*]] = extractelement <4 x i1> [[TMP10]], i64 1
; CHECK-NEXT: br i1 [[TMP65]], label [[PRED_LOAD_IF25:%.*]], label [[PRED_LOAD_CONTINUE26:%.*]]
; CHECK: pred.load.if22:
-; CHECK-NEXT: [[TMP66:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP66:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP67:%.*]] = getelementptr i8, ptr [[TMP66]], i64 36
; CHECK-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4
; CHECK-NEXT: [[TMP69:%.*]] = insertelement <4 x i32> [[TMP64]], i32 [[TMP68]], i64 1
@@ -497,7 +497,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP71:%.*]] = extractelement <4 x i1> [[TMP10]], i64 2
; CHECK-NEXT: br i1 [[TMP71]], label [[PRED_LOAD_IF27:%.*]], label [[PRED_LOAD_CONTINUE28:%.*]]
; CHECK: pred.load.if24:
-; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP72:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP73:%.*]] = getelementptr i8, ptr [[TMP72]], i64 40
; CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4
; CHECK-NEXT: [[TMP75:%.*]] = insertelement <4 x i32> [[TMP70]], i32 [[TMP74]], i64 2
@@ -507,7 +507,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP77:%.*]] = extractelement <4 x i1> [[TMP10]], i64 3
; CHECK-NEXT: br i1 [[TMP77]], label [[PRED_LOAD_IF29:%.*]], label [[PRED_LOAD_CONTINUE30:%.*]]
; CHECK: pred.load.if26:
-; CHECK-NEXT: [[TMP78:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP78:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP79:%.*]] = getelementptr i8, ptr [[TMP78]], i64 44
; CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP79]], align 4
; CHECK-NEXT: [[TMP81:%.*]] = insertelement <4 x i32> [[TMP76]], i32 [[TMP80]], i64 3
@@ -517,7 +517,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP83:%.*]] = extractelement <4 x i1> [[TMP11]], i64 0
; CHECK-NEXT: br i1 [[TMP83]], label [[PRED_LOAD_IF31:%.*]], label [[PRED_LOAD_CONTINUE32:%.*]]
; CHECK: pred.load.if28:
-; CHECK-NEXT: [[TMP84:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP84:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP85:%.*]] = getelementptr i8, ptr [[TMP84]], i64 48
; CHECK-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4
; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> poison, i32 [[TMP86]], i64 0
@@ -527,7 +527,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP89:%.*]] = extractelement <4 x i1> [[TMP11]], i64 1
; CHECK-NEXT: br i1 [[TMP89]], label [[PRED_LOAD_IF33:%.*]], label [[PRED_LOAD_CONTINUE34:%.*]]
; CHECK: pred.load.if30:
-; CHECK-NEXT: [[TMP90:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP90:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP91:%.*]] = getelementptr i8, ptr [[TMP90]], i64 52
; CHECK-NEXT: [[TMP92:%.*]] = load i32, ptr [[TMP91]], align 4
; CHECK-NEXT: [[TMP93:%.*]] = insertelement <4 x i32> [[TMP88]], i32 [[TMP92]], i64 1
@@ -537,7 +537,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP95:%.*]] = extractelement <4 x i1> [[TMP11]], i64 2
; CHECK-NEXT: br i1 [[TMP95]], label [[PRED_LOAD_IF35:%.*]], label [[PRED_LOAD_CONTINUE36:%.*]]
; CHECK: pred.load.if32:
-; CHECK-NEXT: [[TMP96:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP96:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP97:%.*]] = getelementptr i8, ptr [[TMP96]], i64 56
; CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[TMP97]], align 4
; CHECK-NEXT: [[TMP99:%.*]] = insertelement <4 x i32> [[TMP94]], i32 [[TMP98]], i64 2
@@ -547,7 +547,7 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP101:%.*]] = extractelement <4 x i1> [[TMP11]], i64 3
; CHECK-NEXT: br i1 [[TMP101]], label [[PRED_LOAD_IF37:%.*]], label [[PRED_LOAD_CONTINUE38]]
; CHECK: pred.load.if34:
-; CHECK-NEXT: [[TMP102:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP102:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP103:%.*]] = getelementptr i8, ptr [[TMP102]], i64 60
; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP103]], align 4
; CHECK-NEXT: [[TMP105:%.*]] = insertelement <4 x i32> [[TMP100]], i32 [[TMP104]], i64 3
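The updated checks above all follow one pattern: a typed GEP such as "getelementptr i32, ptr %A, i64 %IDX" becomes "getelementptr [4 x i8], ptr %A, i64 %IDX", and an i64-typed GEP becomes "[8 x i8]". Both forms compute the same byte offset, because the array type has the same store size as the element type it replaces. A minimal standalone sketch of that equivalence, assuming the usual data layout; the function and value names are illustrative, not taken from the tests:

  define ptr @gep_equiv(ptr %p, i64 %i) {
    ; Both GEPs advance %p by %i * 4 bytes: an i32 and a [4 x i8]
    ; array have the same 4-byte store size.
    %typed = getelementptr inbounds i32, ptr %p, i64 %i
    %canon = getelementptr inbounds [4 x i8], ptr %p, i64 %i
    ; %typed and %canon are the same address; an i64 or double GEP
    ; canonicalizes the same way to [8 x i8], i.e. a stride of 8.
    ret ptr %canon
  }
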
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
index 855a0ce56f2c7..e94eb06704ffd 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
@@ -17,7 +17,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -26,7 +26,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -36,7 +36,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i32 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -46,7 +46,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i32 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
@@ -97,10 +97,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -111,10 +111,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -125,10 +125,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -139,10 +139,10 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -199,10 +199,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -213,10 +213,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -227,10 +227,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -241,10 +241,10 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -299,10 +299,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -313,10 +313,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -327,10 +327,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -341,10 +341,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -399,10 +399,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -413,10 +413,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -427,10 +427,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -441,10 +441,10 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -499,10 +499,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -513,10 +513,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -527,10 +527,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP19]], i32 [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -541,10 +541,10 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -599,10 +599,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> poison, float [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> poison, float [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -613,10 +613,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x float> [[TMP8]], float [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -627,10 +627,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x float> [[TMP18]], float [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x float> [[TMP19]], float [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -641,10 +641,10 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP28]], float [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x float> [[TMP29]], float [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -699,10 +699,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> poison, float [[TMP3]], i64 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> poison, float [[TMP6]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -713,10 +713,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x float> [[TMP8]], float [[TMP13]], i64 1
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i32 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP11]]
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP16]], i64 1
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]]
@@ -727,10 +727,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
; CHECK-NEXT: [[TMP21:%.*]] = or disjoint i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[A]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x float> [[TMP18]], float [[TMP23]], i64 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[B]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP21]]
; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x float> [[TMP19]], float [[TMP26]], i64 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]]
@@ -741,10 +741,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
; CHECK-NEXT: [[TMP31:%.*]] = or disjoint i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[A]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP28]], float [[TMP33]], i64 3
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[B]], i32 [[TMP31]]
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i32 [[TMP31]]
; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[TMP35]], align 4
; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x float> [[TMP29]], float [[TMP36]], i64 3
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
@@ -799,7 +799,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -808,7 +808,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -818,7 +818,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i32 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -828,7 +828,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i32 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
@@ -880,7 +880,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
@@ -889,7 +889,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]]
; CHECK: pred.load.if1:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP9]], i64 1
@@ -899,7 +899,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP0]], i64 2
; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]]
; CHECK: pred.load.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i32 8
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP15]], i64 2
@@ -909,7 +909,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i64 3
; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.if5:
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i32 12
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP21]], i64 3
diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index 65d57015b0140..717c722abe8ee 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -17,9 +17,9 @@ define i32 @reduction_sum(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i32> [[VEC_PHI]], [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
@@ -39,9 +39,9 @@ define i32 @reduction_sum(i32 %n, ptr %A, ptr %B) {
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[TMP17:%.*]], [[DOTLR_PH]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[SUM_02]], [[TMP14]]
@@ -98,9 +98,9 @@ define i32 @reduction_prod(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 1), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = mul <4 x i32> [[VEC_PHI]], [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = mul <4 x i32> [[TMP5]], [[WIDE_LOAD]]
@@ -120,9 +120,9 @@ define i32 @reduction_prod(i32 %n, ptr %A, ptr %B) {
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[PROD_02:%.*]] = phi i32 [ [[TMP17:%.*]], [[DOTLR_PH]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[TMP15:%.*]] = mul i32 [[PROD_02]], [[TMP14]]
@@ -179,9 +179,9 @@ define i32 @reduction_mix(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i32> [[VEC_PHI]], [[VEC_IND]]
@@ -201,9 +201,9 @@ define i32 @reduction_mix(i32 %n, ptr %A, ptr %B) {
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[TMP17:%.*]], [[DOTLR_PH]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = mul nsw i32 [[TMP13]], [[TMP11]]
; CHECK-NEXT: [[TMP15:%.*]] = trunc i64 [[INDVARS_IV]] to i32
@@ -260,9 +260,9 @@ define i32 @reduction_mul(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 19, i32 1, i32 1, i32 1>, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD1]]
@@ -282,9 +282,9 @@ define i32 @reduction_mul(i32 %n, ptr %A, ptr %B) {
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[TMP17:%.*]], [[DOTLR_PH]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[TMP11]], [[TMP14]]
@@ -341,9 +341,9 @@ define i32 @start_at_non_zero(ptr %in, ptr %coeff, ptr %out, i32 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 120, i32 0, i32 0, i32 0>, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[IN]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[COEFF]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[TMP3]], [[VEC_PHI]]
@@ -361,9 +361,9 @@ define i32 @start_at_non_zero(ptr %in, ptr %coeff, ptr %out, i32 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[IN]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[COEFF]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]]
@@ -417,9 +417,9 @@ define i32 @reduction_and(i32 %n, ptr %A, ptr %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ splat (i32 -1), [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4]] = and <4 x i32> [[TMP3]], [[VEC_PHI]]
@@ -437,9 +437,9 @@ define i32 @reduction_and(i32 %n, ptr %A, ptr %B) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[AND]] = and i32 [[ADD]], [[RESULT_08]]
@@ -493,9 +493,9 @@ define i32 @reduction_or(i32 %n, ptr %A, ptr %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4]] = or <4 x i32> [[TMP3]], [[VEC_PHI]]
@@ -513,9 +513,9 @@ define i32 @reduction_or(i32 %n, ptr %A, ptr %B) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]]
@@ -569,9 +569,9 @@ define i32 @reduction_xor(i32 %n, ptr %A, ptr %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4]] = xor <4 x i32> [[TMP3]], [[VEC_PHI]]
@@ -589,9 +589,9 @@ define i32 @reduction_xor(i32 %n, ptr %A, ptr %B) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]]
@@ -641,7 +641,7 @@ define i32 @reduction_sub_rhs(i32 %n, ptr %A) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SUB]] = sub nsw i32 [[TMP0]], [[X_05]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -692,7 +692,7 @@ define i32 @reduction_sub_lhs(i32 %n, ptr %A) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2]] = sub <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
@@ -709,7 +709,7 @@ define i32 @reduction_sub_lhs(i32 %n, ptr %A) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[TMP5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
@@ -755,9 +755,9 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ [[TMP0]], [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00)
@@ -830,9 +830,9 @@ define float @noreduction_phi(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], [[ENTRY]] ], [ [[SUM_1:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[CMP3:%.*]] = fcmp ogt float [[TMP0]], [[TMP1]]
; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[FOR_INC]]
@@ -908,7 +908,7 @@ define float @noredux_header_phi(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[SUM2_09:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[SUM_08:%.*]] = phi float [ [[S]], [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast float [[SUM_08]], [[TMP0]]
; CHECK-NEXT: [[ADD1]] = fadd fast float [[SUM2_09]], [[ADD]]
@@ -1036,9 +1036,9 @@ define i32 @reduction_sum_multiuse(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i32> [[VEC_PHI]], [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
@@ -1058,9 +1058,9 @@ define i32 @reduction_sum_multiuse(i32 %n, ptr %A, ptr %B) {
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[TMP17:%.*]], [[DOTLR_PH]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[SUM_02]], [[TMP14]]
@@ -1126,7 +1126,7 @@ define void @reduction_reset(i32 %N, ptr %arrayA, ptr %arrayB) {
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[DOTLR_PH_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ]
; CHECK-NEXT: [[DOT017:%.*]] = phi i32 [ 100, [[DOTLR_PH_PREHEADER]] ], [ [[CSEL:%.*]], [[DOTLR_PH]] ]
-; CHECK-NEXT: [[C6:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAYA]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[C6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAYA]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[C7:%.*]] = load i32, ptr [[C6]], align 4
; CHECK-NEXT: [[C8:%.*]] = icmp sgt i32 [[C7]], 0
; CHECK-NEXT: [[C9:%.*]] = add nsw i32 [[C7]], [[DOT017]]
@@ -1140,7 +1140,7 @@ define void @reduction_reset(i32 %N, ptr %arrayA, ptr %arrayB) {
; CHECK: ._crit_edge:
; CHECK-NEXT: [[DOT015_LCSSA:%.*]] = phi i64 [ -1, [[ENTRY:%.*]] ], [ [[PHITMP19]], [[DOT_CRIT_EDGE_LOOPEXIT]] ]
; CHECK-NEXT: [[DOT0_LCSSA:%.*]] = phi i32 [ 100, [[ENTRY]] ], [ [[CSEL]], [[DOT_CRIT_EDGE_LOOPEXIT]] ]
-; CHECK-NEXT: [[C10:%.*]] = getelementptr inbounds i32, ptr [[ARRAYB]], i64 [[DOT015_LCSSA]]
+; CHECK-NEXT: [[C10:%.*]] = getelementptr inbounds [4 x i8], ptr [[ARRAYB]], i64 [[DOT015_LCSSA]]
; CHECK-NEXT: store i32 [[DOT0_LCSSA]], ptr [[C10]], align 4
; CHECK-NEXT: ret void
;
@@ -1198,7 +1198,7 @@ define i64 @reduction_with_phi_with_one_incoming_on_backedge(i16 %n, ptr %A) {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[DOTCAST1:%.*]] = trunc i32 [[INDEX]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = sext i16 [[DOTCAST1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP4]] = add <4 x i64> [[VEC_PHI]], [[WIDE_LOAD]]
@@ -1217,7 +1217,7 @@ define i64 @reduction_with_phi_with_one_incoming_on_backedge(i16 %n, ptr %A) {
; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: [[TMP7:%.*]] = sext i16 [[IV]] to i64
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP7]]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr [8 x i8], ptr [[A]], i64 [[TMP7]]
; CHECK-NEXT: [[LV_A:%.*]] = load i64, ptr [[GEP_A]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add nsw i64 [[SUM]], [[LV_A]]
; CHECK-NEXT: br label [[LOOP_BB:%.*]]
@@ -1276,7 +1276,7 @@ define i64 @reduction_with_phi_with_two_incoming_on_backedge(i16 %n, ptr %A) {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[DOTCAST1:%.*]] = trunc i32 [[INDEX]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = sext i16 [[DOTCAST1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP4]] = add <4 x i64> [[VEC_PHI]], [[WIDE_LOAD]]
@@ -1295,7 +1295,7 @@ define i64 @reduction_with_phi_with_two_incoming_on_backedge(i16 %n, ptr %A) {
; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: [[TMP7:%.*]] = sext i16 [[IV]] to i64
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP7]]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr [8 x i8], ptr [[A]], i64 [[TMP7]]
; CHECK-NEXT: [[LV_A:%.*]] = load i64, ptr [[GEP_A]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add nsw i64 [[SUM]], [[LV_A]]
; CHECK-NEXT: [[CMP_0:%.*]] = icmp eq i64 [[LV_A]], 29
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index 6a9e6089c5499..755208e1a4d67 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -30,10 +30,10 @@ define void @foo(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind uwtable ss
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], !dbg [[DBG9]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], !dbg [[DBG9]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]], !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDEX]], !dbg [[DBG9]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4, !dbg [[DBG9]]
; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00), !dbg [[DBG9]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]], !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDEX]], !dbg [[DBG9]]
; CHECK-NEXT: store <4 x float> [[TMP3]], ptr [[TMP4]], align 4, !dbg [[DBG9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG9]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG9]]
@@ -46,10 +46,10 @@ define void @foo(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind uwtable ss
; CHECK-NEXT: br label [[FOR_BODY:%.*]], !dbg [[DBG9]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], !dbg [[DBG9]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]], !dbg [[DBG9]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]], !dbg [[DBG9]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !dbg [[DBG9]]
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP6]], 3.000000e+00, !dbg [[DBG9]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]], !dbg [[DBG9]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]], !dbg [[DBG9]]
; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX2]], align 4, !dbg [[DBG9]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1, !dbg [[DBG9]]
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32, !dbg [[DBG9]]
@@ -131,11 +131,11 @@ define void @test_runtime_check(ptr %a, float %b, i64 %offset, i64 %offset2, i64
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[TMP5]], i64 [[OFFSET]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [4 x i8], ptr [[TMP5]], i64 [[OFFSET]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP6]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META18:![0-9]+]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[TMP7]], i64 [[OFFSET2]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[TMP7]], i64 [[OFFSET2]]
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope [[META18]]
; CHECK-NEXT: [[TMP9:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD4]]
; CHECK-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[TMP9]]
@@ -151,11 +151,11 @@ define void @test_runtime_check(ptr %a, float %b, i64 %offset, i64 %offset2, i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ARR_IDX:%.*]] = getelementptr float, ptr [[TMP12]], i64 [[OFFSET]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARR_IDX:%.*]] = getelementptr [4 x i8], ptr [[TMP12]], i64 [[OFFSET]]
; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARR_IDX]], align 4
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[OFFSET2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr [4 x i8], ptr [[TMP13]], i64 [[OFFSET2]]
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
; CHECK-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; CHECK-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
@@ -225,20 +225,20 @@ define void @test_runtime_check2(ptr %a, float %b, i64 %offset, i64 %offset2, i6
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr float, ptr [[A:%.*]], i64 [[IV]]
-; CHECK-NEXT: [[ARR_IDX:%.*]] = getelementptr float, ptr [[TMP0]], i64 [[OFFSET:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [4 x i8], ptr [[A:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[ARR_IDX:%.*]] = getelementptr [4 x i8], ptr [[TMP0]], i64 [[OFFSET:%.*]]
; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARR_IDX]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP1]], i64 [[OFFSET2:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i8], ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr [4 x i8], ptr [[TMP1]], i64 [[OFFSET2:%.*]]
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
; CHECK-NEXT: [[M:%.*]] = fmul fast float [[B:%.*]], [[L2]]
; CHECK-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
; CHECK-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[C:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[C:%.*]], i64 [[IV]]
; CHECK-NEXT: [[C_IDX:%.*]] = getelementptr i8, ptr [[TMP2]], i64 -4
; CHECK-NEXT: [[LC:%.*]] = load float, ptr [[C_IDX]], align 4
; CHECK-NEXT: [[VC:%.*]] = fadd float [[LC]], 1.000000e+00
-; CHECK-NEXT: [[C_IDX2:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[IV]]
+; CHECK-NEXT: [[C_IDX2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[IV]]
; CHECK-NEXT: store float [[VC]], ptr [[C_IDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]]
@@ -337,13 +337,13 @@ define void @different_load_store_pairs(ptr %src.1, ptr %src.2, ptr %dst.1, ptr
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [4 x i8], ptr [[SRC_1]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META22:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[SRC_2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[SRC_2]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD20:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8, !alias.scope [[META25:![0-9]+]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr nusw i32, ptr [[DST_1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr nusw [4 x i8], ptr [[DST_1]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP4]], align 4, !alias.scope [[META27:![0-9]+]], !noalias [[META29:![0-9]+]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr nusw i64, ptr [[DST_2]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr nusw [8 x i8], ptr [[DST_2]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x i64> [[WIDE_LOAD20]], ptr [[TMP5]], align 8, !alias.scope [[META31:![0-9]+]], !noalias [[META32:![0-9]+]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -356,13 +356,13 @@ define void @different_load_store_pairs(ptr %src.1, ptr %src.2, ptr %dst.1, ptr
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr [4 x i8], ptr [[SRC_1]], i64 [[IV]]
; CHECK-NEXT: [[LD_SRC_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 4
-; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i64, ptr [[SRC_2]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr [8 x i8], ptr [[SRC_2]], i64 [[IV]]
; CHECK-NEXT: [[LD_SRC_2:%.*]] = load i64, ptr [[GEP_SRC_2]], align 8
-; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr nusw i32, ptr [[DST_1]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr nusw [4 x i8], ptr [[DST_1]], i64 [[IV]]
; CHECK-NEXT: store i32 [[LD_SRC_1]], ptr [[GEP_DST_1]], align 4
-; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr nusw i64, ptr [[DST_2]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr nusw [8 x i8], ptr [[DST_2]], i64 [[IV]]
; CHECK-NEXT: store i64 [[LD_SRC_2]], ptr [[GEP_DST_2]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp ult i64 [[IV_NEXT]], [[UMAX19]]
@@ -418,12 +418,12 @@ define dso_local void @forced_optsize(ptr noalias nocapture readonly %x_p, ptr n
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[X_P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i8], ptr [[X_P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[Y_P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[Y_P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <2 x i64> [[WIDE_LOAD1]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[Z_P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x i8], ptr [[Z_P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
@@ -504,7 +504,7 @@ define void @test_scev_check_mul_add_expansion(ptr %out, ptr %in, i32 %len, i32
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], 6
; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[OFFSET_IDX]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[OUT]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr [2 x i8], ptr [[OUT]], i64 [[TMP6]]
; CHECK-NEXT: store <4 x i16> zeroinitializer, ptr [[TMP7]], align 2, !alias.scope [[META36:![0-9]+]], !noalias [[META39:![0-9]+]]
; CHECK-NEXT: store i32 0, ptr [[IN]], align 4, !alias.scope [[META39]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
@@ -519,7 +519,7 @@ define void @test_scev_check_mul_add_expansion(ptr %out, ptr %in, i32 %len, i32
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP9:%.*]] = sext i32 [[IV]] to i64
-; CHECK-NEXT: [[ARRAYIDX80:%.*]] = getelementptr i16, ptr [[OUT]], i64 [[TMP9]]
+; CHECK-NEXT: [[ARRAYIDX80:%.*]] = getelementptr [2 x i8], ptr [[OUT]], i64 [[TMP9]]
; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX80]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: store i32 0, ptr [[IN]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
index c3bf4e9f783c2..b3580e9ae9b8a 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
@@ -28,14 +28,14 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP9]], i64 [[TMP8]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP9]], align 8
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i64>, ptr [[TMP11]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_LOAD2]], [[STEP_ADD]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[TMP14]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP14]], i64 [[TMP8]]
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP12]], ptr [[TMP14]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP13]], ptr [[TMP16]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
@@ -50,10 +50,10 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[I_08]]
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP18]], [[I_08]]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
@@ -102,14 +102,14 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 1 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 1 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [8 x i8], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP8]], i64 [[TMP2]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i64>, ptr [[TMP8]], align 8
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 1 x i64>, ptr [[TMP10]], align 8
; CHECK-NEXT: [[TMP11:%.*]] = add nsw <vscale x 1 x i64> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 1 x i64> [[WIDE_LOAD2]], [[STEP_ADD]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[TMP13]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP13]], i64 [[TMP2]]
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP11]], ptr [[TMP13]], align 8
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP12]], ptr [[TMP15]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -124,10 +124,10 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i8], ptr [[B]], i64 [[I_08]]
; CHECK-NEXT: [[TMP17:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP17]], [[I_08]]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
@@ -187,7 +187,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP10]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
@@ -203,7 +203,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store i32 [[R_07]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nuw nsw i32 [[R_07]], 2
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
@@ -264,7 +264,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[VEC_IND]], ptr [[TMP14]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = fadd <vscale x 4 x float> [[VEC_IND]], [[DOTSPLAT]]
@@ -280,7 +280,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I_08]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store float [[R_07]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd float [[R_07]], 2.000000e+00
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/scalar_after_vectorization.ll b/llvm/test/Transforms/LoopVectorize/scalar_after_vectorization.ll
index 306bcf336e5af..61021e05a0789 100644
--- a/llvm/test/Transforms/LoopVectorize/scalar_after_vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalar_after_vectorization.ll
@@ -10,7 +10,7 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
; CHECK: [[OFFSET_IDX:%.+]] = or disjoint i64 %index, 1
; CHECK: %[[T2:.+]] = add nuw nsw i64 [[OFFSET_IDX]], %tmp0
; CHECK: %[[T3:.+]] = sub nsw i64 %[[T2]], %x
-; CHECK: %[[T4:.+]] = getelementptr inbounds i32, ptr %a, i64 %[[T3]]
+; CHECK: %[[T4:.+]] = getelementptr inbounds [4 x i8], ptr %a, i64 %[[T3]]
; CHECK: %[[T6:.+]] = getelementptr inbounds nuw i8, ptr %[[T4]], i64 16
; CHECK: load <4 x i32>, ptr %[[T4]], align 4
; CHECK: load <4 x i32>, ptr %[[T6]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll
index 2aebb73081364..15713b8d8e339 100644
--- a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll
@@ -53,7 +53,7 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i16> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[PTR:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[PTR:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2]] = or <8 x i16> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
@@ -95,7 +95,7 @@ define i16 @reduction_xor_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i16> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[PTR:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x i8], ptr [[PTR:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2]] = xor <8 x i16> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
@@ -218,7 +218,7 @@ define i16 @reduction_smax_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[MIN:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 65535
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[PTR:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x i8], ptr [[PTR:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[EXT:%.*]] = sext i16 [[LOAD]] to i32
; CHECK-NEXT: [[MIN]] = call i32 @llvm.smax.i32(i32 [[SUM_02]], i32 [[EXT]])
@@ -259,7 +259,7 @@ define i16 @reduction_umax_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[MIN:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 65535
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[PTR:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x i8], ptr [[PTR:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[EXT:%.*]] = zext i16 [[LOAD]] to i32
; CHECK-NEXT: [[MIN]] = call i32 @llvm.umax.i32(i32 [[SUM_02]], i32 [[EXT]])
@@ -302,7 +302,7 @@ define i32 @reduction_and_or(i16 %a, i32 %b, ptr %src) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ <i32 10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[SRC:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2]] = or <8 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
@@ -317,7 +317,7 @@ define i32 @reduction_and_or(i16 %a, i32 %b, ptr %src) {
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 992, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OR67:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg i32 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[SRC]], i64 [[TMP5]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[SRC]], i64 [[TMP5]]
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR67]], [[L]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll b/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
index abdd5e9562590..8d19c73861516 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
@@ -14,10 +14,10 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x double> @foo_uniform(<2 x double> [[WIDE_LOAD]], i64 [[UNIFORM]])
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP2]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -30,10 +30,10 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[DATA:%.*]] = load double, ptr [[GEPSRC]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call double @foo(double [[DATA]], i64 [[UNIFORM]]) #[[ATTR0:[0-9]+]]
-; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[CALL]], ptr [[GEPDST]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
@@ -73,7 +73,7 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[WIDE_LOAD]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[WIDE_LOAD]], i64 1
@@ -81,7 +81,7 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; CHECK-NEXT: [[TMP5:%.*]] = call double @foo(double [[TMP3]], i64 [[TMP0]]) #[[ATTR0]]
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x double> poison, double [[TMP4]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[TMP6]], double [[TMP5]], i64 1
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT: store <2 x double> [[TMP7]], ptr [[TMP8]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -94,10 +94,10 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr [8 x i8], ptr [[SRC]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[DATA:%.*]] = load double, ptr [[GEPSRC]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call double @foo(double [[DATA]], i64 [[INDVARS_IV]]) #[[ATTR0]]
-; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds [8 x i8], ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[CALL]], ptr [[GEPDST]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
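
In the vector-geps updates below, the same rewrite applies when the index
operand is a vector: a GEP over a scalar `ptr` base with a `<4 x i64>` index
still produces `<4 x ptr>`, and only the stride type changes (`i32` to
`[4 x i8]`, and `ptr` to `[8 x i8]` on this 64-bit layout). An illustrative
sketch, with hypothetical names:

  define <4 x ptr> @vector_index(ptr %b, <4 x i64> %idx) {
    ; one base pointer, four lane offsets of 4 bytes each
    %v = getelementptr inbounds [4 x i8], ptr %b, <4 x i64> %idx
    ret <4 x ptr> %v
  }
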
diff --git a/llvm/test/Transforms/LoopVectorize/vector-geps.ll b/llvm/test/Transforms/LoopVectorize/vector-geps.ll
index 90b4be0f96dce..8c2f23cd71d98 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-geps.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-geps.ll
@@ -16,8 +16,8 @@ define void @vector_gep_stored(ptr %a, ptr %b, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], <4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i8], ptr [[B:%.*]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x ptr> [[TMP0]], ptr [[TMP1]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <4 x i64> [[VEC_IND]], splat (i64 4)
@@ -31,8 +31,8 @@ define void @vector_gep_stored(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[I]]
-; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds nuw ptr, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: store ptr [[VAR0]], ptr [[VAR1]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
@@ -71,7 +71,7 @@ define void @uniform_vector_gep_stored(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x ptr> [[DOTSPLAT]], ptr [[TMP1]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -85,7 +85,7 @@ define void @uniform_vector_gep_stored(ptr %a, ptr %b, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[VAR0:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 4
-; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds nuw ptr, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[VAR1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[I]]
; CHECK-NEXT: store ptr [[VAR0]], ptr [[VAR1]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll b/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
index b4f8abbf83a53..81b207d7ebd08 100644
--- a/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
+++ b/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
@@ -30,7 +30,7 @@ define i32 @foo(ptr nocapture %var1, ptr nocapture readnone %var2, ptr nocapture
; CHECK: for.body3.lver.check:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[I_015]], [[ITR]]
; CHECK-NEXT: [[IDXPROM6:%.*]] = zext i32 [[I_015]] to i64
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR3]], i64 [[IDXPROM6]]
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[VAR3]], i64 [[IDXPROM6]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[J_016]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[VAR1]], i64 [[TMP4]]
@@ -48,7 +48,7 @@ define i32 @foo(ptr nocapture %var1, ptr nocapture readnone %var2, ptr nocapture
; CHECK: for.body3.lver.orig:
; CHECK-NEXT: [[J_113_LVER_ORIG:%.*]] = phi i32 [ [[J_016]], [[FOR_BODY3_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY3_LVER_ORIG]] ]
; CHECK-NEXT: [[IDXPROM_LVER_ORIG:%.*]] = zext i32 [[J_113_LVER_ORIG]] to i64
-; CHECK-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR1]], i64 [[IDXPROM_LVER_ORIG]]
+; CHECK-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[VAR1]], i64 [[IDXPROM_LVER_ORIG]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_LVER_ORIG]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4
; CHECK-NEXT: [[ADD8_LVER_ORIG:%.*]] = add nsw i32 [[TMP9]], [[ADD]]
@@ -63,7 +63,7 @@ define i32 @foo(ptr nocapture %var1, ptr nocapture readnone %var2, ptr nocapture
; CHECK-NEXT: [[ADD86:%.*]] = phi i32 [ [[ARRAYIDX7_PROMOTED]], [[FOR_BODY3_PH]] ], [ [[ADD8:%.*]], [[FOR_BODY3]] ]
; CHECK-NEXT: [[J_113:%.*]] = phi i32 [ [[J_016]], [[FOR_BODY3_PH]] ], [ [[INC:%.*]], [[FOR_BODY3]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[J_113]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR1]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[VAR1]], i64 [[IDXPROM]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4, !alias.scope [[META5:![0-9]+]], !noalias [[META2]]
; CHECK-NEXT: [[ADD8]] = add nsw i32 [[ADD86]], [[ADD]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J_113]], 1
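
The matrix-lowering updates below show a side effect of sizing strides in
bytes: GEPs that used to be printed with distinct element types now coincide.
`double` and `i64` both have an 8-byte alloc size, so both canonicalize to the
same `[8 x i8]` form, and `float` to `[4 x i8]`. A sketch (names hypothetical):

  define ptr @same_stride(ptr %base, i64 %row) {
    ; formerly `gep double` in one test and `gep i64` in another;
    ; both now print as this single 8-byte-stride form
    %p = getelementptr [8 x i8], ptr %base, i64 %row
    ret ptr %p
  }
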
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
index 77da175b7478b..976085e3be166 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
@@ -28,13 +28,13 @@ define void @multiply_noalias_4x4(ptr noalias %A, ptr noalias %B, ptr noalias %C
; CHECK: inner.body:
; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[INNER_IV]], 5
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[DOTIDX]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[TMP0]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[TMP0]], i64 [[ROWS_IV]]
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 32
; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT: [[DOTIDX17:%.*]] = shl i64 [[COLS_IV]], 5
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[DOTIDX17]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[TMP2]], i64 [[INNER_IV]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[TMP2]], i64 [[INNER_IV]]
; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr i8, ptr [[TMP3]], i64 32
; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
@@ -56,7 +56,7 @@ define void @multiply_noalias_4x4(ptr noalias %A, ptr noalias %B, ptr noalias %C
; CHECK-NEXT: [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_STEP]], 4
; CHECK-NEXT: [[DOTIDX18:%.*]] = shl i64 [[COLS_IV]], 5
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[DOTIDX18]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP8]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr [8 x i8], ptr [[TMP8]], i64 [[ROWS_IV]]
; CHECK-NEXT: store <2 x double> [[TMP5]], ptr [[TMP9]], align 8
; CHECK-NEXT: [[VEC_GEP16:%.*]] = getelementptr i8, ptr [[TMP9]], i64 32
; CHECK-NEXT: store <2 x double> [[TMP7]], ptr [[VEC_GEP16]], align 8
@@ -104,13 +104,13 @@ define void @multiply_noalias_2x4(ptr noalias %A, ptr noalias %B, ptr noalias %C
; CHECK: inner.body:
; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[INNER_IV]], 4
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[DOTIDX]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[TMP0]], i64 [[ROWS_IV]]
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16
; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x i64>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT: [[DOTIDX17:%.*]] = shl i64 [[COLS_IV]], 5
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[DOTIDX17]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[TMP2]], i64 [[INNER_IV]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[TMP2]], i64 [[INNER_IV]]
; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr i8, ptr [[TMP3]], i64 32
; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x i64>, ptr [[VEC_GEP3]], align 8
@@ -136,7 +136,7 @@ define void @multiply_noalias_2x4(ptr noalias %A, ptr noalias %B, ptr noalias %C
; CHECK-NEXT: [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_IV]], 0
; CHECK-NEXT: [[DOTIDX18:%.*]] = shl i64 [[COLS_IV]], 4
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[DOTIDX18]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[TMP12]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [8 x i8], ptr [[TMP12]], i64 [[ROWS_IV]]
; CHECK-NEXT: store <2 x i64> [[TMP7]], ptr [[TMP13]], align 8
; CHECK-NEXT: [[VEC_GEP16:%.*]] = getelementptr i8, ptr [[TMP13]], i64 16
; CHECK-NEXT: store <2 x i64> [[TMP11]], ptr [[VEC_GEP16]], align 8
@@ -190,13 +190,13 @@ define void @multiply_noalias_4x2_2x8(ptr noalias %A, ptr noalias %B, ptr noalia
; CHECK: inner.body:
; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[INNER_IV]], 5
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[DOTIDX]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8 x i8], ptr [[TMP0]], i64 [[ROWS_IV]]
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 32
; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x i64>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT: [[DOTIDX17:%.*]] = shl i64 [[COLS_IV]], 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[DOTIDX17]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[TMP2]], i64 [[INNER_IV]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [8 x i8], ptr [[TMP2]], i64 [[INNER_IV]]
; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr i8, ptr [[TMP3]], i64 16
; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x i64>, ptr [[VEC_GEP3]], align 8
@@ -222,7 +222,7 @@ define void @multiply_noalias_4x2_2x8(ptr noalias %A, ptr noalias %B, ptr noalia
; CHECK-NEXT: [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_STEP]], 4
; CHECK-NEXT: [[DOTIDX18:%.*]] = shl i64 [[COLS_IV]], 5
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[DOTIDX18]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[TMP12]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [8 x i8], ptr [[TMP12]], i64 [[ROWS_IV]]
; CHECK-NEXT: store <2 x i64> [[TMP7]], ptr [[TMP13]], align 8
; CHECK-NEXT: [[VEC_GEP16:%.*]] = getelementptr i8, ptr [[TMP13]], i64 32
; CHECK-NEXT: store <2 x i64> [[TMP11]], ptr [[VEC_GEP16]], align 8
@@ -307,13 +307,13 @@ define void @multiply_alias_2x2(ptr %A, ptr %B, ptr %C) {
; CHECK: inner.body:
; CHECK-NEXT: [[DOTIDX1:%.*]] = shl i64 [[INNER_IV]], 3
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP3]], i64 [[DOTIDX1]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [4 x i8], ptr [[TMP8]], i64 [[ROWS_IV]]
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x float>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr i8, ptr [[TMP10]], i64 8
; CHECK-NEXT: [[COL_LOAD8:%.*]] = load <2 x float>, ptr [[VEC_GEP]], align 4
; CHECK-NEXT: [[DOTIDX24:%.*]] = shl i64 [[COLS_IV]], 3
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP7]], i64 [[DOTIDX24]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP11]], i64 [[INNER_IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [4 x i8], ptr [[TMP11]], i64 [[INNER_IV]]
; CHECK-NEXT: [[COL_LOAD9:%.*]] = load <2 x float>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[VEC_GEP10:%.*]] = getelementptr i8, ptr [[TMP13]], i64 8
; CHECK-NEXT: [[COL_LOAD11:%.*]] = load <2 x float>, ptr [[VEC_GEP10]], align 4
@@ -335,7 +335,7 @@ define void @multiply_alias_2x2(ptr %A, ptr %B, ptr %C) {
; CHECK-NEXT: [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_IV]], 0
; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[COLS_IV]], 3
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[C]], i64 [[DOTIDX]]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr float, ptr [[TMP18]], i64 [[ROWS_IV]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [4 x i8], ptr [[TMP18]], i64 [[ROWS_IV]]
; CHECK-NEXT: store <2 x float> [[TMP15]], ptr [[TMP19]], align 8
; CHECK-NEXT: [[VEC_GEP23:%.*]] = getelementptr i8, ptr [[TMP19]], i64 8
; CHECK-NEXT: store <2 x float> [[TMP17]], ptr [[VEC_GEP23]], align 8
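
Aggregates follow the same rule in the next file: an array-of-struct GEP keeps
a unit stride equal to the struct's alloc size, 25 bytes for the struct element
here, while field offsets remain separate `i8` GEPs layered on top. The shape,
illustratively (names hypothetical):

  define ptr @field_addr(ptr %src, i64 %iv) {
    ; element stride = struct alloc size (25 bytes); field offset applied after
    %elt = getelementptr inbounds nuw [25 x i8], ptr %src, i64 %iv
    %fld = getelementptr inbounds nuw i8, ptr %elt, i64 1
    ret ptr %fld
  }
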
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll
index 7175816963ed1..bc7a5c93f6dbc 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll
@@ -20,7 +20,7 @@ define dso_local noundef i32 @_Z33block_scaling_decompr_8bitjPK27compressed_data
; CHECK: [[FOR_BODY_US]]:
; CHECK-NEXT: [[INDVARS_IV55:%.*]] = phi i64 [ [[INDVARS_IV_NEXT56:%.*]], %[[FOR_BODY_US]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[DST_ADDR_052_US:%.*]] = phi ptr [ [[DST_ADDR_1_US:%.*]], %[[FOR_BODY_US]] ], [ [[DST]], %[[FOR_BODY_LR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw [[STRUCT_COMPRESSED_DATA_8BIT:%.*]], ptr [[SRC]], i64 [[INDVARS_IV55]]
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw [25 x i8], ptr [[SRC]], i64 [[INDVARS_IV55]]
; CHECK-NEXT: [[MANTISSA_US:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX_US]], i64 1
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[MANTISSA_US]], align 1
; CHECK-NEXT: [[VMOVL_I59_US:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
@@ -53,7 +53,7 @@ define dso_local noundef i32 @_Z33block_scaling_decompr_8bitjPK27compressed_data
; CHECK-NEXT: [[AGG_TMP_COERCE_050:%.*]] = phi i64 [ [[AGG_TMP_COERCE_0_INSERT_INSERT:%.*]], %[[FOR_BODY]] ], [ undef, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[AGG_TMP42_COERCE_049:%.*]] = phi i64 [ [[AGG_TMP42_COERCE_0_INSERT_INSERT:%.*]], %[[FOR_BODY]] ], [ undef, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[AGG_TMP37_COERCE_048:%.*]] = phi i64 [ [[AGG_TMP37_COERCE_0_INSERT_INSERT:%.*]], %[[FOR_BODY]] ], [ undef, %[[FOR_BODY_LR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [[STRUCT_COMPRESSED_DATA_8BIT]], ptr [[SRC]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [25 x i8], ptr [[SRC]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[MANTISSA:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX]], i64 1
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr [[MANTISSA]], align 1
; CHECK-NEXT: [[VMOVL_I59:%.*]] = sext <8 x i8> [[TMP4]] to <8 x i16>
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/constraint-elimination-placement.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/constraint-elimination-placement.ll
index f60812f2f39be..7e59fecd522d0 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/constraint-elimination-placement.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/constraint-elimination-placement.ll
@@ -21,7 +21,7 @@ define i1 @test_order_1(ptr %this, ptr noalias %other, i1 %tobool9.not, i32 %cal
; CHECK-NEXT: [[CMP442:%.*]] = icmp sgt i32 [[CALL431]], 0
; CHECK-NEXT: br i1 [[CMP442]], label [[FOR_BODY45_LR_PH:%.*]], label [[FOR_COND]]
; CHECK: for.body45.lr.ph:
-; CHECK-NEXT: [[ARRAYIDX_I_I:%.*]] = getelementptr ptr, ptr [[OTHER]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX_I_I:%.*]] = getelementptr [8 x i8], ptr [[OTHER]], i64 [[INDVARS_IV]]
; CHECK-NEXT: br label [[FOR_BODY45:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: store i32 0, ptr [[THIS]], align 4
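
Fixed-width vector element types in the unrolling tests below are sized the
same way: `<8 x half>` occupies 16 bytes, so its stride becomes `[16 x i8]`,
and `<2 x i32>` becomes `[8 x i8]`. (Scalable vectors presumably stay exempt,
since their alloc size is not a fixed constant and cannot form a fixed byte
array.) A sketch with hypothetical names:

  define ptr @vec_stride(ptr %src, i64 %iv) {
    ; <8 x half> has a 16-byte alloc size, hence a 16-byte array stride
    %g = getelementptr [16 x i8], ptr %src, i64 %iv
    ret ptr %g
  }
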
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll
index 5e1066ee626fa..4d00514eb8e9c 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll
@@ -21,15 +21,15 @@ define void @partial_unroll_forced(i32 %N, ptr %src, ptr noalias %dst) {
; CHECK: loop.latch:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_1:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[LOOP_LATCH]] ]
-; CHECK-NEXT: [[SRC_IDX:%.*]] = getelementptr <8 x half>, ptr [[SRC]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[SRC_IDX:%.*]] = getelementptr [16 x i8], ptr [[SRC]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L:%.*]] = load <8 x half>, ptr [[SRC_IDX]], align 16
-; CHECK-NEXT: [[DST_IDX:%.*]] = getelementptr <8 x half>, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[DST_IDX:%.*]] = getelementptr [16 x i8], ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ADD:%.*]] = fadd <8 x half> [[L]], [[L]]
; CHECK-NEXT: store <8 x half> [[ADD]], ptr [[DST_IDX]], align 16
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[SRC_IDX_1:%.*]] = getelementptr <8 x half>, ptr [[SRC]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[SRC_IDX_1:%.*]] = getelementptr [16 x i8], ptr [[SRC]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[L_1:%.*]] = load <8 x half>, ptr [[SRC_IDX_1]], align 16
-; CHECK-NEXT: [[DST_IDX_1:%.*]] = getelementptr <8 x half>, ptr [[DST]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[DST_IDX_1:%.*]] = getelementptr [16 x i8], ptr [[DST]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[ADD_1:%.*]] = fadd <8 x half> [[L_1]], [[L_1]]
; CHECK-NEXT: store <8 x half> [[ADD_1]], ptr [[DST_IDX_1]], align 16
; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV]], 2
@@ -43,9 +43,9 @@ define void @partial_unroll_forced(i32 %N, ptr %src, ptr noalias %dst) {
; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ]
; CHECK-NEXT: [[LCMP_MOD4:%.*]] = trunc i32 [[N]] to i1
; CHECK-NEXT: tail call void @llvm.assume(i1 [[LCMP_MOD4]])
-; CHECK-NEXT: [[SRC_IDX_EPIL:%.*]] = getelementptr <8 x half>, ptr [[SRC]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[SRC_IDX_EPIL:%.*]] = getelementptr [16 x i8], ptr [[SRC]], i64 [[INDVARS_IV_UNR]]
; CHECK-NEXT: [[L_EPIL:%.*]] = load <8 x half>, ptr [[SRC_IDX_EPIL]], align 16
-; CHECK-NEXT: [[DST_IDX_EPIL:%.*]] = getelementptr <8 x half>, ptr [[DST]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[DST_IDX_EPIL:%.*]] = getelementptr [16 x i8], ptr [[DST]], i64 [[INDVARS_IV_UNR]]
; CHECK-NEXT: [[ADD_EPIL:%.*]] = fadd <8 x half> [[L_EPIL]], [[L_EPIL]]
; CHECK-NEXT: store <8 x half> [[ADD_EPIL]], ptr [[DST_IDX_EPIL]], align 16
; CHECK-NEXT: br label [[EXIT]]
@@ -93,18 +93,18 @@ define void @cse_matching_load_from_previous_unrolled_iteration(i32 %N, ptr %src
; CHECK: loop.latch:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_1:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[LOOP_LATCH]] ]
-; CHECK-NEXT: [[GEP_SRC_12:%.*]] = getelementptr <2 x i32>, ptr [[SRC_12]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEP_SRC_12:%.*]] = getelementptr [8 x i8], ptr [[SRC_12]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L_12:%.*]] = load <2 x i32>, ptr [[GEP_SRC_12]], align 8
-; CHECK-NEXT: [[GEP_SRC_4:%.*]] = getelementptr <2 x i32>, ptr [[SRC_4]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEP_SRC_4:%.*]] = getelementptr [8 x i8], ptr [[SRC_4]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L_4:%.*]] = load <2 x i32>, ptr [[GEP_SRC_4]], align 8
; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i32> [[L_4]], [[L_12]]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr <2 x i32>, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr [8 x i8], ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store <2 x i32> [[MUL]], ptr [[GEP_DST]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[GEP_SRC_12_1:%.*]] = getelementptr <2 x i32>, ptr [[SRC_12]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[GEP_SRC_12_1:%.*]] = getelementptr [8 x i8], ptr [[SRC_12]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[L_12_1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_12_1]], align 8
; CHECK-NEXT: [[MUL_1:%.*]] = mul <2 x i32> [[L_12]], [[L_12_1]]
-; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr <2 x i32>, ptr [[DST]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr [8 x i8], ptr [[DST]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: store <2 x i32> [[MUL_1]], ptr [[GEP_DST_1]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2
@@ -117,12 +117,12 @@ define void @cse_matching_load_from_previous_unrolled_iteration(i32 %N, ptr %src
; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ]
; CHECK-NEXT: [[LCMP_MOD4:%.*]] = trunc i32 [[N]] to i1
; CHECK-NEXT: tail call void @llvm.assume(i1 [[LCMP_MOD4]])
-; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr <2 x i32>, ptr [[SRC_12]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr [8 x i8], ptr [[SRC_12]], i64 [[INDVARS_IV_UNR]]
; CHECK-NEXT: [[L_12_EPIL:%.*]] = load <2 x i32>, ptr [[GEP_SRC_12_EPIL]], align 8
-; CHECK-NEXT: [[GEP_SRC_4_EPIL:%.*]] = getelementptr <2 x i32>, ptr [[SRC_4]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[GEP_SRC_4_EPIL:%.*]] = getelementptr [8 x i8], ptr [[SRC_4]], i64 [[INDVARS_IV_UNR]]
; CHECK-NEXT: [[L_4_EPIL:%.*]] = load <2 x i32>, ptr [[GEP_SRC_4_EPIL]], align 8
; CHECK-NEXT: [[MUL_EPIL:%.*]] = mul <2 x i32> [[L_4_EPIL]], [[L_12_EPIL]]
-; CHECK-NEXT: [[GEP_DST_EPIL:%.*]] = getelementptr <2 x i32>, ptr [[DST]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[GEP_DST_EPIL:%.*]] = getelementptr [8 x i8], ptr [[DST]], i64 [[INDVARS_IV_UNR]]
; CHECK-NEXT: store <2 x i32> [[MUL_EPIL]], ptr [[GEP_DST_EPIL]], align 8
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-load-from-vector-loop.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-load-from-vector-loop.ll
index c17b15138329c..f7f2d6413973f 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-load-from-vector-loop.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-load-from-vector-loop.ll
@@ -34,14 +34,14 @@ define void @hoist_invariant_load(ptr %invariant_ptr, i64 %num_elements, ptr %ar
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[I2:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw %"class.dealii::VectorizedArray", ptr [[ARRAY]], i64 [[I2]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr %"class.dealii::VectorizedArray", ptr [[ARRAY]], i64 [[I2]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr nusw [32 x i8], ptr [[ARRAY]], i64 [[I2]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [32 x i8], ptr [[ARRAY]], i64 [[I2]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i64 32
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr %"class.dealii::VectorizedArray", ptr [[ARRAY]], i64 [[I2]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [32 x i8], ptr [[ARRAY]], i64 [[I2]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 64
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr %"class.dealii::VectorizedArray", ptr [[ARRAY]], i64 [[I2]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [32 x i8], ptr [[ARRAY]], i64 [[I2]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 96
-; CHECK-NEXT: [[TMP12:%.*]] = load <5 x double>, ptr [[GEP]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP12:%.*]] = load <5 x double>, ptr [[TMP5]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <5 x double> [[TMP12]], <5 x double> poison, <2 x i32> <i32 0, i32 4>
; CHECK-NEXT: [[TMP13:%.*]] = load <5 x double>, ptr [[TMP9]], align 8, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <5 x double> [[TMP13]], <5 x double> poison, <2 x i32> <i32 0, i32 4>
@@ -51,7 +51,7 @@ define void @hoist_invariant_load(ptr %invariant_ptr, i64 %num_elements, ptr %ar
; CHECK-NEXT: [[TMP17:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], [[STRIDED_VEC5]]
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x double> [[TMP17]], i64 0
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x double> [[TMP17]], i64 1
-; CHECK-NEXT: store double [[TMP15]], ptr [[GEP]], align 8, !alias.scope [[META3]], !noalias [[META0]]
+; CHECK-NEXT: store double [[TMP15]], ptr [[TMP5]], align 8, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: store double [[TMP16]], ptr [[TMP7]], align 8, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: store double [[TMP18]], ptr [[TMP9]], align 8, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: store double [[TMP19]], ptr [[TMP11]], align 8, !alias.scope [[META3]], !noalias [[META0]]
@@ -63,7 +63,7 @@ define void @hoist_invariant_load(ptr %invariant_ptr, i64 %num_elements, ptr %ar
; CHECK-NEXT: br label %[[LOOP_LATCH:.*]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: [[I3:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP_LATCH]] ], [ [[I2_PH]], %[[LOOP_LATCH_PREHEADER6]] ]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw %"class.dealii::VectorizedArray", ptr [[ARRAY]], i64 [[I3]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [32 x i8], ptr [[ARRAY]], i64 [[I3]]
; CHECK-NEXT: [[INVARIANT_VAL:%.*]] = load double, ptr [[INVARIANT_PTR]], align 8
; CHECK-NEXT: [[ARRAY_VAL:%.*]] = load double, ptr [[GEP1]], align 8
; CHECK-NEXT: [[SUM:%.*]] = fadd double [[INVARIANT_VAL]], [[ARRAY_VAL]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
index 141503d344fe9..ed0452d8eccd5 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
@@ -25,7 +25,7 @@ define i32 @read_only_loop_with_runtime_check(ptr noundef %array, i32 noundef %c
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI11:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
@@ -49,7 +49,7 @@ define i32 @read_only_loop_with_runtime_check(ptr noundef %array, i32 noundef %c
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY_PREHEADER13]] ]
; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[SUM_07_PH]], [[FOR_BODY_PREHEADER13]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARRAY]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP8]], [[SUM_07]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -140,7 +140,7 @@ define dso_local noundef i32 @sum_prefix_with_sum(ptr %s.coerce0, i64 %s.coerce1
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI9:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S_COERCE0]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[S_COERCE0]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
@@ -164,7 +164,7 @@ define dso_local noundef i32 @sum_prefix_with_sum(ptr %s.coerce0, i64 %s.coerce1
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY1]] ], [ [[I_07_PH]], [[FOR_BODY_PREHEADER11]] ]
; CHECK-NEXT: [[RET_06:%.*]] = phi i32 [ [[ADD1]], [[FOR_BODY1]] ], [ [[RET_0_LCSSA]], [[FOR_BODY_PREHEADER11]] ]
-; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i32, ptr [[S_COERCE0]], i64 [[I_07]]
+; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[S_COERCE0]], i64 [[I_07]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_I]], align 4
; CHECK-NEXT: [[ADD1]] = add nsw i32 [[TMP7]], [[RET_06]]
; CHECK-NEXT: [[INC]] = add nuw i64 [[I_07]], 1
@@ -234,7 +234,7 @@ define hidden noundef nonnull align 4 dereferenceable(4) ptr @span_checked_acces
; CHECK-NEXT: unreachable
; CHECK: cond.end:
; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS]], align 8
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[__IDX]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[__IDX]]
; CHECK-NEXT: ret ptr [[ARRAYIDX]]
;
entry:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index 1df9536c3c90f..4ea558a3a51da 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -48,7 +48,7 @@ define void @loop(ptr %X, ptr %Y) {
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x double>, ptr [[TMP2]], align 8
@@ -60,7 +60,7 @@ define void @loop(ptr %X, ptr %Y) {
; CHECK-NEXT: [[TMP8:%.*]] = select <2 x i1> [[TMP6]], <2 x double> splat (double 6.000000e+00), <2 x double> [[WIDE_LOAD8]]
; CHECK-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP3]], <2 x double> zeroinitializer, <2 x double> [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = select <2 x i1> [[TMP4]], <2 x double> zeroinitializer, <2 x double> [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP11]], i64 16
; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP11]], align 8
; CHECK-NEXT: store <2 x double> [[TMP10]], ptr [[TMP12]], align 8
@@ -71,13 +71,13 @@ define void @loop(ptr %X, ptr %Y) {
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP14:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt double [[TMP14]], 0.000000e+00
; CHECK-NEXT: [[CMP1_I:%.*]] = fcmp ogt double [[TMP14]], 6.000000e+00
; CHECK-NEXT: [[DOTV_I:%.*]] = select i1 [[CMP1_I]], double 6.000000e+00, double [[TMP14]]
; CHECK-NEXT: [[RETVAL_0_I:%.*]] = select i1 [[CMP_I]], double 0.000000e+00, double [[DOTV_I]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[RETVAL_0_I]], ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20000
@@ -148,19 +148,19 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META4:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META4]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], splat (i32 20)
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD7]], splat (i32 20)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP4]], i64 16
; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !alias.scope [[META7:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !alias.scope [[META7]]
; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD8]]
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD9]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [4 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META11:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !alias.scope [[META9]], !noalias [[META11]]
@@ -175,13 +175,13 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: br i1 [[TMP13]], label [[EXIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: loop.body:
; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[C_GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[C]], i64 [[IV1]]
+; CHECK-NEXT: [[C_GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[IV1]]
; CHECK-NEXT: [[C_LV:%.*]] = load i32, ptr [[C_GEP]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[C_LV]], 20
-; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV1]]
+; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[IV1]]
; CHECK-NEXT: [[A_LV_0:%.*]] = load float, ptr [[A_GEP_0]], align 4
; CHECK-NEXT: [[MUL2_I81_I:%.*]] = fmul float [[X]], [[A_LV_0]]
-; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[IV1]]
+; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[IV1]]
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_LATCH]], label [[ELSE:%.*]]
; CHECK: else:
; CHECK-NEXT: [[B_LV:%.*]] = load float, ptr [[B_GEP_0]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
index 8d20a3ba8ed08..1569db4ce0edd 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
@@ -48,11 +48,11 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDEX]], [[TMP0]]
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP20]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP21]], align 4, !alias.scope [[META0]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP22]], i64 16
; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4, !alias.scope [[META3]], !noalias [[META0]]
@@ -71,9 +71,9 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY_PREHEADER13]] ]
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L_A]], [[L_B]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_A]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
index 2dceb27165c4d..24702fda62fcf 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
@@ -13,16 +13,16 @@ define void @same_op2(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[WIDE_VEC15:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_VEC18:%.*]] = load <8 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[WIDE_VEC21:%.*]] = load <8 x float>, ptr [[TMP4]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <8 x float>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[WIDE_VEC27:%.*]] = load <8 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <8 x float> [[WIDE_VEC18]], [[WIDE_VEC]]
@@ -125,12 +125,12 @@ define void @same_op2_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP3]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[WIDE_VEC13:%.*]] = load <8 x float>, ptr [[TMP5]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: [[WIDE_VEC16:%.*]] = load <8 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[WIDE_VEC19:%.*]] = load <8 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <8 x float> [[WIDE_VEC]], [[TMP1]]
@@ -224,11 +224,11 @@ define void @same_op3(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x float>, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC16:%.*]] = load <12 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC20:%.*]] = load <12 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <12 x float> [[WIDE_VEC16]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <12 x float> [[WIDE_VEC20]], [[TMP3]]
@@ -325,9 +325,9 @@ define void @same_op3_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC14:%.*]] = load <12 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <12 x float> [[WIDE_VEC]], [[TMP2]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <12 x float> [[WIDE_VEC14]], [[TMP4]]
@@ -417,11 +417,11 @@ define void @same_op4(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC17:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC22:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <16 x float> [[WIDE_VEC17]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC22]], [[TMP3]]
@@ -518,9 +518,9 @@ define void @same_op4_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC15:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <16 x float> [[WIDE_VEC]], [[TMP1]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC15]], [[TMP4]]
@@ -609,9 +609,9 @@ define void @same_op6(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-NEXT: br label %[[FOR_COND1_PREHEADER:.*]]
; CHECK: [[FOR_COND1_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP0]]
@@ -619,9 +619,9 @@ define void @same_op6(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <4 x float> [[TMP3]], [[TMP2]]
; CHECK-NEXT: store <4 x float> [[TMP4]], ptr [[ARRAYIDX9]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP5]]
-; CHECK-NEXT: [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP5]]
-; CHECK-NEXT: [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x float>, ptr [[ARRAYIDX6_4]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
@@ -721,16 +721,16 @@ define void @same_op6_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef
; CHECK-NEXT: br label %[[FOR_COND1_PREHEADER:.*]]
; CHECK: [[FOR_COND1_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = fmul fast <4 x float> [[TMP5]], [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX7]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[ARRAYIDX7]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
-; CHECK-NEXT: [[ARRAYIDX4_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP9]]
-; CHECK-NEXT: [[ARRAYIDX7_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP9]]
+; CHECK-NEXT: [[ARRAYIDX4_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP9]]
+; CHECK-NEXT: [[ARRAYIDX7_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX4_4]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <2 x float> [[TMP10]], [[TMP4]]
; CHECK-NEXT: [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX7_4]], align 4
@@ -820,9 +820,9 @@ define void @same_op8(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-NEXT: br label %[[FOR_COND1_PREHEADER:.*]]
; CHECK: [[FOR_COND1_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP0]]
@@ -830,9 +830,9 @@ define void @same_op8(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <4 x float> [[TMP3]], [[TMP2]]
; CHECK-NEXT: store <4 x float> [[TMP4]], ptr [[ARRAYIDX9]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDVARS_IV]], 4
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP5]]
-; CHECK-NEXT: [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP5]]
-; CHECK-NEXT: [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x float>, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX6_4]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <4 x float> [[TMP7]], [[TMP6]]
@@ -931,9 +931,9 @@ define void @same_op8_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP5]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC19:%.*]] = load <16 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <16 x float> [[WIDE_VEC]], [[TMP1]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC19]], [[TMP4]]
@@ -1047,9 +1047,9 @@ define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) {
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[X]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <10 x float>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[Y]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC5:%.*]] = load <10 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <10 x float> [[WIDE_VEC]], [[TMP5]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <10 x float> [[WIDE_VEC5]], [[TMP8]]
@@ -1067,18 +1067,18 @@ define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[I1:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[I1_PH]], %[[LOOP_PREHEADER11]] ]
-; CHECK-NEXT: [[XGEP1:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[I1]]
-; CHECK-NEXT: [[YGEP1:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[I1]]
+; CHECK-NEXT: [[XGEP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[X]], i64 [[I1]]
+; CHECK-NEXT: [[YGEP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[Y]], i64 [[I1]]
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x float>, ptr [[XGEP1]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = fmul fast <4 x float> [[TMP12]], [[TMP11]]
; CHECK-NEXT: [[TMP14:%.*]] = load <4 x float>, ptr [[YGEP1]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = fadd fast <4 x float> [[TMP14]], [[TMP13]]
; CHECK-NEXT: store <4 x float> [[TMP15]], ptr [[YGEP1]], align 4
; CHECK-NEXT: [[I5:%.*]] = add nuw nsw i64 [[I1]], 4
-; CHECK-NEXT: [[XGEP5:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[I5]]
+; CHECK-NEXT: [[XGEP5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[X]], i64 [[I5]]
; CHECK-NEXT: [[X5:%.*]] = load float, ptr [[XGEP5]], align 4
; CHECK-NEXT: [[AX5:%.*]] = fmul fast float [[X5]], [[A]]
-; CHECK-NEXT: [[YGEP5:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[I5]]
+; CHECK-NEXT: [[YGEP5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[Y]], i64 [[I5]]
; CHECK-NEXT: [[Y5:%.*]] = load float, ptr [[YGEP5]], align 4
; CHECK-NEXT: [[AXPY5:%.*]] = fadd fast float [[Y5]], [[AX5]]
; CHECK-NEXT: store float [[AXPY5]], ptr [[YGEP5]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/interleavevectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/interleavevectorization.ll
index be57809132d3f..5d2a9c20874aa 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/interleavevectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/interleavevectorization.ll
@@ -17,9 +17,9 @@ define void @add4(ptr noalias noundef %x, ptr noalias noundef %y, i32 noundef %n
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
; CHECK-NEXT: store <32 x i16> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 2
@@ -137,9 +137,9 @@ define void @addsubs(ptr noalias noundef %x, ptr noundef %y, i32 noundef %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
; CHECK-NEXT: [[TMP3:%.*]] = sub <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
@@ -263,9 +263,9 @@ define void @add2sub2(ptr noalias noundef %x, ptr noundef %y, i32 noundef %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
; CHECK-NEXT: [[TMP3:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
@@ -389,11 +389,11 @@ define void @addmul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z, i32
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP2]], align 2
; CHECK-NEXT: [[TMP4:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[TMP4]], [[WIDE_VEC36]]
@@ -545,12 +545,12 @@ define void @addsubsmul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z,
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP3]], align 2
; CHECK-NEXT: [[TMP4:%.*]] = add <32 x i16> [[TMP2]], [[WIDE_VEC36]]
; CHECK-NEXT: [[TMP5:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
@@ -710,12 +710,12 @@ define void @add2sub2mul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z,
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [2 x i8], ptr [[X:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP3]], align 2
; CHECK-NEXT: [[TMP4:%.*]] = add <32 x i16> [[TMP2]], [[WIDE_VEC36]]
; CHECK-NEXT: [[TMP5:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
index 2703d2390ce52..52d49ac9cd661 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
@@ -18,7 +18,7 @@ define dso_local void @_Z3fooPiii(ptr %A, i32 %N, i32 %M) #0 {
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK: for.cond1.preheader.us:
; CHECK-NEXT: [[INDVAR6:%.*]] = phi i64 [ [[INDVAR_NEXT7:%.*]], [[FOR_COND1_PREHEADER_US]] ], [ 0, [[FOR_COND1_PREHEADER_LR_PH_SPLIT_US]] ]
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDVAR6]]
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[INDVAR6]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: tail call void @_Z1fi(i32 [[TMP2]])
; CHECK-NEXT: [[INDVAR_NEXT7]] = add nuw i64 [[INDVAR6]], 1
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index 75276c0412647..d94bf5e221b93 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -12,16 +12,16 @@ define void @matrix_extract_insert_scalar(i32 %i, i32 %k, i32 %j, ptr nonnull al
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp samesign ult i64 [[TMP1]], 225
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw double, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[MATRIXEXT:%.*]] = load double, ptr [[TMP3]], align 8
; CHECK-NEXT: [[CONV2:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV2]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp samesign ult i64 [[TMP4]], 225
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP5]])
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw double, ptr [[B:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[MATRIXEXT4:%.*]] = load double, ptr [[TMP6]], align 8
; CHECK-NEXT: [[MUL:%.*]] = fmul double [[MATRIXEXT]], [[MATRIXEXT4]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP1]]
; CHECK-NEXT: [[MATRIXEXT7:%.*]] = load double, ptr [[TMP7]], align 8
; CHECK-NEXT: [[SUB:%.*]] = fsub double [[MATRIXEXT7]], [[MUL]]
; CHECK-NEXT: store double [[SUB]], ptr [[TMP7]], align 8
@@ -93,7 +93,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[SCEVGEP20:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[I]], 225
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[CONV6]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[CONV6]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i32 [[I]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY4_US_PREHEADER:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
; CHECK: vector.memcheck:
@@ -106,7 +106,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP17]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP17]], align 8, !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <2 x double>, ptr [[TMP18]], align 8, !alias.scope [[META0]]
@@ -115,7 +115,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[BROADCAST_SPLAT23:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[WIDE_LOAD]], [[BROADCAST_SPLAT23]]
; CHECK-NEXT: [[TMP21:%.*]] = fmul <2 x double> [[WIDE_LOAD21]], [[BROADCAST_SPLAT23]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP22]], i64 16
; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <2 x double>, ptr [[TMP22]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <2 x double>, ptr [[TMP23]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -136,11 +136,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY4_US]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY4_US_PREHEADER]] ]
; CHECK-NEXT: [[TMP27:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 225
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP27]])
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[MATRIXEXT_US:%.*]] = load double, ptr [[TMP28]], align 8
; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = load double, ptr [[TMP3]], align 8
; CHECK-NEXT: [[MUL_US:%.*]] = fmul double [[MATRIXEXT_US]], [[MATRIXEXT8_US]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = load double, ptr [[TMP29]], align 8
; CHECK-NEXT: [[SUB_US:%.*]] = fsub double [[MATRIXEXT11_US]], [[MUL_US]]
; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP29]], align 8
@@ -150,7 +150,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us:
; CHECK-NEXT: [[TMP31:%.*]] = icmp samesign ult i32 [[I]], 210
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP31]])
-; CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[CONV6]]
+; CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[CONV6]]
; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP61]], i64 120
; CHECK-NEXT: [[MIN_ITERS_CHECK_1:%.*]] = icmp samesign ult i32 [[I]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_1]], label [[FOR_BODY4_US_PREHEADER_1:%.*]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]]
@@ -165,7 +165,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.1:
; CHECK-NEXT: [[INDEX_1:%.*]] = phi i64 [ 0, [[VECTOR_PH_1]] ], [ [[INDEX_NEXT_1:%.*]], [[VECTOR_BODY_1]] ]
; CHECK-NEXT: [[TMP33:%.*]] = add nuw nsw i64 [[INDEX_1]], 15
-; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP33]]
+; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[TMP33]]
; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP47]], i64 16
; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr [[TMP47]], align 8, !alias.scope [[META0]]
; CHECK-NEXT: [[WIDE_LOAD21_1:%.*]] = load <2 x double>, ptr [[TMP48]], align 8, !alias.scope [[META0]]
@@ -174,7 +174,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[BROADCAST_SPLAT23_1:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP50:%.*]] = fmul <2 x double> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT23_1]]
; CHECK-NEXT: [[TMP51:%.*]] = fmul <2 x double> [[WIDE_LOAD21_1]], [[BROADCAST_SPLAT23_1]]
-; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP33]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP33]]
; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP52]], i64 16
; CHECK-NEXT: [[WIDE_LOAD24_1:%.*]] = load <2 x double>, ptr [[TMP52]], align 8, !alias.scope [[META5]], !noalias [[META0]]
; CHECK-NEXT: [[WIDE_LOAD25_1:%.*]] = load <2 x double>, ptr [[TMP53]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -196,11 +196,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[TMP57:%.*]] = add nuw nsw i64 [[INDVARS_IV_1]], 15
; CHECK-NEXT: [[TMP58:%.*]] = icmp samesign ult i64 [[INDVARS_IV_1]], 210
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP58]])
-; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP57]]
+; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[TMP57]]
; CHECK-NEXT: [[MATRIXEXT_US_1:%.*]] = load double, ptr [[TMP59]], align 8
; CHECK-NEXT: [[MATRIXEXT8_US_1:%.*]] = load double, ptr [[TMP32]], align 8
; CHECK-NEXT: [[MUL_US_1:%.*]] = fmul double [[MATRIXEXT_US_1]], [[MATRIXEXT8_US_1]]
-; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP57]]
+; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP57]]
; CHECK-NEXT: [[MATRIXEXT11_US_1:%.*]] = load double, ptr [[TMP60]], align 8
; CHECK-NEXT: [[SUB_US_1:%.*]] = fsub double [[MATRIXEXT11_US_1]], [[MUL_US_1]]
; CHECK-NEXT: store double [[SUB_US_1]], ptr [[TMP60]], align 8
@@ -210,7 +210,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.1:
; CHECK-NEXT: [[TMP62:%.*]] = icmp samesign ult i32 [[I]], 195
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP62]])
-; CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[CONV6]]
+; CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[CONV6]]
; CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP92]], i64 240
; CHECK-NEXT: [[MIN_ITERS_CHECK_2:%.*]] = icmp samesign ult i32 [[I]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_2]], label [[FOR_BODY4_US_PREHEADER_2:%.*]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1:%.*]]
@@ -225,7 +225,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.2:
; CHECK-NEXT: [[INDEX_2:%.*]] = phi i64 [ 0, [[VECTOR_PH_2]] ], [ [[INDEX_NEXT_2:%.*]], [[VECTOR_BODY_2]] ]
; CHECK-NEXT: [[TMP64:%.*]] = add nuw nsw i64 [[INDEX_2]], 30
-; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP64]]
+; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[TMP64]]
; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP78]], i64 16
; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr [[TMP78]], align 8, !alias.scope [[META0]]
; CHECK-NEXT: [[WIDE_LOAD21_2:%.*]] = load <2 x double>, ptr [[TMP79]], align 8, !alias.scope [[META0]]
@@ -234,7 +234,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[BROADCAST_SPLAT23_2:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_2]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP81:%.*]] = fmul <2 x double> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT23_2]]
; CHECK-NEXT: [[TMP82:%.*]] = fmul <2 x double> [[WIDE_LOAD21_2]], [[BROADCAST_SPLAT23_2]]
-; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP64]]
+; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP64]]
; CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP83]], i64 16
; CHECK-NEXT: [[WIDE_LOAD24_2:%.*]] = load <2 x double>, ptr [[TMP83]], align 8, !alias.scope [[META5]], !noalias [[META0]]
; CHECK-NEXT: [[WIDE_LOAD25_2:%.*]] = load <2 x double>, ptr [[TMP84]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -256,11 +256,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[TMP88:%.*]] = add nuw nsw i64 [[INDVARS_IV_2]], 30
; CHECK-NEXT: [[TMP89:%.*]] = icmp samesign ult i64 [[INDVARS_IV_2]], 195
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP89]])
-; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP88]]
+; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[TMP88]]
; CHECK-NEXT: [[MATRIXEXT_US_2:%.*]] = load double, ptr [[TMP90]], align 8
; CHECK-NEXT: [[MATRIXEXT8_US_2:%.*]] = load double, ptr [[TMP63]], align 8
; CHECK-NEXT: [[MUL_US_2:%.*]] = fmul double [[MATRIXEXT_US_2]], [[MATRIXEXT8_US_2]]
-; CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP88]]
+; CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP88]]
; CHECK-NEXT: [[MATRIXEXT11_US_2:%.*]] = load double, ptr [[TMP91]], align 8
; CHECK-NEXT: [[SUB_US_2:%.*]] = fsub double [[MATRIXEXT11_US_2]], [[MUL_US_2]]
; CHECK-NEXT: store double [[SUB_US_2]], ptr [[TMP91]], align 8
@@ -270,7 +270,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.2:
; CHECK-NEXT: [[TMP93:%.*]] = icmp samesign ult i32 [[I]], 180
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP93]])
-; CHECK-NEXT: [[TMP123:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[CONV6]]
+; CHECK-NEXT: [[TMP123:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[CONV6]]
; CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP123]], i64 360
; CHECK-NEXT: [[MIN_ITERS_CHECK_3:%.*]] = icmp samesign ult i32 [[I]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_3]], label [[FOR_BODY4_US_PREHEADER_3:%.*]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2:%.*]]
@@ -285,7 +285,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.3:
; CHECK-NEXT: [[INDEX_3:%.*]] = phi i64 [ 0, [[VECTOR_PH_3]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY_3]] ]
; CHECK-NEXT: [[TMP95:%.*]] = add nuw nsw i64 [[INDEX_3]], 45
-; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP95]]
+; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[TMP95]]
; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP109]], i64 16
; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr [[TMP109]], align 8, !alias.scope [[META0]]
; CHECK-NEXT: [[WIDE_LOAD21_3:%.*]] = load <2 x double>, ptr [[TMP110]], align 8, !alias.scope [[META0]]
@@ -294,7 +294,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[BROADCAST_SPLAT23_3:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_3]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP112:%.*]] = fmul <2 x double> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT23_3]]
; CHECK-NEXT: [[TMP113:%.*]] = fmul <2 x double> [[WIDE_LOAD21_3]], [[BROADCAST_SPLAT23_3]]
-; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP95]]
+; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP95]]
; CHECK-NEXT: [[TMP115:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP114]], i64 16
; CHECK-NEXT: [[WIDE_LOAD24_3:%.*]] = load <2 x double>, ptr [[TMP114]], align 8, !alias.scope [[META5]], !noalias [[META0]]
; CHECK-NEXT: [[WIDE_LOAD25_3:%.*]] = load <2 x double>, ptr [[TMP115]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -316,11 +316,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: [[TMP119:%.*]] = add nuw nsw i64 [[INDVARS_IV_3]], 45
; CHECK-NEXT: [[TMP120:%.*]] = icmp samesign ult i64 [[INDVARS_IV_3]], 180
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP120]])
-; CHECK-NEXT: [[TMP121:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP119]]
+; CHECK-NEXT: [[TMP121:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[A]], i64 [[TMP119]]
; CHECK-NEXT: [[MATRIXEXT_US_3:%.*]] = load double, ptr [[TMP121]], align 8
; CHECK-NEXT: [[MATRIXEXT8_US_3:%.*]] = load double, ptr [[TMP94]], align 8
; CHECK-NEXT: [[MUL_US_3:%.*]] = fmul double [[MATRIXEXT_US_3]], [[MATRIXEXT8_US_3]]
-; CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds nuw double, ptr [[B]], i64 [[TMP119]]
+; CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[B]], i64 [[TMP119]]
; CHECK-NEXT: [[MATRIXEXT11_US_3:%.*]] = load double, ptr [[TMP122]], align 8
; CHECK-NEXT: [[SUB_US_3:%.*]] = fsub double [[MATRIXEXT11_US_3]], [[MUL_US_3]]
; CHECK-NEXT: store double [[SUB_US_3]], ptr [[TMP122]], align 8
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
index 2fe49a31b7722..4518a5dc6d540 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
@@ -46,11 +46,11 @@ define i64 @sum_2_at_with_int_conversion(ptr %A, ptr %B, i64 %N) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI13:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [8 x i8], ptr [[START_I]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP8]], align 8
; CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <2 x i64>, ptr [[TMP9]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[START_I1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [8 x i8], ptr [[START_I1]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 16
; CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <2 x i64>, ptr [[TMP10]], align 8
; CHECK-NEXT: [[WIDE_LOAD16:%.*]] = load <2 x i64>, ptr [[TMP11]], align 8
@@ -73,9 +73,9 @@ define i64 @sum_2_at_with_int_conversion(ptr %A, ptr %B, i64 %N) {
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[IV_PH]], [[LOOP_PREHEADER17]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[LOOP]] ], [ [[SUM_PH]], [[LOOP_PREHEADER17]] ]
-; CHECK-NEXT: [[GEP_IDX_I:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I:%.*]] = getelementptr [8 x i8], ptr [[START_I]], i64 [[IV]]
; CHECK-NEXT: [[LV_I:%.*]] = load i64, ptr [[GEP_IDX_I]], align 8
-; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr i64, ptr [[START_I1]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr [8 x i8], ptr [[START_I1]], i64 [[IV]]
; CHECK-NEXT: [[LV_I9:%.*]] = load i64, ptr [[GEP_IDX_I8]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[LV_I]], [[SUM]]
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[ADD]], [[LV_I9]]
@@ -161,15 +161,15 @@ define i64 @sum_3_at_with_int_conversion(ptr %A, ptr %B, ptr %C, i64 %N) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI25:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [8 x i8], ptr [[START_I]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP11]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP11]], align 8
; CHECK-NEXT: [[WIDE_LOAD26:%.*]] = load <2 x i64>, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[START_I1]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr [8 x i8], ptr [[START_I1]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP13]], i64 16
; CHECK-NEXT: [[WIDE_LOAD27:%.*]] = load <2 x i64>, ptr [[TMP13]], align 8
; CHECK-NEXT: [[WIDE_LOAD28:%.*]] = load <2 x i64>, ptr [[TMP14]], align 8
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[START_I12]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [8 x i8], ptr [[START_I12]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP15]], i64 16
; CHECK-NEXT: [[WIDE_LOAD29:%.*]] = load <2 x i64>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[WIDE_LOAD30:%.*]] = load <2 x i64>, ptr [[TMP16]], align 8
@@ -194,11 +194,11 @@ define i64 @sum_3_at_with_int_conversion(ptr %A, ptr %B, ptr %C, i64 %N) {
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[IV_PH]], [[LOOP_PREHEADER31]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[LOOP]] ], [ [[SUM_PH]], [[LOOP_PREHEADER31]] ]
-; CHECK-NEXT: [[GEP_IDX_I:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I:%.*]] = getelementptr [8 x i8], ptr [[START_I]], i64 [[IV]]
; CHECK-NEXT: [[LV_I:%.*]] = load i64, ptr [[GEP_IDX_I]], align 8
-; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr i64, ptr [[START_I1]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr [8 x i8], ptr [[START_I1]], i64 [[IV]]
; CHECK-NEXT: [[LV_I9:%.*]] = load i64, ptr [[GEP_IDX_I8]], align 8
-; CHECK-NEXT: [[GEP_IDX_I19:%.*]] = getelementptr i64, ptr [[START_I12]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I19:%.*]] = getelementptr [8 x i8], ptr [[START_I12]], i64 [[IV]]
; CHECK-NEXT: [[LV_I20:%.*]] = load i64, ptr [[GEP_IDX_I19]], align 8
; CHECK-NEXT: [[ADD_1:%.*]] = add i64 [[LV_I]], [[SUM]]
; CHECK-NEXT: [[ADD_2:%.*]] = add i64 [[ADD_1]], [[LV_I9]]
@@ -251,7 +251,7 @@ define i64 @at_with_int_conversion(ptr %ptr, i64 %idx) {
; CHECK-NEXT: [[INRANGE:%.*]] = icmp ugt i64 [[IDX:%.*]], [[SUB]]
; CHECK-NEXT: br i1 [[INRANGE]], label [[ERROR:%.*]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[GEP_IDX:%.*]] = getelementptr i64, ptr [[START]], i64 [[IDX]]
+; CHECK-NEXT: [[GEP_IDX:%.*]] = getelementptr [8 x i8], ptr [[START]], i64 [[IDX]]
; CHECK-NEXT: [[LV:%.*]] = load i64, ptr [[GEP_IDX]], align 8
; CHECK-NEXT: ret i64 [[LV]]
; CHECK: error:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
index 48e8564322153..28f326f9092ff 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
@@ -27,7 +27,7 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[VEC_PHI16:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI17:%.*]] = phi <4 x double> [ <double 0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %[[VECTOR_PH]] ], [ [[TMP14:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI18:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[SAMPLES]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[WIDE_LOAD19:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
@@ -68,7 +68,7 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER22]] ]
; CHECK-NEXT: [[V1_012:%.*]] = phi double [ [[V1_2:%.*]], %[[FOR_BODY]] ], [ [[V1_011_PH]], %[[FOR_BODY_PREHEADER22]] ]
; CHECK-NEXT: [[V0_011:%.*]] = phi double [ [[V0_2:%.*]], %[[FOR_BODY]] ], [ [[V0_010_PH]], %[[FOR_BODY_PREHEADER22]] ]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[CONV:%.*]] = fpext nnan ninf float [[TMP0]] to double
; CHECK-NEXT: [[MUL:%.*]] = fmul fast double [[Y]], [[CONV]]
@@ -213,7 +213,7 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[VEC_PHI31:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP23:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI32:%.*]] = phi <4 x double> [ [[TMP27]], %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI33:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[SAMPLES]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX_US]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: [[WIDE_LOAD34:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
@@ -255,7 +255,7 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY3_US]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
; CHECK-NEXT: [[V1_116_US:%.*]] = phi double [ [[V1_2_US:%.*]], %[[FOR_BODY3_US]] ], [ [[V1_114_US_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
; CHECK-NEXT: [[V0_115_US:%.*]] = phi double [ [[V0_2_US:%.*]], %[[FOR_BODY3_US]] ], [ [[V0_113_US_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX_US1:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
+; CHECK-NEXT: [[ARRAYIDX_US1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX_US1]], align 4
; CHECK-NEXT: [[CONV_US:%.*]] = fpext nnan float [[TMP0]] to double
; CHECK-NEXT: [[TMP1:%.*]] = tail call fast double @llvm.exp2.f64(double [[CONV_US]])
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll
index eefde9ddfdc1d..fe0aaf9d80195 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll
@@ -22,8 +22,8 @@ define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP14:%.*]] = fsub fast float [[TMP11]], [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = fmul fast float [[TMP14]], [[TMP14]]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP1]], i64 [[TMP4]]
; CHECK-NEXT: [[OP_RDX:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP15]], <20 x float> [[TMP9]])
; CHECK-NEXT: [[TMP18:%.*]] = load <20 x float>, ptr [[TMP16]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP19:%.*]] = load <20 x float>, ptr [[TMP17]], align 4, !tbaa [[TBAA4]]
@@ -35,8 +35,8 @@ define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %
; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[TMP24]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP26:%.*]] = fsub fast float [[TMP23]], [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = fmul fast float [[TMP26]], [[TMP26]]
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP16]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP17]], i64 [[TMP4]]
; CHECK-NEXT: [[OP_RDX_1:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP27]], <20 x float> [[TMP21]])
; CHECK-NEXT: [[OP_RDX3_1:%.*]] = fadd fast float [[OP_RDX_1]], [[OP_RDX]]
; CHECK-NEXT: [[TMP30:%.*]] = load <20 x float>, ptr [[TMP28]], align 4, !tbaa [[TBAA4]]
@@ -49,8 +49,8 @@ define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %
; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[TMP36]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP38:%.*]] = fsub fast float [[TMP35]], [[TMP37]]
; CHECK-NEXT: [[TMP39:%.*]] = fmul fast float [[TMP38]], [[TMP38]]
-; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds float, ptr [[TMP29]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP28]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP29]], i64 [[TMP4]]
; CHECK-NEXT: [[OP_RDX_2:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP39]], <20 x float> [[TMP33]])
; CHECK-NEXT: [[OP_RDX3_2:%.*]] = fadd fast float [[OP_RDX_2]], [[OP_RDX3_1]]
; CHECK-NEXT: [[TMP42:%.*]] = load <20 x float>, ptr [[TMP40]], align 4, !tbaa [[TBAA4]]
@@ -63,8 +63,8 @@ define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %
; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[TMP48]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP50:%.*]] = fsub fast float [[TMP47]], [[TMP49]]
; CHECK-NEXT: [[TMP51:%.*]] = fmul fast float [[TMP50]], [[TMP50]]
-; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds float, ptr [[TMP40]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[TMP41]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP40]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP41]], i64 [[TMP4]]
; CHECK-NEXT: [[OP_RDX_3:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP51]], <20 x float> [[TMP45]])
; CHECK-NEXT: [[OP_RDX3_3:%.*]] = fadd fast float [[OP_RDX_3]], [[OP_RDX3_2]]
; CHECK-NEXT: [[TMP54:%.*]] = load <20 x float>, ptr [[TMP52]], align 4, !tbaa [[TBAA4]]
@@ -77,8 +77,8 @@ define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %
; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[TMP60]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP62:%.*]] = fsub fast float [[TMP59]], [[TMP61]]
; CHECK-NEXT: [[TMP63:%.*]] = fmul fast float [[TMP62]], [[TMP62]]
-; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP52]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP53]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP52]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP53]], i64 [[TMP4]]
; CHECK-NEXT: [[OP_RDX_4:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP63]], <20 x float> [[TMP57]])
; CHECK-NEXT: [[OP_RDX3_4:%.*]] = fadd fast float [[OP_RDX_4]], [[OP_RDX3_3]]
; CHECK-NEXT: [[TMP66:%.*]] = load <20 x float>, ptr [[TMP64]], align 4, !tbaa [[TBAA4]]
@@ -91,8 +91,8 @@ define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %
; CHECK-NEXT: [[TMP73:%.*]] = load float, ptr [[TMP72]], align 4, !tbaa [[TBAA4]]
; CHECK-NEXT: [[TMP74:%.*]] = fsub fast float [[TMP71]], [[TMP73]]
; CHECK-NEXT: [[TMP75:%.*]] = fmul fast float [[TMP74]], [[TMP74]]
-; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds float, ptr [[TMP65]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP64]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP65]], i64 [[TMP4]]
; CHECK-NEXT: [[OP_RDX_5:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP75]], <20 x float> [[TMP69]])
; CHECK-NEXT: [[OP_RDX3_5:%.*]] = fadd fast float [[OP_RDX_5]], [[OP_RDX3_4]]
; CHECK-NEXT: [[TMP78:%.*]] = load <20 x float>, ptr [[TMP76]], align 4, !tbaa [[TBAA4]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/sinking-vs-if-conversion.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/sinking-vs-if-conversion.ll
index eda54d999a79f..0dc3ae1b782bc 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/sinking-vs-if-conversion.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/sinking-vs-if-conversion.ll
@@ -25,7 +25,7 @@ define void @test_find_min(ptr noundef nonnull align 8 dereferenceable(24) %this
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[COND_END7:%.*]] ]
; CHECK-NEXT: [[MIN_010:%.*]] = phi ptr [ [[TMP1]], [[FOR_BODY_LR_PH]] ], [ [[COND8:%.*]], [[COND_END7]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[TMP2]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[TMP2]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq ptr [[MIN_010]], null
; CHECK-NEXT: br i1 [[CMP3]], label [[COND_END7]], label [[COND_FALSE:%.*]]
@@ -143,17 +143,17 @@ define void @cond_select_loop(ptr noalias nocapture noundef readonly %a, ptr noa
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[COND_END:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[I_07]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[I_07]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[TMP0]], 0.000000e+00
; CHECK-NEXT: br i1 [[CMP2]], label [[COND_END]], label [[COND_FALSE:%.*]]
; CHECK: cond.false:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[I_07]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[B]], i64 [[I_07]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: br label [[COND_END]]
; CHECK: cond.end:
; CHECK-NEXT: [[COND:%.*]] = phi float [ [[TMP1]], [[COND_FALSE]] ], [ [[TMP0]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[I_07]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[C]], i64 [[I_07]]
; CHECK-NEXT: store float [[COND]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 1000
diff --git a/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll b/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll
index 13eed2e918aa0..474fcc3e64185 100644
--- a/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll
+++ b/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll
@@ -21,35 +21,35 @@ define dso_local zeroext i32 @foo(ptr noundef %a) #0 {
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT_7:%.*]], %[[FOR_BODY4]] ]
; CHECK-NEXT: [[SUM_11:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD_7:%.*]], %[[FOR_BODY4]] ]
; CHECK-NEXT: [[IDX_NEG:%.*]] = sub nsw i64 0, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[IDX_NEG]]
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[IDX_NEG]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ADD_PTR]], align 4, !tbaa [[INT_TBAA3:![0-9]+]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[SUM_11]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_NEG:%.*]] = xor i64 [[INDVARS_IV]], -1
-; CHECK-NEXT: [[ADD_PTR_110:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_NEG]]
+; CHECK-NEXT: [[ADD_PTR_110:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_NEG]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ADD_PTR_110]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_111:%.*]] = add i32 [[TMP1]], [[ADD]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_112_NEG:%.*]] = sub nuw nsw i64 -2, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_217:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_112_NEG]]
+; CHECK-NEXT: [[ADD_PTR_217:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_112_NEG]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ADD_PTR_217]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_218:%.*]] = add i32 [[TMP2]], [[ADD_111]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_219_NEG:%.*]] = sub nuw nsw i64 -3, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_219_NEG]]
+; CHECK-NEXT: [[ADD_PTR_3:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_219_NEG]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ADD_PTR_3]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_3:%.*]] = add i32 [[TMP3]], [[ADD_218]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_3_NEG:%.*]] = sub nuw nsw i64 -4, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_3_NEG]]
+; CHECK-NEXT: [[ADD_PTR_4:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_3_NEG]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ADD_PTR_4]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_4:%.*]] = add i32 [[TMP4]], [[ADD_3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_4_NEG:%.*]] = sub nuw nsw i64 -5, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_4_NEG]]
+; CHECK-NEXT: [[ADD_PTR_5:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_4_NEG]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ADD_PTR_5]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_5:%.*]] = add i32 [[TMP5]], [[ADD_4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_5_NEG:%.*]] = sub nuw nsw i64 -6, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_5_NEG]]
+; CHECK-NEXT: [[ADD_PTR_6:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_5_NEG]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ADD_PTR_6]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_6:%.*]] = add i32 [[TMP6]], [[ADD_5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_6_NEG:%.*]] = sub nuw nsw i64 -7, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_6_NEG]]
+; CHECK-NEXT: [[ADD_PTR_7:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_6_NEG]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ADD_PTR_7]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[ADD_7]] = add i32 [[TMP7]], [[ADD_6]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8
@@ -59,34 +59,34 @@ define dso_local zeroext i32 @foo(ptr noundef %a) #0 {
; CHECK-NEXT: [[INDVARS_IV_1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_1_7:%.*]], %[[FOR_BODY4_1]] ], [ 0, %[[FOR_BODY4]] ]
; CHECK-NEXT: [[SUM_11_1:%.*]] = phi i32 [ [[ADD_1_7:%.*]], %[[FOR_BODY4_1]] ], [ [[ADD_7]], %[[FOR_BODY4]] ]
; CHECK-NEXT: [[IDX_NEG_1:%.*]] = sub nsw i64 0, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[IDX_NEG_1]]
+; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[IDX_NEG_1]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ADD_PTR_1]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_NEG:%.*]] = xor i64 [[INDVARS_IV_1]], -1
-; CHECK-NEXT: [[ADD_PTR_1_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_1:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_NEG]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ADD_PTR_1_1]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_1_NEG:%.*]] = sub nuw nsw i64 -2, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_1_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_2:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_1_NEG]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ADD_PTR_1_2]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[TMP10]], [[TMP11]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_2_NEG:%.*]] = sub nuw nsw i64 -3, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_2_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_3:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_2_NEG]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ADD_PTR_1_3]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_3_NEG:%.*]] = sub nuw nsw i64 -4, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_3_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_4:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_3_NEG]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ADD_PTR_1_4]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP16:%.*]] = add i32 [[TMP14]], [[TMP15]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_4_NEG:%.*]] = sub nuw nsw i64 -5, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_4_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_5:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_4_NEG]]
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[ADD_PTR_1_5]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP18:%.*]] = add i32 [[TMP16]], [[TMP17]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_5_NEG:%.*]] = sub nuw nsw i64 -6, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_5_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_6:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_5_NEG]]
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ADD_PTR_1_6]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP20:%.*]] = add i32 [[TMP18]], [[TMP19]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_6_NEG:%.*]] = sub nuw nsw i64 -7, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_6_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_7:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_6_NEG]]
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ADD_PTR_1_7]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[TMP22:%.*]] = add i32 [[TMP20]], [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = shl i32 [[TMP22]], 1
@@ -98,42 +98,42 @@ define dso_local zeroext i32 @foo(ptr noundef %a) #0 {
; CHECK-NEXT: [[INDVARS_IV_2:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_2_7:%.*]], %[[FOR_BODY4_2]] ], [ 0, %[[FOR_BODY4_1]] ]
; CHECK-NEXT: [[SUM_11_2:%.*]] = phi i32 [ [[ADD_2_7:%.*]], %[[FOR_BODY4_2]] ], [ [[ADD_1_7]], %[[FOR_BODY4_1]] ]
; CHECK-NEXT: [[IDX_NEG_2:%.*]] = sub nsw i64 0, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[IDX_NEG_2]]
+; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[IDX_NEG_2]]
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[ADD_PTR_2]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[TMP24]], 3
; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[MUL_2]], [[SUM_11_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_NEG:%.*]] = xor i64 [[INDVARS_IV_2]], -1
-; CHECK-NEXT: [[ADD_PTR_2_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_1:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_NEG]]
; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[ADD_PTR_2_1]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_1:%.*]] = mul i32 [[TMP25]], 3
; CHECK-NEXT: [[ADD_2_1:%.*]] = add i32 [[MUL_2_1]], [[ADD_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_1_NEG:%.*]] = sub nuw nsw i64 -2, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_1_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_2:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_1_NEG]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ADD_PTR_2_2]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_2:%.*]] = mul i32 [[TMP26]], 3
; CHECK-NEXT: [[ADD_2_2:%.*]] = add i32 [[MUL_2_2]], [[ADD_2_1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_2_NEG:%.*]] = sub nuw nsw i64 -3, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_2_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_3:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_2_NEG]]
; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ADD_PTR_2_3]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_3:%.*]] = mul i32 [[TMP27]], 3
; CHECK-NEXT: [[ADD_2_3:%.*]] = add i32 [[MUL_2_3]], [[ADD_2_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_3_NEG:%.*]] = sub nuw nsw i64 -4, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_3_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_4:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_3_NEG]]
; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[ADD_PTR_2_4]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_4:%.*]] = mul i32 [[TMP28]], 3
; CHECK-NEXT: [[ADD_2_4:%.*]] = add i32 [[MUL_2_4]], [[ADD_2_3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_4_NEG:%.*]] = sub nuw nsw i64 -5, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_4_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_5:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_4_NEG]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[ADD_PTR_2_5]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_5:%.*]] = mul i32 [[TMP29]], 3
; CHECK-NEXT: [[ADD_2_5:%.*]] = add i32 [[MUL_2_5]], [[ADD_2_4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_5_NEG:%.*]] = sub nuw nsw i64 -6, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_5_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_6:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_5_NEG]]
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[ADD_PTR_2_6]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_6:%.*]] = mul i32 [[TMP30]], 3
; CHECK-NEXT: [[ADD_2_6:%.*]] = add i32 [[MUL_2_6]], [[ADD_2_5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_6_NEG:%.*]] = sub nuw nsw i64 -7, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_6_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_7:%.*]] = getelementptr inbounds [4 x i8], ptr getelementptr inbounds nuw (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_6_NEG]]
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[ADD_PTR_2_7]], align 4, !tbaa [[INT_TBAA3]]
; CHECK-NEXT: [[MUL_2_7:%.*]] = mul i32 [[TMP31]], 3
; CHECK-NEXT: [[ADD_2_7]] = add i32 [[MUL_2_7]], [[ADD_2_6]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/earlycse-after-simplifycfg-two-entry-phi-node-folding.ll b/llvm/test/Transforms/PhaseOrdering/X86/earlycse-after-simplifycfg-two-entry-phi-node-folding.ll
index 4c57ba74782e5..6a6250b3b1cf1 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/earlycse-after-simplifycfg-two-entry-phi-node-folding.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/earlycse-after-simplifycfg-two-entry-phi-node-folding.ll
@@ -11,7 +11,7 @@ define dso_local void @foo(ptr %in, i64 %lo, i64 %hi, i32 %ishi) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[ISHI:%.*]], 0
; CHECK-NEXT: [[LO_HI:%.*]] = select i1 [[TOBOOL_NOT]], i64 [[LO:%.*]], i64 [[HI:%.*]]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[LO_HI]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i8], ptr [[IN:%.*]], i64 [[LO_HI]]
; CHECK-NEXT: [[ARRAYVAL2:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[INC2:%.*]] = add nsw i32 [[ARRAYVAL2]], 1
; CHECK-NEXT: store i32 [[INC2]], ptr [[ARRAYIDX1]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll
index ffdff21f94f0e..c1b118d40b2a0 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll
@@ -175,17 +175,17 @@ define void @test_runtime_trip_count(i32 %N) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw double, ptr @b, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [8 x i8], ptr @b, i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 16
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x double>, ptr [[TMP1]], align 16
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw double, ptr @c, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [8 x i8], ptr @c, i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP2]], align 16
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP3]], align 16
; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD]], [[WIDE_LOAD5]]
; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD4]], [[WIDE_LOAD6]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw double, ptr @a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr @a, i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16
; CHECK-NEXT: store <2 x double> [[TMP4]], ptr [[TMP6]], align 16
; CHECK-NEXT: store <2 x double> [[TMP5]], ptr [[TMP7]], align 16
@@ -200,12 +200,12 @@ define void @test_runtime_trip_count(i32 %N) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY_PREHEADER7]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr @b, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr @b, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP9:%.*]] = load double, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw double, ptr @c, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [8 x i8], ptr @c, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP9]], [[TMP10]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw double, ptr @a, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [8 x i8], ptr @a, i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[ADD]], ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
index 7954ff051a33d..94766a1a83f7b 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
@@ -31,7 +31,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O1-NEXT: br i1 [[EXITCOND7_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_COND1_PREHEADER]], !llvm.loop [[LOOP0:![0-9]+]]
; O1: [[FOR_BODY4]]:
; O1-NEXT: [[J_05:%.*]] = phi i64 [ [[INC5:%.*]], %[[FOR_BODY4]] ], [ 0, %[[FOR_COND1_PREHEADER]] ]
-; O1-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[J_05]]
+; O1-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[J_05]]
; O1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ADD_PTR_I]], align 4, !tbaa [[INT_TBAA2:![0-9]+]]
; O1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
; O1-NEXT: store i32 [[INC]], ptr [[ADD_PTR_I]], align 4, !tbaa [[INT_TBAA2]]
@@ -55,7 +55,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY4_PREHEADER9:.*]], label %[[VECTOR_BODY:.*]]
; O2: [[VECTOR_BODY]]:
; O2-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], [ 0, %[[FOR_BODY4_PREHEADER]] ]
-; O2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[INDEX]]
+; O2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[INDEX]]
; O2-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !tbaa [[INT_TBAA0:![0-9]+]]
; O2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !tbaa [[INT_TBAA0]]
@@ -79,7 +79,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O2-NEXT: br i1 [[EXITCOND7_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_COND1_PREHEADER]], !llvm.loop [[LOOP8:![0-9]+]]
; O2: [[FOR_BODY4]]:
; O2-NEXT: [[J_05:%.*]] = phi i64 [ [[INC5:%.*]], %[[FOR_BODY4]] ], [ [[J_05_PH]], %[[FOR_BODY4_PREHEADER9]] ]
-; O2-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[J_05]]
+; O2-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[J_05]]
; O2-NEXT: [[TMP6:%.*]] = load i32, ptr [[ADD_PTR_I]], align 4, !tbaa [[INT_TBAA0]]
; O2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP6]], 1
; O2-NEXT: store i32 [[INC]], ptr [[ADD_PTR_I]], align 4, !tbaa [[INT_TBAA0]]
@@ -103,7 +103,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O3-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY4_US_PREHEADER:.*]], label %[[VECTOR_BODY:.*]]
; O3: [[VECTOR_BODY]]:
; O3-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], [ 0, %[[FOR_COND1_PREHEADER_US]] ]
-; O3-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[INDEX]]
+; O3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[INDEX]]
; O3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !tbaa [[INT_TBAA0:![0-9]+]]
; O3-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !tbaa [[INT_TBAA0]]
@@ -121,7 +121,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O3-NEXT: br label %[[FOR_BODY4_US:.*]]
; O3: [[FOR_BODY4_US]]:
; O3-NEXT: [[J_05_US:%.*]] = phi i64 [ [[INC5_US:%.*]], %[[FOR_BODY4_US]] ], [ [[J_05_US_PH]], %[[FOR_BODY4_US_PREHEADER]] ]
-; O3-NEXT: [[ADD_PTR_I_US:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[J_05_US]]
+; O3-NEXT: [[ADD_PTR_I_US:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP0]], i64 [[J_05_US]]
; O3-NEXT: [[TMP6:%.*]] = load i32, ptr [[ADD_PTR_I_US]], align 4, !tbaa [[INT_TBAA0]]
; O3-NEXT: [[INC_US:%.*]] = add nsw i32 [[TMP6]], 1
; O3-NEXT: store i32 [[INC_US]], ptr [[ADD_PTR_I_US]], align 4, !tbaa [[INT_TBAA0]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions2.ll b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions2.ll
index b77abb7e8f203..f90b4dcaa47bd 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions2.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions2.ll
@@ -12,7 +12,7 @@ define i32 @f(i32 noundef %x) {
; CHECK-NEXT: br i1 [[TMP0]], label %[[SWITCH_LOOKUP:.*]], label %[[SW_EPILOG:.*]]
; CHECK: [[SWITCH_LOOKUP]]:
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[X]] to i64
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw i32, ptr @switch.table.g, i64 [[TMP1]]
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @switch.table.g, i64 [[TMP1]]
; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label %[[SW_EPILOG]]
; CHECK: [[SW_EPILOG]]:
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions3.ll b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions3.ll
index 71700276398ff..8ff7dfbf864f7 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions3.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions3.ll
@@ -15,7 +15,7 @@ define range(i32 0, 2) i32 @f(i32 noundef %x) local_unnamed_addr {
; CHECK-NEXT: br i1 [[TMP0]], label %[[SWITCH_LOOKUP:.*]], label %[[SW_EPILOG:.*]]
; CHECK: [[SWITCH_LOOKUP]]:
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[X]] to i64
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw i32, ptr @switch.table.g, i64 [[TMP1]]
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @switch.table.g, i64 [[TMP1]]
; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label %[[SW_EPILOG]]
; CHECK: [[SW_EPILOG]]:
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll b/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll
index 9177f279217fa..975de14a95fc4 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll
@@ -42,7 +42,7 @@ define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 65793)
; CHECK-NEXT: [[TMP6:%.*]] = or disjoint <4 x i32> [[TMP4]], splat (i32 -16777216)
; CHECK-NEXT: [[TMP7:%.*]] = or disjoint <4 x i32> [[TMP5]], splat (i32 -16777216)
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[POUT:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[POUT:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4
@@ -62,7 +62,7 @@ define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT: [[OR2:%.*]] = mul nuw nsw i32 [[CONV]], 65793
; CHECK-NEXT: [[OR3:%.*]] = or disjoint i32 [[OR2]], -16777216
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i32, ptr [[POUT]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[POUT]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 [[OR3]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
index 482907d295706..6bb969a03ba06 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
@@ -12,12 +12,12 @@ define void @foo(ptr noalias noundef %0, ptr noalias noundef %1) optsize {
; CHECK: vector.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[TMP2:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[TMP4]] ]
; CHECK-NEXT: [[TMP3:%.*]] = sub nuw nsw i64 255, [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP0]], i64 [[TMP3]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 -28
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = load <8 x i32>, ptr [[GEP]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_GATHER]], splat (i32 5)
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[TMP1]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP10]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 256
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
index d9803697a7265..9a7d064580691 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
@@ -14,7 +14,7 @@ define void @test(i32 noundef %nface, i32 noundef %ncell, ptr noalias noundef %f
; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[NFACE]] to i64
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[FACE_CELL]], i64 [[TMP0]]
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[FACE_CELL]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[NFACE]], 4
; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_BODY_PREHEADER14:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
@@ -22,14 +22,14 @@ define void @test(i32 noundef %nface, i32 noundef %ncell, ptr noalias noundef %f
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i32, ptr [[FACE_CELL]], i64 [[INDVARS_IV_EPIL]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[FACE_CELL]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4, !tbaa [[INT_TBAA0:![0-9]+]], !llvm.access.group [[ACC_GRP4:![0-9]+]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV_EPIL]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !tbaa [[INT_TBAA0]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[Y]], <4 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [8 x i8], ptr [[Y]], <4 x i64> [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[WIDE_LOAD12]] to <4 x i64>
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[X]], <4 x i64> [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [8 x i8], ptr [[X]], <4 x i64> [[TMP5]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = tail call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> align 8 [[TMP4]], <4 x i1> splat (i1 true), <4 x double> poison), !tbaa [[DOUBLE_TBAA5:![0-9]+]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER13:%.*]] = tail call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> align 8 [[TMP6]], <4 x i1> splat (i1 true), <4 x double> poison), !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast olt <4 x double> [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_GATHER13]]
@@ -48,19 +48,19 @@ define void @test(i32 noundef %nface, i32 noundef %ncell, ptr noalias noundef %f
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER14]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[FACE_CELL]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[FACE_CELL]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA0]], !llvm.access.group [[ACC_GRP4]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[GEP]], align 4, !tbaa [[INT_TBAA0]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[IDXPROM3_3:%.*]] = sext i32 [[TMP22]] to i64
-; CHECK-NEXT: [[ARRAYIDX4_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 [[IDXPROM3_3]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [8 x i8], ptr [[Y]], i64 [[IDXPROM3_3]]
; CHECK-NEXT: [[IDXPROM5_3:%.*]] = sext i32 [[TMP23]] to i64
-; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 [[IDXPROM5_3]]
-; CHECK-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX4_3]], align 8, !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
-; CHECK-NEXT: [[TMP25:%.*]] = load double, ptr [[ARRAYIDX6_3]], align 8, !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x i8], ptr [[X]], i64 [[IDXPROM5_3]]
+; CHECK-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX4]], align 8, !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
+; CHECK-NEXT: [[TMP25:%.*]] = load double, ptr [[ARRAYIDX6]], align 8, !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[CMP_I_3:%.*]] = fcmp fast olt double [[TMP24]], [[TMP25]]
; CHECK-NEXT: [[TMP26:%.*]] = select i1 [[CMP_I_3]], double [[TMP25]], double [[TMP24]]
-; CHECK-NEXT: store double [[TMP26]], ptr [[ARRAYIDX4_3]], align 8, !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
+; CHECK-NEXT: store double [[TMP26]], ptr [[ARRAYIDX4]], align 8, !tbaa [[DOUBLE_TBAA5]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll b/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
index b8b166bb0e0e3..c186d089c9ec3 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
@@ -14,7 +14,7 @@ define i32 @f(i32 %c) {
; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
; CHECK: switch.lookup:
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[SWITCH_TABLEIDX]] to i64
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw i32, ptr @switch.table.f, i64 [[TMP1]]
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @switch.table.f, i64 [[TMP1]]
; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll b/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
index fcdb68353311d..a57c3ecdd2505 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
@@ -19,7 +19,7 @@ define void @licm(ptr align 8 dereferenceable(8) %_M_start.i, i64 %numElem) {
; O1-NEXT: br label %[[FOR_BODY:.*]]
; O1: [[FOR_BODY]]:
; O1-NEXT: [[K_02:%.*]] = phi i64 [ 0, %[[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; O1-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds double, ptr [[TMP0]], i64 [[K_02]]
+; O1-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[K_02]]
; O1-NEXT: store double 2.000000e+00, ptr [[ADD_PTR_I]], align 8, !tbaa [[DOUBLE_TBAA8:![0-9]+]]
; O1-NEXT: [[INC]] = add nuw i64 [[K_02]], 1
; O1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[NUMELEM]]
@@ -41,7 +41,7 @@ define void @licm(ptr align 8 dereferenceable(8) %_M_start.i, i64 %numElem) {
; O23-NEXT: br label %[[VECTOR_BODY:.*]]
; O23: [[VECTOR_BODY]]:
; O23-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; O23-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, ptr [[TMP0]], i64 [[INDEX]]
+; O23-NEXT: [[TMP1:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[INDEX]]
; O23-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; O23-NEXT: store <2 x double> splat (double 2.000000e+00), ptr [[TMP1]], align 8, !tbaa [[DOUBLE_TBAA8:![0-9]+]]
; O23-NEXT: store <2 x double> splat (double 2.000000e+00), ptr [[TMP2]], align 8, !tbaa [[DOUBLE_TBAA8]]
@@ -56,7 +56,7 @@ define void @licm(ptr align 8 dereferenceable(8) %_M_start.i, i64 %numElem) {
; O23-NEXT: br label %[[FOR_BODY:.*]]
; O23: [[FOR_BODY]]:
; O23-NEXT: [[K_02:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[K_02_PH]], %[[FOR_BODY_PREHEADER]] ]
-; O23-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds double, ptr [[TMP0]], i64 [[K_02]]
+; O23-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP0]], i64 [[K_02]]
; O23-NEXT: store double 2.000000e+00, ptr [[ADD_PTR_I]], align 8, !tbaa [[DOUBLE_TBAA8]]
; O23-NEXT: [[INC]] = add nuw i64 [[K_02]], 1
; O23-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[NUMELEM]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/spurious-peeling.ll b/llvm/test/Transforms/PhaseOrdering/X86/spurious-peeling.ll
index 574132c18d263..2951ac16334c8 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/spurious-peeling.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/spurious-peeling.ll
@@ -18,18 +18,18 @@ define dso_local void @_Z13vecIncFromPtrP12FloatVecPair(ptr %FVP) {
; O1-NEXT: [[ENTRY:.*:]]
; O1-NEXT: [[VSRC23_I:%.*]] = getelementptr inbounds nuw i8, ptr [[FVP]], i64 16
; O1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VSRC23_I]], align 8, !tbaa [[ANYPTR_TBAA0:![0-9]+]]
-; O1-NEXT: [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds [[CLASS_HOMEMADEVECTOR_0:%.*]], ptr [[TMP0]], i64 undef
+; O1-NEXT: [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds [16 x i8], ptr [[TMP0]], i64 undef
; O1-NEXT: [[SIZE4_I:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX_I_I]], i64 8
; O1-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE4_I]], align 8, !tbaa [[INT_TBAA6:![0-9]+]]
; O1-NEXT: [[CMP56_NOT_I:%.*]] = icmp eq i32 [[TMP1]], 0
; O1-NEXT: br i1 [[CMP56_NOT_I]], label %[[_ZN12FLOATVECPAIR6VECINCEV_EXIT:.*]], label %[[FOR_BODY7_LR_PH_I:.*]]
; O1: [[FOR_BODY7_LR_PH_I]]:
; O1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX_I_I]], align 8, !tbaa [[ANYPTR_TBAA8:![0-9]+]]
-; O1-NEXT: [[ARRAYIDX_I3_I:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 undef
+; O1-NEXT: [[ARRAYIDX_I3_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i64 undef
; O1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[FVP]], align 8, !tbaa [[ANYPTR_TBAA0]]
-; O1-NEXT: [[ARRAYIDX_I4_I:%.*]] = getelementptr inbounds [[CLASS_HOMEMADEVECTOR_0]], ptr [[TMP3]], i64 undef
+; O1-NEXT: [[ARRAYIDX_I4_I:%.*]] = getelementptr inbounds [16 x i8], ptr [[TMP3]], i64 undef
; O1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYIDX_I4_I]], align 8, !tbaa [[ANYPTR_TBAA8]]
-; O1-NEXT: [[ARRAYIDX_I5_I:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 undef
+; O1-NEXT: [[ARRAYIDX_I5_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP4]], i64 undef
; O1-NEXT: br label %[[FOR_BODY7_I:.*]]
; O1: [[FOR_BODY7_I]]:
; O1-NEXT: [[J_07_I:%.*]] = phi i32 [ 0, %[[FOR_BODY7_LR_PH_I]] ], [ [[INC_I:%.*]], %[[FOR_BODY7_I]] ]
@@ -48,18 +48,18 @@ define dso_local void @_Z13vecIncFromPtrP12FloatVecPair(ptr %FVP) {
; O23-NEXT: [[ENTRY:.*:]]
; O23-NEXT: [[VSRC23_I:%.*]] = getelementptr inbounds nuw i8, ptr [[FVP]], i64 16
; O23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VSRC23_I]], align 8, !tbaa [[ANYPTR_TBAA0:![0-9]+]]
-; O23-NEXT: [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds [[CLASS_HOMEMADEVECTOR_0:%.*]], ptr [[TMP0]], i64 undef
+; O23-NEXT: [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds [16 x i8], ptr [[TMP0]], i64 undef
; O23-NEXT: [[SIZE4_I:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX_I_I]], i64 8
; O23-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE4_I]], align 8, !tbaa [[INT_TBAA6:![0-9]+]]
; O23-NEXT: [[CMP56_NOT_I:%.*]] = icmp eq i32 [[TMP1]], 0
; O23-NEXT: br i1 [[CMP56_NOT_I]], label %[[_ZN12FLOATVECPAIR6VECINCEV_EXIT:.*]], label %[[FOR_BODY7_LR_PH_I:.*]]
; O23: [[FOR_BODY7_LR_PH_I]]:
; O23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX_I_I]], align 8, !tbaa [[ANYPTR_TBAA8:![0-9]+]]
-; O23-NEXT: [[ARRAYIDX_I3_I:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 undef
+; O23-NEXT: [[ARRAYIDX_I3_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i64 undef
; O23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[FVP]], align 8, !tbaa [[ANYPTR_TBAA0]]
-; O23-NEXT: [[ARRAYIDX_I4_I:%.*]] = getelementptr inbounds [[CLASS_HOMEMADEVECTOR_0]], ptr [[TMP3]], i64 undef
+; O23-NEXT: [[ARRAYIDX_I4_I:%.*]] = getelementptr inbounds [16 x i8], ptr [[TMP3]], i64 undef
; O23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYIDX_I4_I]], align 8, !tbaa [[ANYPTR_TBAA8]]
-; O23-NEXT: [[ARRAYIDX_I5_I:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 undef
+; O23-NEXT: [[ARRAYIDX_I5_I:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP4]], i64 undef
; O23-NEXT: [[DOTPRE_I:%.*]] = load float, ptr [[ARRAYIDX_I5_I]], align 4, !tbaa [[FLOAT_TBAA9:![0-9]+]]
; O23-NEXT: br label %[[FOR_BODY7_I:.*]]
; O23: [[FOR_BODY7_I]]:
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
index 57637d6077f28..3082b561b7013 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
@@ -24,7 +24,7 @@ define void @vdiv(ptr %a, float %b) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !tbaa [[FLOAT_TBAA3:![0-9]+]]
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[WIDE_LOAD]], [[TMP0]]
; CHECK-NEXT: store <4 x float> [[TMP3]], ptr [[TMP1]], align 4, !tbaa [[FLOAT_TBAA3]]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll
index 66a4d3f3dc7dd..820a7a801379e 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll
@@ -41,7 +41,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 32
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 64
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 96
@@ -53,7 +53,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = fmul fast <4 x double> [[WIDE_LOAD6]], [[TMP2]]
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <4 x double> [[WIDE_LOAD7]], [[TMP3]]
; CHECK-NEXT: [[TMP12:%.*]] = fmul fast <4 x double> [[WIDE_LOAD8]], [[TMP4]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i64 32
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i64 64
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i64 96
@@ -69,7 +69,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER9]], label %[[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER9]], label %[[VEC_EPILOG_PH]], !prof [[PROF10:![0-9]+]]
; CHECK: [[VEC_EPILOG_PH]]:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
; CHECK-NEXT: [[N_VEC11:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483644
@@ -79,14 +79,14 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT16:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX12]]
+; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDEX12]]
; CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <4 x double>, ptr [[TMP39]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP40:%.*]] = fmul fast <4 x double> [[WIDE_LOAD13]], [[TMP38]]
-; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDEX12]]
+; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDEX12]]
; CHECK-NEXT: store <4 x double> [[TMP40]], ptr [[TMP41]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDEX_NEXT16]] = add nuw i64 [[INDEX12]], 4
; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC11]]
-; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[N_VEC11]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: br i1 [[CMP_N17]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER9]]
@@ -102,15 +102,15 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK: [[FOR_BODY_PROL]]:
; CHECK-NEXT: [[INDVARS_IV_PROL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_PROL:%.*]], %[[FOR_BODY_PROL]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY_PROL_PREHEADER]] ]
; CHECK-NEXT: [[PROL_ITER:%.*]] = phi i64 [ [[PROL_ITER_NEXT:%.*]], %[[FOR_BODY_PROL]] ], [ 0, %[[FOR_BODY_PROL_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_PROL]]
+; CHECK-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_PROL]]
; CHECK-NEXT: [[T0_PROL:%.*]] = load double, ptr [[ARRAYIDX_PROL]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP19:%.*]] = fmul fast double [[T0_PROL]], [[TMP18]]
-; CHECK-NEXT: [[ARRAYIDX2_PROL:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_PROL]]
+; CHECK-NEXT: [[ARRAYIDX2_PROL:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_PROL]]
; CHECK-NEXT: store double [[TMP19]], ptr [[ARRAYIDX2_PROL]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_PROL]] = add nuw nsw i64 [[INDVARS_IV_PROL]], 1
; CHECK-NEXT: [[PROL_ITER_NEXT]] = add i64 [[PROL_ITER]], 1
; CHECK-NEXT: [[PROL_ITER_CMP_NOT:%.*]] = icmp eq i64 [[PROL_ITER_NEXT]], [[XTRAITER]]
-; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[FOR_BODY_PROL_LOOPEXIT]], label %[[FOR_BODY_PROL]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[FOR_BODY_PROL_LOOPEXIT]], label %[[FOR_BODY_PROL]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[FOR_BODY_PROL_LOOPEXIT]]:
; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER9]] ], [ [[INDVARS_IV_NEXT_PROL]], %[[FOR_BODY_PROL]] ]
; CHECK-NEXT: [[TMP20:%.*]] = sub nsw i64 [[INDVARS_IV_PH]], [[WIDE_TRIP_COUNT]]
@@ -128,56 +128,56 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_UNR]], %[[FOR_BODY_PREHEADER9_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP30:%.*]] = fmul fast double [[T0]], [[TMP22]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[TMP30]], ptr [[ARRAYIDX2]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[T0_1:%.*]] = load double, ptr [[ARRAYIDX_1]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP31:%.*]] = fmul fast double [[T0_1]], [[TMP23]]
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: store double [[TMP31]], ptr [[ARRAYIDX2_1]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[T0_2:%.*]] = load double, ptr [[ARRAYIDX_2]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP32:%.*]] = fmul fast double [[T0_2]], [[TMP24]]
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: store double [[TMP32]], ptr [[ARRAYIDX2_2]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[T0_3:%.*]] = load double, ptr [[ARRAYIDX_3]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP33:%.*]] = fmul fast double [[T0_3]], [[TMP25]]
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: store double [[TMP33]], ptr [[ARRAYIDX2_3]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT: [[T0_4:%.*]] = load double, ptr [[ARRAYIDX_4]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP34:%.*]] = fmul fast double [[T0_4]], [[TMP26]]
-; CHECK-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT: store double [[TMP34]], ptr [[ARRAYIDX2_4]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 5
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT: [[T0_5:%.*]] = load double, ptr [[ARRAYIDX_5]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP35:%.*]] = fmul fast double [[T0_5]], [[TMP27]]
-; CHECK-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT: store double [[TMP35]], ptr [[ARRAYIDX2_5]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 6
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT: [[T0_6:%.*]] = load double, ptr [[ARRAYIDX_6]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP36:%.*]] = fmul fast double [[T0_6]], [[TMP28]]
-; CHECK-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT: store double [[TMP36]], ptr [[ARRAYIDX2_6]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 7
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[Y]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT: [[T0_7:%.*]] = load double, ptr [[ARRAYIDX_7]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP37:%.*]] = fmul fast double [[T0_7]], [[TMP29]]
-; CHECK-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[X]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT: store double [[TMP37]], ptr [[ARRAYIDX2_7]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_7]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
@@ -232,8 +232,9 @@ attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META8:![0-9]+]], [[META9:![0-9]+]]}
; CHECK: [[META8]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META8]], [[META9]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META12:![0-9]+]]}
-; CHECK: [[META12]] = !{!"llvm.loop.unroll.disable"}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META8]]}
+; CHECK: [[PROF10]] = !{!"branch_weights", i32 4, i32 12}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META8]], [[META9]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META13:![0-9]+]]}
+; CHECK: [[META13]] = !{!"llvm.loop.unroll.disable"}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META8]]}
;.
diff --git a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
index c6d1cbd421436..3550c20c080e6 100644
--- a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
+++ b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
@@ -7,7 +7,7 @@ define void @test(ptr noundef %a, i32 noundef %beam) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[BEAM]], 1
; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i32 [[MUL]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[IDXPROM]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
@@ -21,7 +21,7 @@ define void @test(ptr noundef %a, i32 noundef %beam) {
; CHECK: if.else:
; CHECK-NEXT: [[MUL2:%.*]] = shl nuw nsw i32 [[I_06]], 1
; CHECK-NEXT: [[IDXPROM3:%.*]] = zext nneg i32 [[MUL2]] to i64
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IDXPROM3]]
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A]], i64 [[IDXPROM3]]
; CHECK-NEXT: store i32 1, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
index 5253c42d9c6d2..3758140a20c4c 100644
--- a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
@@ -147,7 +147,7 @@ define void @foo(ptr noundef nonnull align 8 dereferenceable(24) noalias %vec) #
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_010:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds double, ptr [[TMP1]], i64 [[I_010]]
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds [8 x i8], ptr [[TMP1]], i64 [[I_010]]
; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[ADD_PTR_I]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP2]], 1.000000e+00
; CHECK-NEXT: store double [[ADD]], ptr [[ADD_PTR_I]], align 8
@@ -286,7 +286,7 @@ define void @loop_with_signed_induction(ptr noundef nonnull align 8 dereferencea
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_010:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds nuw double, ptr [[TMP1]], i64 [[I_010]]
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds nuw [8 x i8], ptr [[TMP1]], i64 [[I_010]]
; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[ADD_PTR_I]], align 8, !tbaa [[DOUBLE_TBAA6:![0-9]+]]
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP2]], 1.000000e+00
; CHECK-NEXT: store double [[ADD]], ptr [[ADD_PTR_I]], align 8, !tbaa [[DOUBLE_TBAA6]]
@@ -360,7 +360,7 @@ define void @monkey(ptr noundef %arr, i32 noundef %len) {
; CHECK: [[FOR_BODY4]]:
; CHECK-NEXT: [[K_07:%.*]] = phi i32 [ [[DEC:%.*]], %[[FOR_BODY4]] ], [ [[I_09]], %[[FOR_BODY4_PREHEADER]] ]
; CHECK-NEXT: [[IDX_EXT_I:%.*]] = zext i32 [[K_07]] to i64
-; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR]], i64 [[IDX_EXT_I]]
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[ARR]], i64 [[IDX_EXT_I]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ADD_PTR_I]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[ADD_PTR_I]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll b/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll
index 6183a2679c3aa..baf7bf38f3351 100644
--- a/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll
+++ b/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll
@@ -19,7 +19,7 @@ define void @f(i1 %x) !prof !0 {
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 65, %[[ENTRY]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr null, i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [8 x i8], ptr null, i64 [[EVL_BASED_IV]]
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> poison, ptr align 8 [[TMP4]], <vscale x 2 x i1> [[TMP2]], i32 [[TMP3]])
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> poison, ptr align 8 [[TMP4]], <vscale x 2 x i1> [[TMP2]], i32 [[TMP3]])
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg i32 [[TMP3]] to i64
diff --git a/llvm/test/Transforms/PhaseOrdering/lto-argpromotion-ipsccp.ll b/llvm/test/Transforms/PhaseOrdering/lto-argpromotion-ipsccp.ll
index bc42b0b354ab7..9d78901443a96 100644
--- a/llvm/test/Transforms/PhaseOrdering/lto-argpromotion-ipsccp.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lto-argpromotion-ipsccp.ll
@@ -28,7 +28,7 @@ define internal void @child(ptr %p, ptr %n, ptr %c) noinline {
; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[FOR_END:.*]], label %[[FOR_INC]]
; CHECK: [[FOR_INC]]:
; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i32 [[I_0]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[P]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[P]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP0]], 5
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/lto-licm.ll b/llvm/test/Transforms/PhaseOrdering/lto-licm.ll
index 8b51f9f465587..03c2375a1c723 100644
--- a/llvm/test/Transforms/PhaseOrdering/lto-licm.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lto-licm.ll
@@ -12,7 +12,7 @@ define void @hoist_fdiv(ptr %a, float %b) {
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i32 [[I_0]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr [[A:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[TMP1]], [[TMP0]]
; CHECK-NEXT: store float [[TMP2]], ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll b/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
index 625941a93335b..025a1b34086aa 100644
--- a/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
+++ b/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
@@ -19,9 +19,9 @@ define void @test1(i32 %d, ptr %p) nounwind uwtable ssp {
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,64) S: [0,64) Exits: 63 LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %p.addr.02 = phi ptr [ %p, %entry ], [ %add.ptr1, %for.body ]
; CHECK-NEXT: --> {%p,+,(8 * (%d /u 4))}<%for.body> U: full-set S: full-set Exits: ((504 * (%d /u 4)) + %p) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %add.ptr = getelementptr inbounds nuw i32, ptr %p.addr.02, i32 %div1
+; CHECK-NEXT: %add.ptr = getelementptr inbounds nuw [4 x i8], ptr %p.addr.02, i32 %div1
; CHECK-NEXT: --> {((4 * (%d /u 4))<nuw><nsw> + %p)<nuw>,+,(8 * (%d /u 4))}<%for.body> U: full-set S: full-set Exits: ((508 * (%d /u 4)) + %p) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %add.ptr1 = getelementptr inbounds nuw i32, ptr %add.ptr, i32 %div1
+; CHECK-NEXT: %add.ptr1 = getelementptr inbounds nuw [4 x i8], ptr %add.ptr, i32 %div1
; CHECK-NEXT: --> {((8 * (%d /u 4)) + %p),+,(8 * (%d /u 4))}<%for.body> U: full-set S: full-set Exits: ((512 * (%d /u 4)) + %p) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %inc = add nuw nsw i32 %i.03, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%for.body> U: [1,65) S: [1,65) Exits: 64 LoopDispositions: { %for.body: Computable }
@@ -66,9 +66,9 @@ define void @test1a(i32 %d, ptr %p) nounwind uwtable ssp {
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,64) S: [0,64) Exits: 63 LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %p.addr.02 = phi ptr [ %p, %entry ], [ %add.ptr1, %for.body ]
; CHECK-NEXT: --> {%p,+,(8 * (%d /u 2))}<%for.body> U: full-set S: full-set Exits: ((504 * (%d /u 2)) + %p) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %add.ptr = getelementptr inbounds nuw i32, ptr %p.addr.02, i32 %div1
+; CHECK-NEXT: %add.ptr = getelementptr inbounds nuw [4 x i8], ptr %p.addr.02, i32 %div1
; CHECK-NEXT: --> {((4 * (%d /u 2))<nuw><nsw> + %p)<nuw>,+,(8 * (%d /u 2))}<%for.body> U: full-set S: full-set Exits: ((508 * (%d /u 2)) + %p) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %add.ptr1 = getelementptr inbounds nuw i32, ptr %add.ptr, i32 %div1
+; CHECK-NEXT: %add.ptr1 = getelementptr inbounds nuw [4 x i8], ptr %add.ptr, i32 %div1
; CHECK-NEXT: --> {((8 * (%d /u 2)) + %p),+,(8 * (%d /u 2))}<%for.body> U: full-set S: full-set Exits: ((512 * (%d /u 2)) + %p) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %inc = add nuw nsw i32 %i.03, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%for.body> U: [1,65) S: [1,65) Exits: 64 LoopDispositions: { %for.body: Computable }
@@ -111,7 +111,7 @@ define void @test_range_ref1a(i32 %x) {
; CHECK-NEXT: Classifying expressions for: @test_range_ref1a
; CHECK-NEXT: %i.01.0 = phi i32 [ 100, %entry ], [ %tmp4, %bb ]
; CHECK-NEXT: --> {100,+,-1}<nsw><%bb> U: [0,101) S: [0,101) Exits: 0 LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %tmp1 = getelementptr i32, ptr @array, i32 %i.01.0
+; CHECK-NEXT: %tmp1 = getelementptr [4 x i8], ptr @array, i32 %i.01.0
; CHECK-NEXT: --> {(400 + @array)<nuw>,+,-4}<nw><%bb> U: [0,-3) S: [-2147483648,2147483645) Exits: @array LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %tmp4 = add nsw i32 %i.01.0, -1
; CHECK-NEXT: --> {99,+,-1}<nsw><%bb> U: [-1,100) S: [-1,100) Exits: -1 LoopDispositions: { %bb: Computable }
@@ -143,11 +143,11 @@ define i32 @test_loop_idiom_recogize(i32 %x, i32 %y, ptr %lam, ptr %alp) nounwin
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb1> U: [0,256) S: [0,256) Exits: 255 LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %i.0.reg2mem.0 = sub nuw nsw i32 255, %indvar
; CHECK-NEXT: --> {255,+,-1}<nsw><%bb1> U: [0,256) S: [0,256) Exits: 0 LoopDispositions: { %bb1: Computable }
-; CHECK-NEXT: %0 = getelementptr i32, ptr %alp, i32 %i.0.reg2mem.0
+; CHECK-NEXT: %0 = getelementptr [4 x i8], ptr %alp, i32 %i.0.reg2mem.0
; CHECK-NEXT: --> {(1020 + %alp),+,-4}<nw><%bb1> U: full-set S: full-set Exits: %alp LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %1 = load i32, ptr %0, align 4
; CHECK-NEXT: --> %1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb1: Variant }
-; CHECK-NEXT: %2 = getelementptr i32, ptr %lam, i32 %i.0.reg2mem.0
+; CHECK-NEXT: %2 = getelementptr [4 x i8], ptr %lam, i32 %i.0.reg2mem.0
; CHECK-NEXT: --> {(1020 + %lam),+,-4}<nw><%bb1> U: full-set S: full-set Exits: %lam LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %indvar.next = add nuw nsw i32 %indvar, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb1> U: [1,257) S: [1,257) Exits: 256 LoopDispositions: { %bb1: Computable }
diff --git a/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll b/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll
index ba71299d6919e..fba8a6beec78d 100644
--- a/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll
+++ b/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll
@@ -70,8 +70,8 @@ declare void @foo()
define double @max_of_loads(ptr %x, ptr %y, i64 %i) {
; CHECK-LABEL: @max_of_loads(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[XI_PTR:%.*]] = getelementptr double, ptr [[X:%.*]], i64 [[I:%.*]]
-; CHECK-NEXT: [[YI_PTR:%.*]] = getelementptr double, ptr [[Y:%.*]], i64 [[I]]
+; CHECK-NEXT: [[XI_PTR:%.*]] = getelementptr [8 x i8], ptr [[X:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[YI_PTR:%.*]] = getelementptr [8 x i8], ptr [[Y:%.*]], i64 [[I]]
; CHECK-NEXT: [[XI:%.*]] = load double, ptr [[XI_PTR]], align 8
; CHECK-NEXT: [[YI:%.*]] = load double, ptr [[YI_PTR]], align 8
; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt double [[XI]], [[YI]]
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll b/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll
index cdd0fcab59b7d..209eda54069cd 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll
@@ -35,7 +35,7 @@ define amdgpu_kernel void @uniform_unswitch(ptr nocapture %out, i32 %n, i32 %x)
; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[I_07]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr addrspace(1) [[OUT_GLOBAL]], i64 [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4 x i8], ptr addrspace(1) [[OUT_GLOBAL]], i64 [[TMP0]]
; CHECK-NEXT: store i32 [[I_07]], ptr addrspace(1) [[ARRAYIDX]], align 4
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
diff --git a/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll b/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll
index 4f21c06e519ff..88652f4da6180 100644
--- a/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll
+++ b/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll
@@ -13,7 +13,7 @@ define i32 @foo(i32 %x) section ".tcm_text" {
; ENABLE-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X:%.*]], 6
; ENABLE-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
; ENABLE: switch.lookup:
-; ENABLE-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw i32, ptr @switch.table.foo, i32 [[X]]
+; ENABLE-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @switch.table.foo, i32 [[X]]
; ENABLE-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; ENABLE-NEXT: br label [[RETURN]]
; ENABLE: return: