[llvm] ba1759c - [LoadStoreVectorizer] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 27 03:57:22 PST 2022


Author: Nikita Popov
Date: 2022-12-27T12:57:01+01:00
New Revision: ba1759c498367c09d0dd7bcccad2ef0c138ca06e

URL: https://github.com/llvm/llvm-project/commit/ba1759c498367c09d0dd7bcccad2ef0c138ca06e
DIFF: https://github.com/llvm/llvm-project/commit/ba1759c498367c09d0dd7bcccad2ef0c138ca06e.diff

LOG: [LoadStoreVectorizer] Convert some tests to opaque pointers (NFC)

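For reference, the conversion is mechanical: typed pointer syntax is replaced
with the opaque ptr type, and bitcasts between pointer types become redundant
and are dropped. A representative before/after pair, taken from
complex-index.ll in the diff below:

  ; typed pointers (before)
  %arrayidx = getelementptr inbounds float, float addrspace(1)* %c, i64 %add10
  %load1 = load float, float addrspace(1)* %arrayidx, align 4

  ; opaque pointers (after)
  %arrayidx = getelementptr inbounds float, ptr addrspace(1) %c, i64 %add10
  %load1 = load float, ptr addrspace(1) %arrayidx, align 4
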
Added: 
    

Modified: 
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
    llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll
    llvm/test/Transforms/LoadStoreVectorizer/NVPTX/merge-across-side-effects.ll
    llvm/test/Transforms/LoadStoreVectorizer/NVPTX/non-instr-bitcast.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/load-width-inseltpoison.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll
    llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll
    llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
index 6d48fba131c40..36f6b09a114d0 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
@@ -12,7 +12,7 @@ declare double @llvm.fmuladd.f64(double, double, double)
 ; CHECK-LABEL: @factorizedVsNonfactorizedAccess(
 ; CHECK: load <2 x float>
 ; CHECK: store <2 x float>
-define amdgpu_kernel void @factorizedVsNonfactorizedAccess(float addrspace(1)* nocapture %c) {
+define amdgpu_kernel void @factorizedVsNonfactorizedAccess(ptr addrspace(1) nocapture %c) {
 entry:
   %call = tail call i64 @_Z12get_local_idj(i32 0)
   %call1 = tail call i64 @_Z12get_group_idj(i32 0)
@@ -28,25 +28,25 @@ entry:
   %add7 = add nuw i64 %mul5, %mul6
   %mul9 = shl i64 %add7, 10
   %add10 = add i64 %mul9, %add
-  %arrayidx = getelementptr inbounds float, float addrspace(1)* %c, i64 %add10
-  %load1 = load float, float addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr addrspace(1) %c, i64 %add10
+  %load1 = load float, ptr addrspace(1) %arrayidx, align 4
   %conv = fpext float %load1 to double
   %mul11 = fmul double %conv, 0x3FEAB481D8F35506
   %conv12 = fptrunc double %mul11 to float
   %conv18 = fpext float %conv12 to double
   %storeval1 = tail call double @llvm.fmuladd.f64(double 0x3FF4FFAFBBEC946A, double 0.000000e+00, double %conv18)
   %cstoreval1 = fptrunc double %storeval1 to float
-  store float %cstoreval1, float addrspace(1)* %arrayidx, align 4
+  store float %cstoreval1, ptr addrspace(1) %arrayidx, align 4
 
   %add23 = or i64 %add10, 1
-  %arrayidx24 = getelementptr inbounds float, float addrspace(1)* %c, i64 %add23
-  %load2 = load float, float addrspace(1)* %arrayidx24, align 4
+  %arrayidx24 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %add23
+  %load2 = load float, ptr addrspace(1) %arrayidx24, align 4
   %conv25 = fpext float %load2 to double
   %mul26 = fmul double %conv25, 0x3FEAB481D8F35506
   %conv27 = fptrunc double %mul26 to float
   %conv34 = fpext float %conv27 to double
   %storeval2 = tail call double @llvm.fmuladd.f64(double 0x3FF4FFAFBBEC946A, double 0.000000e+00, double %conv34)
   %cstoreval2 = fptrunc double %storeval2 to float
-  store float %cstoreval2, float addrspace(1)* %arrayidx24, align 4
+  store float %cstoreval2, ptr addrspace(1) %arrayidx24, align 4
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
index 925d1fd6d289b..3a065b3db9f42 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
@@ -9,23 +9,23 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
 ; CHECK: sext i32 %id.x to i64
 ; CHECK: load <2 x float>
 ; CHECK: store <2 x float> zeroinitializer
-define amdgpu_kernel void @basic_merge_sext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @basic_merge_sext_index(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c) #0 {
 entry:
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
   %sext.id.x = sext i32 %id.x to i64
-  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %sext.id.x
-  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %sext.id.x
-  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
-  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+  %a.idx.x = getelementptr inbounds float, ptr addrspace(1) %a, i64 %sext.id.x
+  %c.idx.x = getelementptr inbounds float, ptr addrspace(1) %c, i64 %sext.id.x
+  %a.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %c.idx.x, i64 1
 
-  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
-  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+  %ld.c = load float, ptr addrspace(1) %c.idx.x, align 4
+  %ld.c.idx.1 = load float, ptr addrspace(1) %c.idx.x.1, align 4
 
-  store float 0.0, float addrspace(1)* %a.idx.x, align 4
-  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x.1, align 4
 
   %add = fadd float %ld.c, %ld.c.idx.1
-  store float %add, float addrspace(1)* %b, align 4
+  store float %add, ptr addrspace(1) %b, align 4
   ret void
 }
 
@@ -33,76 +33,76 @@ entry:
 ; CHECK: zext i32 %id.x to i64
 ; CHECK: load <2 x float>
 ; CHECK: store <2 x float>
-define amdgpu_kernel void @basic_merge_zext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @basic_merge_zext_index(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c) #0 {
 entry:
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
   %zext.id.x = zext i32 %id.x to i64
-  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %zext.id.x
-  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %zext.id.x
-  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
-  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+  %a.idx.x = getelementptr inbounds float, ptr addrspace(1) %a, i64 %zext.id.x
+  %c.idx.x = getelementptr inbounds float, ptr addrspace(1) %c, i64 %zext.id.x
+  %a.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %c.idx.x, i64 1
 
-  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
-  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
-  store float 0.0, float addrspace(1)* %a.idx.x, align 4
-  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+  %ld.c = load float, ptr addrspace(1) %c.idx.x, align 4
+  %ld.c.idx.1 = load float, ptr addrspace(1) %c.idx.x.1, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x.1, align 4
 
   %add = fadd float %ld.c, %ld.c.idx.1
-  store float %add, float addrspace(1)* %b, align 4
+  store float %add, ptr addrspace(1) %b, align 4
   ret void
 }
 
 ; CHECK-LABEL: @merge_op_zext_index(
 ; CHECK: load <2 x float>
 ; CHECK: store <2 x float>
-define amdgpu_kernel void @merge_op_zext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
+define amdgpu_kernel void @merge_op_zext_index(ptr addrspace(1) nocapture noalias %a, ptr addrspace(1) nocapture noalias %b, ptr addrspace(1) nocapture readonly noalias %c) #0 {
 entry:
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
   %shl = shl i32 %id.x, 2
   %zext.id.x = zext i32 %shl to i64
-  %a.0 = getelementptr inbounds float, float addrspace(1)* %a, i64 %zext.id.x
-  %c.0 = getelementptr inbounds float, float addrspace(1)* %c, i64 %zext.id.x
+  %a.0 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %zext.id.x
+  %c.0 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %zext.id.x
 
   %id.x.1 = or i32 %shl, 1
   %id.x.1.ext = zext i32 %id.x.1 to i64
 
-  %a.1 = getelementptr inbounds float, float addrspace(1)* %a, i64 %id.x.1.ext
-  %c.1 = getelementptr inbounds float, float addrspace(1)* %c, i64 %id.x.1.ext
+  %a.1 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %id.x.1.ext
+  %c.1 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %id.x.1.ext
 
-  %ld.c.0 = load float, float addrspace(1)* %c.0, align 4
-  store float 0.0, float addrspace(1)* %a.0, align 4
-  %ld.c.1 = load float, float addrspace(1)* %c.1, align 4
-  store float 0.0, float addrspace(1)* %a.1, align 4
+  %ld.c.0 = load float, ptr addrspace(1) %c.0, align 4
+  store float 0.0, ptr addrspace(1) %a.0, align 4
+  %ld.c.1 = load float, ptr addrspace(1) %c.1, align 4
+  store float 0.0, ptr addrspace(1) %a.1, align 4
 
   %add = fadd float %ld.c.0, %ld.c.1
-  store float %add, float addrspace(1)* %b, align 4
+  store float %add, ptr addrspace(1) %b, align 4
   ret void
 }
 
 ; CHECK-LABEL: @merge_op_sext_index(
 ; CHECK: load <2 x float>
 ; CHECK: store <2 x float>
-define amdgpu_kernel void @merge_op_sext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
+define amdgpu_kernel void @merge_op_sext_index(ptr addrspace(1) nocapture noalias %a, ptr addrspace(1) nocapture noalias %b, ptr addrspace(1) nocapture readonly noalias %c) #0 {
 entry:
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
   %shl = shl i32 %id.x, 2
   %zext.id.x = sext i32 %shl to i64
-  %a.0 = getelementptr inbounds float, float addrspace(1)* %a, i64 %zext.id.x
-  %c.0 = getelementptr inbounds float, float addrspace(1)* %c, i64 %zext.id.x
+  %a.0 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %zext.id.x
+  %c.0 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %zext.id.x
 
   %id.x.1 = or i32 %shl, 1
   %id.x.1.ext = sext i32 %id.x.1 to i64
 
-  %a.1 = getelementptr inbounds float, float addrspace(1)* %a, i64 %id.x.1.ext
-  %c.1 = getelementptr inbounds float, float addrspace(1)* %c, i64 %id.x.1.ext
+  %a.1 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %id.x.1.ext
+  %c.1 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %id.x.1.ext
 
-  %ld.c.0 = load float, float addrspace(1)* %c.0, align 4
-  store float 0.0, float addrspace(1)* %a.0, align 4
-  %ld.c.1 = load float, float addrspace(1)* %c.1, align 4
-  store float 0.0, float addrspace(1)* %a.1, align 4
+  %ld.c.0 = load float, ptr addrspace(1) %c.0, align 4
+  store float 0.0, ptr addrspace(1) %a.0, align 4
+  %ld.c.1 = load float, ptr addrspace(1) %c.1, align 4
+  store float 0.0, ptr addrspace(1) %a.1, align 4
 
   %add = fadd float %ld.c.0, %ld.c.1
-  store float %add, float addrspace(1)* %b, align 4
+  store float %add, ptr addrspace(1) %b, align 4
   ret void
 }
 
@@ -113,7 +113,7 @@ entry:
 ; CHECK: loop:
 ; CHECK: load <2 x i32>
 ; CHECK: store <2 x i32>
-define amdgpu_kernel void @zext_trunc_phi_1(i32 addrspace(1)* nocapture noalias %a, i32 addrspace(1)* nocapture noalias %b, i32 addrspace(1)* nocapture readonly noalias %c, i32 %n, i64 %arst, i64 %aoeu) #0 {
+define amdgpu_kernel void @zext_trunc_phi_1(ptr addrspace(1) nocapture noalias %a, ptr addrspace(1) nocapture noalias %b, ptr addrspace(1) nocapture readonly noalias %c, i32 %n, i64 %arst, i64 %aoeu) #0 {
 entry:
   %cmp0 = icmp eq i32 %n, 0
   br i1 %cmp0, label %exit, label %loop
@@ -124,18 +124,18 @@ loop:
   %idx = shl i32 %trunc.iv, 4
 
   %idx.ext = zext i32 %idx to i64
-  %c.0 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %idx.ext
-  %a.0 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idx.ext
+  %c.0 = getelementptr inbounds i32, ptr addrspace(1) %c, i64 %idx.ext
+  %a.0 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idx.ext
 
   %idx.1 = or i32 %idx, 1
   %idx.1.ext = zext i32 %idx.1 to i64
-  %c.1 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %idx.1.ext
-  %a.1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idx.1.ext
+  %c.1 = getelementptr inbounds i32, ptr addrspace(1) %c, i64 %idx.1.ext
+  %a.1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idx.1.ext
 
-  %ld.c.0 = load i32, i32 addrspace(1)* %c.0, align 4
-  store i32 %ld.c.0, i32 addrspace(1)* %a.0, align 4
-  %ld.c.1 = load i32, i32 addrspace(1)* %c.1, align 4
-  store i32 %ld.c.1, i32 addrspace(1)* %a.1, align 4
+  %ld.c.0 = load i32, ptr addrspace(1) %c.0, align 4
+  store i32 %ld.c.0, ptr addrspace(1) %a.0, align 4
+  %ld.c.1 = load i32, ptr addrspace(1) %c.1, align 4
+  store i32 %ld.c.1, ptr addrspace(1) %a.1, align 4
 
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
index 8fa6069f855c3..06eae3bf38398 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
@@ -6,27 +6,23 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; Check that vectorizer can find a GEP through bitcast
 ; CHECK-LABEL: @vect_zext_bitcast_f32_to_i32_idx
 ; CHECK: load <4 x i32>
-define void @vect_zext_bitcast_f32_to_i32_idx(float addrspace(1)* %arg1, i32 %base) {
+define void @vect_zext_bitcast_f32_to_i32_idx(ptr addrspace(1) %arg1, i32 %base) {
   %add1 = add nuw i32 %base, 0
   %zext1 = zext i32 %add1 to i64
-  %gep1 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext1
-  %f2i1 = bitcast float addrspace(1)* %gep1 to i32 addrspace(1)*
-  %load1 = load i32, i32 addrspace(1)* %f2i1, align 4
+  %gep1 = getelementptr inbounds float, ptr addrspace(1) %arg1, i64 %zext1
+  %load1 = load i32, ptr addrspace(1) %gep1, align 4
   %add2 = add nuw i32 %base, 1
   %zext2 = zext i32 %add2 to i64
-  %gep2 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext2
-  %f2i2 = bitcast float addrspace(1)* %gep2 to i32 addrspace(1)*
-  %load2 = load i32, i32 addrspace(1)* %f2i2, align 4
+  %gep2 = getelementptr inbounds float, ptr addrspace(1) %arg1, i64 %zext2
+  %load2 = load i32, ptr addrspace(1) %gep2, align 4
   %add3 = add nuw i32 %base, 2
   %zext3 = zext i32 %add3 to i64
-  %gep3 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext3
-  %f2i3 = bitcast float addrspace(1)* %gep3 to i32 addrspace(1)*
-  %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
+  %gep3 = getelementptr inbounds float, ptr addrspace(1) %arg1, i64 %zext3
+  %load3 = load i32, ptr addrspace(1) %gep3, align 4
   %add4 = add nuw i32 %base, 3
   %zext4 = zext i32 %add4 to i64
-  %gep4 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext4
-  %f2i4 = bitcast float addrspace(1)* %gep4 to i32 addrspace(1)*
-  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
+  %gep4 = getelementptr inbounds float, ptr addrspace(1) %arg1, i64 %zext4
+  %load4 = load i32, ptr addrspace(1) %gep4, align 4
   ret void
 }
 
@@ -35,70 +31,59 @@ define void @vect_zext_bitcast_f32_to_i32_idx(float addrspace(1)* %arg1, i32 %ba
 ; CHECK: load i32
 ; CHECK: load i32
 ; CHECK: load i32
-define void @vect_zext_bitcast_i8_st1_to_i32_idx(i8 addrspace(1)* %arg1, i32 %base) {
+define void @vect_zext_bitcast_i8_st1_to_i32_idx(ptr addrspace(1) %arg1, i32 %base) {
   %add1 = add nuw i32 %base, 0
   %zext1 = zext i32 %add1 to i64
-  %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext1
-  %f2i1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
-  %load1 = load i32, i32 addrspace(1)* %f2i1, align 4
+  %gep1 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %zext1
+  %load1 = load i32, ptr addrspace(1) %gep1, align 4
   %add2 = add nuw i32 %base, 1
   %zext2 = zext i32 %add2 to i64
-  %gep2 = getelementptr inbounds i8,i8 addrspace(1)* %arg1, i64 %zext2
-  %f2i2 = bitcast i8 addrspace(1)* %gep2 to i32 addrspace(1)*
-  %load2 = load i32, i32 addrspace(1)* %f2i2, align 4
+  %gep2 = getelementptr inbounds i8,ptr addrspace(1) %arg1, i64 %zext2
+  %load2 = load i32, ptr addrspace(1) %gep2, align 4
   %add3 = add nuw i32 %base, 2
   %zext3 = zext i32 %add3 to i64
-  %gep3 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext3
-  %f2i3 = bitcast i8 addrspace(1)* %gep3 to i32 addrspace(1)*
-  %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
+  %gep3 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %zext3
+  %load3 = load i32, ptr addrspace(1) %gep3, align 4
   %add4 = add nuw i32 %base, 3
   %zext4 = zext i32 %add4 to i64
-  %gep4 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext4
-  %f2i4 = bitcast i8 addrspace(1)* %gep4 to i32 addrspace(1)*
-  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
+  %gep4 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %zext4
+  %load4 = load i32, ptr addrspace(1) %gep4, align 4
   ret void
 }
 
 ; CHECK-LABEL: @vect_zext_bitcast_i8_st4_to_i32_idx
 ; CHECK: load <4 x i32>
-define void @vect_zext_bitcast_i8_st4_to_i32_idx(i8 addrspace(1)* %arg1, i32 %base) {
+define void @vect_zext_bitcast_i8_st4_to_i32_idx(ptr addrspace(1) %arg1, i32 %base) {
   %add1 = add nuw i32 %base, 0
   %zext1 = zext i32 %add1 to i64
-  %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext1
-  %f2i1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
-  %load1 = load i32, i32 addrspace(1)* %f2i1, align 4
+  %gep1 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %zext1
+  %load1 = load i32, ptr addrspace(1) %gep1, align 4
   %add2 = add nuw i32 %base, 4
   %zext2 = zext i32 %add2 to i64
-  %gep2 = getelementptr inbounds i8,i8 addrspace(1)* %arg1, i64 %zext2
-  %f2i2 = bitcast i8 addrspace(1)* %gep2 to i32 addrspace(1)*
-  %load2 = load i32, i32 addrspace(1)* %f2i2, align 4
+  %gep2 = getelementptr inbounds i8,ptr addrspace(1) %arg1, i64 %zext2
+  %load2 = load i32, ptr addrspace(1) %gep2, align 4
   %add3 = add nuw i32 %base, 8
   %zext3 = zext i32 %add3 to i64
-  %gep3 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext3
-  %f2i3 = bitcast i8 addrspace(1)* %gep3 to i32 addrspace(1)*
-  %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
+  %gep3 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %zext3
+  %load3 = load i32, ptr addrspace(1) %gep3, align 4
   %add4 = add nuw i32 %base, 12
   %zext4 = zext i32 %add4 to i64
-  %gep4 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext4
-  %f2i4 = bitcast i8 addrspace(1)* %gep4 to i32 addrspace(1)*
-  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
+  %gep4 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %zext4
+  %load4 = load i32, ptr addrspace(1) %gep4, align 4
   ret void
 }
 
 ; CHECK-LABEL: @vect_zext_bitcast_negative_ptr_delta
 ; CHECK: load <2 x i32>
-define void @vect_zext_bitcast_negative_ptr_delta(i32 addrspace(1)* %p, i32 %base) {
-  %p.bitcasted = bitcast i32 addrspace(1)* %p to i16 addrspace(1)*
+define void @vect_zext_bitcast_negative_ptr_delta(ptr addrspace(1) %p, i32 %base) {
   %a.offset = add nuw i32 %base, 4
   %t.offset.zexted = zext i32 %base to i64
   %a.offset.zexted = zext i32 %a.offset to i64
-  %t.ptr = getelementptr inbounds i16, i16 addrspace(1)* %p.bitcasted, i64 %t.offset.zexted
-  %a.ptr = getelementptr inbounds i16, i16 addrspace(1)* %p.bitcasted, i64 %a.offset.zexted
-  %b.ptr = getelementptr inbounds i16, i16 addrspace(1)* %t.ptr, i64 6
-  %a.ptr.bitcasted = bitcast i16 addrspace(1)* %a.ptr to i32 addrspace(1)*
-  %b.ptr.bitcasted = bitcast i16 addrspace(1)* %b.ptr to i32 addrspace(1)*
-  %a.val = load i32, i32 addrspace(1)* %a.ptr.bitcasted
-  %b.val = load i32, i32 addrspace(1)* %b.ptr.bitcasted
+  %t.ptr = getelementptr inbounds i16, ptr addrspace(1) %p, i64 %t.offset.zexted
+  %a.ptr = getelementptr inbounds i16, ptr addrspace(1) %p, i64 %a.offset.zexted
+  %b.ptr = getelementptr inbounds i16, ptr addrspace(1) %t.ptr, i64 6
+  %a.val = load i32, ptr addrspace(1) %a.ptr
+  %b.val = load i32, ptr addrspace(1) %b.ptr
   ret void
 }
 
@@ -106,15 +91,15 @@ define void @vect_zext_bitcast_negative_ptr_delta(i32 addrspace(1)* %p, i32 %bas
 ; CHECK-LABEL: @zexted_i1_gep_index
 ; CHECK: load i32
 ; CHECK: load i32
-define void @zexted_i1_gep_index(i32 addrspace(1)* %p, i32 %val) {
+define void @zexted_i1_gep_index(ptr addrspace(1) %p, i32 %val) {
   %selector = icmp eq i32 %val, 0
   %flipped = xor i1 %selector, 1
   %index.0 = zext i1 %selector to i64
   %index.1 = zext i1 %flipped to i64
-  %gep.0 = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 %index.0
-  %gep.1 = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 %index.1
-  %val0 = load i32, i32 addrspace(1)* %gep.0
-  %val1 = load i32, i32 addrspace(1)* %gep.1
+  %gep.0 = getelementptr inbounds i32, ptr addrspace(1) %p, i64 %index.0
+  %gep.1 = getelementptr inbounds i32, ptr addrspace(1) %p, i64 %index.1
+  %val0 = load i32, ptr addrspace(1) %gep.0
+  %val1 = load i32, ptr addrspace(1) %gep.1
   ret void
 }
 
@@ -122,14 +107,14 @@ define void @zexted_i1_gep_index(i32 addrspace(1)* %p, i32 %val) {
 ; CHECK-LABEL: @sexted_i1_gep_index
 ; CHECK: load i32
 ; CHECK: load i32
-define void @sexted_i1_gep_index(i32 addrspace(1)* %p, i32 %val) {
+define void @sexted_i1_gep_index(ptr addrspace(1) %p, i32 %val) {
   %selector = icmp eq i32 %val, 0
   %flipped = xor i1 %selector, 1
   %index.0 = sext i1 %selector to i64
   %index.1 = sext i1 %flipped to i64
-  %gep.0 = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 %index.0
-  %gep.1 = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 %index.1
-  %val0 = load i32, i32 addrspace(1)* %gep.0
-  %val1 = load i32, i32 addrspace(1)* %gep.1
+  %gep.0 = getelementptr inbounds i32, ptr addrspace(1) %p, i64 %index.0
+  %gep.1 = getelementptr inbounds i32, ptr addrspace(1) %p, i64 %index.1
+  %val0 = load i32, ptr addrspace(1) %gep.0
+  %val1 = load i32, ptr addrspace(1) %gep.1
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
index c12bd9503ffd3..418b03e653042 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
@@ -12,25 +12,25 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; CHECK: load <2 x float>
 ; CHECK: %w = add i32 %y, 9
 ; CHECK: %foo = add i32 %z, %w
-define amdgpu_kernel void @insert_load_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @insert_load_point(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
 entry:
-  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
-  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %idx
-  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
-  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+  %a.idx.x = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
+  %c.idx.x = getelementptr inbounds float, ptr addrspace(1) %c, i64 %idx
+  %a.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %c.idx.x, i64 1
 
   %z = add i32 %x, 4
-  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
+  %ld.c = load float, ptr addrspace(1) %c.idx.x, align 4
   %w = add i32 %y, 9
-  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+  %ld.c.idx.1 = load float, ptr addrspace(1) %c.idx.x.1, align 4
   %foo = add i32 %z, %w
 
-  store float 0.0, float addrspace(1)* %a.idx.x, align 4
-  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x.1, align 4
 
   %add = fadd float %ld.c, %ld.c.idx.1
-  store float %add, float addrspace(1)* %b, align 4
-  store i32 %foo, i32 addrspace(3)* null, align 4
+  store float %add, ptr addrspace(1) %b, align 4
+  store i32 %foo, ptr addrspace(3) null, align 4
   ret void
 }
 
@@ -39,25 +39,25 @@ entry:
 ; CHECK: %w = add i32 %y, 9
 ; CHECK: store <2 x float>
 ; CHECK: %foo = add i32 %z, %w
-define amdgpu_kernel void @insert_store_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @insert_store_point(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
 entry:
-  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
-  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %idx
-  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
-  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+  %a.idx.x = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
+  %c.idx.x = getelementptr inbounds float, ptr addrspace(1) %c, i64 %idx
+  %a.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %c.idx.x, i64 1
 
-  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
-  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+  %ld.c = load float, ptr addrspace(1) %c.idx.x, align 4
+  %ld.c.idx.1 = load float, ptr addrspace(1) %c.idx.x.1, align 4
 
   %z = add i32 %x, 4
-  store float 0.0, float addrspace(1)* %a.idx.x, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x, align 4
   %w = add i32 %y, 9
-  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.x.1, align 4
   %foo = add i32 %z, %w
 
   %add = fadd float %ld.c, %ld.c.idx.1
-  store float %add, float addrspace(1)* %b, align 4
-  store i32 %foo, i32 addrspace(3)* null, align 4
+  store float %add, ptr addrspace(1) %b, align 4
+  store i32 %foo, ptr addrspace(3) null, align 4
   ret void
 }
 
@@ -67,20 +67,20 @@ entry:
 ;
 ; CHECK-LABEL: @insert_store_point_alias
 ; CHECK: store <3 x float>
-; CHECK: load float, float addrspace(1)* %a.idx.2
+; CHECK: load float, ptr addrspace(1) %a.idx.2
 ; CHECK: store float
 ; CHECK-SAME: %a.idx.3
-define float @insert_store_point_alias(float addrspace(1)* nocapture %a, i64 %idx) {
-  %a.idx = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
-  %a.idx.1 = getelementptr inbounds float, float addrspace(1)* %a.idx, i64 1
-  %a.idx.2 = getelementptr inbounds float, float addrspace(1)* %a.idx.1, i64 1
-  %a.idx.3 = getelementptr inbounds float, float addrspace(1)* %a.idx.2, i64 1
-
-  store float 0.0, float addrspace(1)* %a.idx, align 4
-  store float 0.0, float addrspace(1)* %a.idx.1, align 4
-  store float 0.0, float addrspace(1)* %a.idx.2, align 4
-  %x = load float, float addrspace(1)* %a.idx.2, align 4
-  store float 0.0, float addrspace(1)* %a.idx.3, align 4
+define float @insert_store_point_alias(ptr addrspace(1) nocapture %a, i64 %idx) {
+  %a.idx = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
+  %a.idx.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx, i64 1
+  %a.idx.2 = getelementptr inbounds float, ptr addrspace(1) %a.idx.1, i64 1
+  %a.idx.3 = getelementptr inbounds float, ptr addrspace(1) %a.idx.2, i64 1
+
+  store float 0.0, ptr addrspace(1) %a.idx, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.1, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.2, align 4
+  %x = load float, ptr addrspace(1) %a.idx.2, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.3, align 4
 
   ret float %x
 }
@@ -97,20 +97,20 @@ define float @insert_store_point_alias(float addrspace(1)* nocapture %a, i64 %id
 ; CHECK-SAME: %a.idx.1
 ; CHECK: store float
 ; CHECK-SAME: %a.idx.2
-; CHECK: load float, float addrspace(1)* %a.idx.2
+; CHECK: load float, ptr addrspace(1) %a.idx.2
 ; CHECK: store float
 ; CHECK-SAME: %a.idx
-define float @insert_store_point_alias_ooo(float addrspace(1)* nocapture %a, i64 %idx) {
-  %a.idx = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
-  %a.idx.1 = getelementptr inbounds float, float addrspace(1)* %a.idx, i64 1
-  %a.idx.2 = getelementptr inbounds float, float addrspace(1)* %a.idx.1, i64 1
-  %a.idx.3 = getelementptr inbounds float, float addrspace(1)* %a.idx.2, i64 1
-
-  store float 0.0, float addrspace(1)* %a.idx.3, align 4
-  store float 0.0, float addrspace(1)* %a.idx.1, align 4
-  store float 0.0, float addrspace(1)* %a.idx.2, align 4
-  %x = load float, float addrspace(1)* %a.idx.2, align 4
-  store float 0.0, float addrspace(1)* %a.idx, align 4
+define float @insert_store_point_alias_ooo(ptr addrspace(1) nocapture %a, i64 %idx) {
+  %a.idx = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
+  %a.idx.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx, i64 1
+  %a.idx.2 = getelementptr inbounds float, ptr addrspace(1) %a.idx.1, i64 1
+  %a.idx.3 = getelementptr inbounds float, ptr addrspace(1) %a.idx.2, i64 1
+
+  store float 0.0, ptr addrspace(1) %a.idx.3, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.1, align 4
+  store float 0.0, ptr addrspace(1) %a.idx.2, align 4
+  %x = load float, ptr addrspace(1) %a.idx.2, align 4
+  store float 0.0, ptr addrspace(1) %a.idx, align 4
 
   ret float %x
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
index ca36e94d6c114..f6f98d6fbafad 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
@@ -6,22 +6,22 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; This is NOT OK to vectorize, as either load may alias either store.
 
 ; CHECK: load double
-; CHECK: store double 0.000000e+00, double addrspace(1)* %a,
+; CHECK: store double 0.000000e+00, ptr addrspace(1) %a,
 ; CHECK: load double
-; CHECK: store double 0.000000e+00, double addrspace(1)* %a.idx.1
-define amdgpu_kernel void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
+; CHECK: store double 0.000000e+00, ptr addrspace(1) %a.idx.1
+define amdgpu_kernel void @interleave(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c) #0 {
 entry:
-  %a.idx.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
-  %c.idx.1 = getelementptr inbounds double, double addrspace(1)* %c, i64 1
+  %a.idx.1 = getelementptr inbounds double, ptr addrspace(1) %a, i64 1
+  %c.idx.1 = getelementptr inbounds double, ptr addrspace(1) %c, i64 1
 
-  %ld.c = load double, double addrspace(1)* %c, align 8 ; may alias store to %a
-  store double 0.0, double addrspace(1)* %a, align 8
+  %ld.c = load double, ptr addrspace(1) %c, align 8 ; may alias store to %a
+  store double 0.0, ptr addrspace(1) %a, align 8
 
-  %ld.c.idx.1 = load double, double addrspace(1)* %c.idx.1, align 8 ; may alias store to %a
-  store double 0.0, double addrspace(1)* %a.idx.1, align 8
+  %ld.c.idx.1 = load double, ptr addrspace(1) %c.idx.1, align 8 ; may alias store to %a
+  store double 0.0, ptr addrspace(1) %a.idx.1, align 8
 
   %add = fadd double %ld.c, %ld.c.idx.1
-  store double %add, double addrspace(1)* %b
+  store double %add, ptr addrspace(1) %b
 
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
index 1fad6db04831f..9b0eb5bcea66f 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
@@ -4,22 +4,22 @@
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @interleave
-; CHECK: load <2 x double>, <2 x double> addrspace(1)* %{{.}}, align 8{{$}}
+; CHECK: load <2 x double>, ptr addrspace(1) %{{.}}, align 8{{$}}
 ; CHECK: store <2 x double> zeroinitializer
 ; CHECK: store double %add
-define amdgpu_kernel void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @interleave(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c) #0 {
 entry:
-  %a.idx.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
-  %c.idx.1 = getelementptr inbounds double, double addrspace(1)* %c, i64 1
+  %a.idx.1 = getelementptr inbounds double, ptr addrspace(1) %a, i64 1
+  %c.idx.1 = getelementptr inbounds double, ptr addrspace(1) %c, i64 1
 
-  %ld.c = load double, double addrspace(1)* %c, align 8
-  store double 0.0, double addrspace(1)* %a, align 8 ; Cannot alias invariant load
+  %ld.c = load double, ptr addrspace(1) %c, align 8
+  store double 0.0, ptr addrspace(1) %a, align 8 ; Cannot alias invariant load
 
-  %ld.c.idx.1 = load double, double addrspace(1)* %c.idx.1, align 8, !invariant.load !0
-  store double 0.0, double addrspace(1)* %a.idx.1, align 8
+  %ld.c.idx.1 = load double, ptr addrspace(1) %c.idx.1, align 8, !invariant.load !0
+  store double 0.0, ptr addrspace(1) %a.idx.1, align 8
 
   %add = fadd double %ld.c, %ld.c.idx.1
-  store double %add, double addrspace(1)* %b
+  store double %add, ptr addrspace(1) %b
 
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
index 786389a05e4e3..e906093f632ac 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
@@ -5,16 +5,16 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; CHECK-LABEL: @merge_v2i32_v2i32(
 ; CHECK: load <4 x i32>
 ; CHECK: store <4 x i32> zeroinitializer
-define amdgpu_kernel void @merge_v2i32_v2i32(<2 x i32> addrspace(1)* nocapture %a, <2 x i32> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2i32_v2i32(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture readonly %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %a, i64 1
-  %b.1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %b, i64 1
+  %a.1 = getelementptr inbounds <2 x i32>, ptr addrspace(1) %a, i64 1
+  %b.1 = getelementptr inbounds <2 x i32>, ptr addrspace(1) %b, i64 1
 
-  %ld.c = load <2 x i32>, <2 x i32> addrspace(1)* %b, align 4
-  %ld.c.idx.1 = load <2 x i32>, <2 x i32> addrspace(1)* %b.1, align 4
+  %ld.c = load <2 x i32>, ptr addrspace(1) %b, align 4
+  %ld.c.idx.1 = load <2 x i32>, ptr addrspace(1) %b.1, align 4
 
-  store <2 x i32> zeroinitializer, <2 x i32> addrspace(1)* %a, align 4
-  store <2 x i32> zeroinitializer, <2 x i32> addrspace(1)* %a.1, align 4
+  store <2 x i32> zeroinitializer, ptr addrspace(1) %a, align 4
+  store <2 x i32> zeroinitializer, ptr addrspace(1) %a.1, align 4
 
   ret void
 }
@@ -22,16 +22,16 @@ entry:
 ; CHECK-LABEL: @merge_v1i32_v1i32(
 ; CHECK: load <2 x i32>
 ; CHECK: store <2 x i32> zeroinitializer
-define amdgpu_kernel void @merge_v1i32_v1i32(<1 x i32> addrspace(1)* nocapture %a, <1 x i32> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v1i32_v1i32(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture readonly %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds <1 x i32>, <1 x i32> addrspace(1)* %a, i64 1
-  %b.1 = getelementptr inbounds <1 x i32>, <1 x i32> addrspace(1)* %b, i64 1
+  %a.1 = getelementptr inbounds <1 x i32>, ptr addrspace(1) %a, i64 1
+  %b.1 = getelementptr inbounds <1 x i32>, ptr addrspace(1) %b, i64 1
 
-  %ld.c = load <1 x i32>, <1 x i32> addrspace(1)* %b, align 4
-  %ld.c.idx.1 = load <1 x i32>, <1 x i32> addrspace(1)* %b.1, align 4
+  %ld.c = load <1 x i32>, ptr addrspace(1) %b, align 4
+  %ld.c.idx.1 = load <1 x i32>, ptr addrspace(1) %b.1, align 4
 
-  store <1 x i32> zeroinitializer, <1 x i32> addrspace(1)* %a, align 4
-  store <1 x i32> zeroinitializer, <1 x i32> addrspace(1)* %a.1, align 4
+  store <1 x i32> zeroinitializer, ptr addrspace(1) %a, align 4
+  store <1 x i32> zeroinitializer, ptr addrspace(1) %a.1, align 4
 
   ret void
 }
@@ -41,16 +41,16 @@ entry:
 ; CHECK: load <3 x i32>
 ; CHECK: store <3 x i32> zeroinitializer
 ; CHECK: store <3 x i32> zeroinitializer
-define amdgpu_kernel void @no_merge_v3i32_v3i32(<3 x i32> addrspace(1)* nocapture %a, <3 x i32> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @no_merge_v3i32_v3i32(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture readonly %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %a, i64 1
-  %b.1 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %b, i64 1
+  %a.1 = getelementptr inbounds <3 x i32>, ptr addrspace(1) %a, i64 1
+  %b.1 = getelementptr inbounds <3 x i32>, ptr addrspace(1) %b, i64 1
 
-  %ld.c = load <3 x i32>, <3 x i32> addrspace(1)* %b, align 4
-  %ld.c.idx.1 = load <3 x i32>, <3 x i32> addrspace(1)* %b.1, align 4
+  %ld.c = load <3 x i32>, ptr addrspace(1) %b, align 4
+  %ld.c.idx.1 = load <3 x i32>, ptr addrspace(1) %b.1, align 4
 
-  store <3 x i32> zeroinitializer, <3 x i32> addrspace(1)* %a, align 4
-  store <3 x i32> zeroinitializer, <3 x i32> addrspace(1)* %a.1, align 4
+  store <3 x i32> zeroinitializer, ptr addrspace(1) %a, align 4
+  store <3 x i32> zeroinitializer, ptr addrspace(1) %a.1, align 4
 
   ret void
 }
@@ -58,16 +58,16 @@ entry:
 ; CHECK-LABEL: @merge_v2i16_v2i16(
 ; CHECK: load <4 x i16>
 ; CHECK: store <4 x i16> zeroinitializer
-define amdgpu_kernel void @merge_v2i16_v2i16(<2 x i16> addrspace(1)* nocapture %a, <2 x i16> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2i16_v2i16(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture readonly %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %a, i64 1
-  %b.1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %b, i64 1
+  %a.1 = getelementptr inbounds <2 x i16>, ptr addrspace(1) %a, i64 1
+  %b.1 = getelementptr inbounds <2 x i16>, ptr addrspace(1) %b, i64 1
 
-  %ld.c = load <2 x i16>, <2 x i16> addrspace(1)* %b, align 4
-  %ld.c.idx.1 = load <2 x i16>, <2 x i16> addrspace(1)* %b.1, align 4
+  %ld.c = load <2 x i16>, ptr addrspace(1) %b, align 4
+  %ld.c.idx.1 = load <2 x i16>, ptr addrspace(1) %b.1, align 4
 
-  store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %a, align 4
-  store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %a.1, align 4
+  store <2 x i16> zeroinitializer, ptr addrspace(1) %a, align 4
+  store <2 x i16> zeroinitializer, ptr addrspace(1) %a.1, align 4
 
   ret void
 }
@@ -76,13 +76,12 @@ entry:
 ; CHECK-LABEL: @merge_load_i32_v2i16(
 ; CHECK: load i32,
 ; CHECK: load <2 x i16>
-define amdgpu_kernel void @merge_load_i32_v2i16(i32 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_i32_v2i16(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 1
-  %a.1.cast = bitcast i32 addrspace(1)* %a.1 to <2 x i16> addrspace(1)*
+  %a.1 = getelementptr inbounds i32, ptr addrspace(1) %a, i32 1
 
-  %ld.0 = load i32, i32 addrspace(1)* %a
-  %ld.1 = load <2 x i16>, <2 x i16> addrspace(1)* %a.1.cast
+  %ld.0 = load i32, ptr addrspace(1) %a
+  %ld.1 = load <2 x i16>, ptr addrspace(1) %a.1
 
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
index 134c4b163c0e4..7dadbee6b74b7 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
@@ -14,14 +14,14 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 
 define amdgpu_kernel void @no_crash(i32 %arg) {
   %tmp2 = add i32 %arg, 14
-  %tmp3 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %tmp2
+  %tmp3 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %tmp2
   %tmp4 = add i32 %arg, 15
-  %tmp5 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %tmp4
+  %tmp5 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %tmp4
 
-  store i32 0, i32 addrspace(3)* %tmp3, align 4
-  store i32 0, i32 addrspace(3)* %tmp5, align 4
-  store i32 0, i32 addrspace(3)* %tmp5, align 4
-  store i32 0, i32 addrspace(3)* %tmp5, align 4
+  store i32 0, ptr addrspace(3) %tmp3, align 4
+  store i32 0, ptr addrspace(3) %tmp5, align 4
+  store i32 0, ptr addrspace(3) %tmp5, align 4
+  store i32 0, ptr addrspace(3) %tmp5, align 4
 
   ret void
 }
@@ -51,22 +51,22 @@ define amdgpu_kernel void @interleave_get_longest(i32 %arg) {
   %a2 = add i32 %arg, 2
   %a3 = add i32 %arg, 3
   %a4 = add i32 %arg, 4
-  %tmp1 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %arg
-  %tmp2 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %a1
-  %tmp3 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %a2
-  %tmp4 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %a3
-  %tmp5 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %a4
+  %tmp1 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %arg
+  %tmp2 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %a1
+  %tmp3 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %a2
+  %tmp4 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %a3
+  %tmp5 = getelementptr [16384 x i32], ptr addrspace(3) @0, i32 0, i32 %a4
 
-  %l1 = load i32, i32 addrspace(3)* %tmp2, align 4
-  %l2 = load i32, i32 addrspace(3)* %tmp1, align 4
-  store i32 0, i32 addrspace(3)* %tmp2, align 4
-  store i32 0, i32 addrspace(3)* %tmp1, align 4
-  %l3 = load i32, i32 addrspace(3)* %tmp2, align 4
-  %l4 = load i32, i32 addrspace(3)* %tmp3, align 4
-  %l5 = load i32, i32 addrspace(3)* %tmp4, align 4
-  %l6 = load i32, i32 addrspace(3)* %tmp5, align 4
-  %l7 = load i32, i32 addrspace(3)* %tmp5, align 4
-  %l8 = load i32, i32 addrspace(3)* %tmp5, align 4
+  %l1 = load i32, ptr addrspace(3) %tmp2, align 4
+  %l2 = load i32, ptr addrspace(3) %tmp1, align 4
+  store i32 0, ptr addrspace(3) %tmp2, align 4
+  store i32 0, ptr addrspace(3) %tmp1, align 4
+  %l3 = load i32, ptr addrspace(3) %tmp2, align 4
+  %l4 = load i32, ptr addrspace(3) %tmp3, align 4
+  %l5 = load i32, ptr addrspace(3) %tmp4, align 4
+  %l6 = load i32, ptr addrspace(3) %tmp5, align 4
+  %l7 = load i32, ptr addrspace(3) %tmp5, align 4
+  %l8 = load i32, ptr addrspace(3) %tmp5, align 4
 
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
index 47f222642e966..7161d7c03b6e5 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
@@ -7,15 +7,15 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; CHECK: store i32
 ; CHECK: store i32
 ; CHECK: store i32
-define amdgpu_kernel void @no_implicit_float(i32 addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
-  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+define amdgpu_kernel void @no_implicit_float(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+  %out.gep.2 = getelementptr i32, ptr addrspace(1) %out, i32 2
+  %out.gep.3 = getelementptr i32, ptr addrspace(1) %out, i32 3
 
-  store i32 123, i32 addrspace(1)* %out.gep.1
-  store i32 456, i32 addrspace(1)* %out.gep.2
-  store i32 333, i32 addrspace(1)* %out.gep.3
-  store i32 1234, i32 addrspace(1)* %out
+  store i32 123, ptr addrspace(1) %out.gep.1
+  store i32 456, ptr addrspace(1) %out.gep.2
+  store i32 333, ptr addrspace(1) %out.gep.3
+  store i32 1234, ptr addrspace(1) %out
   ret void
 }
 

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
index 6a394aba1c4dd..a132cc49a95fc 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
@@ -5,20 +5,20 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; CHECK-LABEL: @optnone(
 ; CHECK: store i32
 ; CHECK: store i32
-define amdgpu_kernel void @optnone(i32 addrspace(1)* %out) noinline optnone {
-  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+define amdgpu_kernel void @optnone(ptr addrspace(1) %out) noinline optnone {
+  %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
 
-  store i32 123, i32 addrspace(1)* %out.gep.1
-  store i32 456, i32 addrspace(1)* %out
+  store i32 123, ptr addrspace(1) %out.gep.1
+  store i32 456, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @do_opt(
 ; CHECK: store <2 x i32>
-define amdgpu_kernel void @do_opt(i32 addrspace(1)* %out) {
-  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+define amdgpu_kernel void @do_opt(ptr addrspace(1) %out) {
+  %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
 
-  store i32 123, i32 addrspace(1)* %out.gep.1
-  store i32 456, i32 addrspace(1)* %out
+  store i32 123, ptr addrspace(1) %out.gep.1
+  store i32 456, ptr addrspace(1) %out
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
index fc3de464f5fd8..e0985c2f57ec8 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
@@ -6,38 +6,38 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
 
 ; CHECK-LABEL: @merge_v2p1i8(
 ; CHECK: load <2 x i64>
-; CHECK: inttoptr i64 %{{[^ ]+}} to i8 addrspace(1)*
-; CHECK: inttoptr i64 %{{[^ ]+}} to i8 addrspace(1)*
+; CHECK: inttoptr i64 %{{[^ ]+}} to ptr addrspace(1)
+; CHECK: inttoptr i64 %{{[^ ]+}} to ptr addrspace(1)
 ; CHECK: store <2 x i64> zeroinitializer
-define amdgpu_kernel void @merge_v2p1i8(i8 addrspace(1)* addrspace(1)* nocapture %a, i8 addrspace(1)* addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2p1i8(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture readonly %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
-  %b.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b, i64 1
+  %a.1 = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) %a, i64 1
+  %b.1 = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) %b, i64 1
 
-  %ld.c = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b, align 4
-  %ld.c.idx.1 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b.1, align 4
+  %ld.c = load ptr addrspace(1), ptr addrspace(1) %b, align 4
+  %ld.c.idx.1 = load ptr addrspace(1), ptr addrspace(1) %b.1, align 4
 
-  store i8 addrspace(1)* null, i8 addrspace(1)* addrspace(1)* %a, align 4
-  store i8 addrspace(1)* null, i8 addrspace(1)* addrspace(1)* %a.1, align 4
+  store ptr addrspace(1) null, ptr addrspace(1) %a, align 4
+  store ptr addrspace(1) null, ptr addrspace(1) %a.1, align 4
 
   ret void
 }
 
 ; CHECK-LABEL: @merge_v2p3i8(
 ; CHECK: load <2 x i32>
-; CHECK: inttoptr i32 %{{[^ ]+}} to i8 addrspace(3)*
-; CHECK: inttoptr i32 %{{[^ ]+}} to i8 addrspace(3)*
+; CHECK: inttoptr i32 %{{[^ ]+}} to ptr addrspace(3)
+; CHECK: inttoptr i32 %{{[^ ]+}} to ptr addrspace(3)
 ; CHECK: store <2 x i32> zeroinitializer
-define amdgpu_kernel void @merge_v2p3i8(i8 addrspace(3)* addrspace(3)* nocapture %a, i8 addrspace(3)* addrspace(3)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2p3i8(ptr addrspace(3) nocapture %a, ptr addrspace(3) nocapture readonly %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %a, i64 1
-  %b.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %b, i64 1
+  %a.1 = getelementptr inbounds ptr addrspace(3), ptr addrspace(3) %a, i64 1
+  %b.1 = getelementptr inbounds ptr addrspace(3), ptr addrspace(3) %b, i64 1
 
-  %ld.c = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %b, align 4
-  %ld.c.idx.1 = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %b.1, align 4
+  %ld.c = load ptr addrspace(3), ptr addrspace(3) %b, align 4
+  %ld.c.idx.1 = load ptr addrspace(3), ptr addrspace(3) %b.1, align 4
 
-  store i8 addrspace(3)* null, i8 addrspace(3)* addrspace(3)* %a, align 4
-  store i8 addrspace(3)* null, i8 addrspace(3)* addrspace(3)* %a.1, align 4
+  store ptr addrspace(3) null, ptr addrspace(3) %a, align 4
+  store ptr addrspace(3) null, ptr addrspace(3) %a.1, align 4
 
   ret void
 }
@@ -45,14 +45,13 @@ entry:
 ; CHECK-LABEL: @merge_load_i64_ptr64(
 ; CHECK: load <2 x i64>
 ; CHECK: [[ELT1:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 1
-; CHECK: inttoptr i64 [[ELT1]] to i8 addrspace(1)*
-define amdgpu_kernel void @merge_load_i64_ptr64(i64 addrspace(1)* nocapture %a) #0 {
+; CHECK: inttoptr i64 [[ELT1]] to ptr addrspace(1)
+define amdgpu_kernel void @merge_load_i64_ptr64(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
-  %a.1.cast = bitcast i64 addrspace(1)* %a.1 to i8 addrspace(1)* addrspace(1)*
+  %a.1 = getelementptr inbounds i64, ptr addrspace(1) %a, i64 1
 
-  %ld.0 = load i64, i64 addrspace(1)* %a
-  %ld.1 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a.1.cast
+  %ld.0 = load i64, ptr addrspace(1) %a
+  %ld.1 = load ptr addrspace(1), ptr addrspace(1) %a.1
 
   ret void
 }
@@ -60,45 +59,42 @@ entry:
 ; CHECK-LABEL: @merge_load_ptr64_i64(
 ; CHECK: load <2 x i64>
 ; CHECK: [[ELT0:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 0
-; CHECK: inttoptr i64 [[ELT0]] to i8 addrspace(1)*
-define amdgpu_kernel void @merge_load_ptr64_i64(i64 addrspace(1)* nocapture %a) #0 {
+; CHECK: inttoptr i64 [[ELT0]] to ptr addrspace(1)
+define amdgpu_kernel void @merge_load_ptr64_i64(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
-  %a.1 =  getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
+  %a.1 =  getelementptr inbounds i64, ptr addrspace(1) %a, i64 1
 
-  %ld.0 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a.cast
-  %ld.1 = load i64, i64 addrspace(1)* %a.1
+  %ld.0 = load ptr addrspace(1), ptr addrspace(1) %a
+  %ld.1 = load i64, ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_ptr64_i64(
-; CHECK: [[ELT0:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr0 to i64
+; CHECK: [[ELT0:%[^ ]+]] = ptrtoint ptr addrspace(1) %ptr0 to i64
 ; CHECK: insertelement <2 x i64> poison, i64 [[ELT0]], i32 0
 ; CHECK: store <2 x i64>
-define amdgpu_kernel void @merge_store_ptr64_i64(i64 addrspace(1)* nocapture %a, i8 addrspace(1)* %ptr0, i64 %val1) #0 {
+define amdgpu_kernel void @merge_store_ptr64_i64(ptr addrspace(1) nocapture %a, ptr addrspace(1) %ptr0, i64 %val1) #0 {
 entry:
-  %a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
-  %a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
+  %a.1 = getelementptr inbounds i64, ptr addrspace(1) %a, i64 1
 
 
-  store i8 addrspace(1)* %ptr0, i8 addrspace(1)* addrspace(1)* %a.cast
-  store i64 %val1, i64 addrspace(1)* %a.1
+  store ptr addrspace(1) %ptr0, ptr addrspace(1) %a
+  store i64 %val1, ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_i64_ptr64(
-; CHECK: [[ELT1:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr1 to i64
+; CHECK: [[ELT1:%[^ ]+]] = ptrtoint ptr addrspace(1) %ptr1 to i64
 ; CHECK: insertelement <2 x i64> %{{[^ ]+}}, i64 [[ELT1]], i32 1
 ; CHECK: store <2 x i64>
-define amdgpu_kernel void @merge_store_i64_ptr64(i8 addrspace(1)* addrspace(1)* nocapture %a, i64 %val0, i8 addrspace(1)* %ptr1) #0 {
+define amdgpu_kernel void @merge_store_i64_ptr64(ptr addrspace(1) nocapture %a, i64 %val0, ptr addrspace(1) %ptr1) #0 {
 entry:
-  %a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
-  %a.cast = bitcast i8 addrspace(1)* addrspace(1)* %a to i64 addrspace(1)*
+  %a.1 = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) %a, i64 1
 
-  store i64 %val0, i64 addrspace(1)* %a.cast
-  store i8 addrspace(1)* %ptr1, i8 addrspace(1)* addrspace(1)* %a.1
+  store i64 %val0, ptr addrspace(1) %a
+  store ptr addrspace(1) %ptr1, ptr addrspace(1) %a.1
 
   ret void
 }
@@ -106,14 +102,13 @@ entry:
 ; CHECK-LABEL: @merge_load_i32_ptr32(
 ; CHECK: load <2 x i32>
 ; CHECK: [[ELT1:%[^ ]+]] = extractelement <2 x i32> %{{[^ ]+}}, i32 1
-; CHECK: inttoptr i32 [[ELT1]] to i8 addrspace(3)*
-define amdgpu_kernel void @merge_load_i32_ptr32(i32 addrspace(3)* nocapture %a) #0 {
+; CHECK: inttoptr i32 [[ELT1]] to ptr addrspace(3)
+define amdgpu_kernel void @merge_load_i32_ptr32(ptr addrspace(3) nocapture %a) #0 {
 entry:
-  %a.1 = getelementptr inbounds i32, i32 addrspace(3)* %a, i32 1
-  %a.1.cast = bitcast i32 addrspace(3)* %a.1 to i8 addrspace(3)* addrspace(3)*
+  %a.1 = getelementptr inbounds i32, ptr addrspace(3) %a, i32 1
 
-  %ld.0 = load i32, i32 addrspace(3)* %a
-  %ld.1 = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %a.1.cast
+  %ld.0 = load i32, ptr addrspace(3) %a
+  %ld.1 = load ptr addrspace(3), ptr addrspace(3) %a.1
 
   ret void
 }
@@ -121,137 +116,129 @@ entry:
 ; CHECK-LABEL: @merge_load_ptr32_i32(
 ; CHECK: load <2 x i32>
 ; CHECK: [[ELT0:%[^ ]+]] = extractelement <2 x i32> %{{[^ ]+}}, i32 0
-; CHECK: inttoptr i32 [[ELT0]] to i8 addrspace(3)*
-define amdgpu_kernel void @merge_load_ptr32_i32(i32 addrspace(3)* nocapture %a) #0 {
+; CHECK: inttoptr i32 [[ELT0]] to ptr addrspace(3)
+define amdgpu_kernel void @merge_load_ptr32_i32(ptr addrspace(3) nocapture %a) #0 {
 entry:
-  %a.cast = bitcast i32 addrspace(3)* %a to i8 addrspace(3)* addrspace(3)*
-  %a.1 = getelementptr inbounds i32, i32 addrspace(3)* %a, i32 1
+  %a.1 = getelementptr inbounds i32, ptr addrspace(3) %a, i32 1
 
-  %ld.0 = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %a.cast
-  %ld.1 = load i32, i32 addrspace(3)* %a.1
+  %ld.0 = load ptr addrspace(3), ptr addrspace(3) %a
+  %ld.1 = load i32, ptr addrspace(3) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_ptr32_i32(
-; CHECK: [[ELT0:%[^ ]+]] = ptrtoint i8 addrspace(3)* %ptr0 to i32
+; CHECK: [[ELT0:%[^ ]+]] = ptrtoint ptr addrspace(3) %ptr0 to i32
 ; CHECK: insertelement <2 x i32> poison, i32 [[ELT0]], i32 0
 ; CHECK: store <2 x i32>
-define amdgpu_kernel void @merge_store_ptr32_i32(i32 addrspace(3)* nocapture %a, i8 addrspace(3)* %ptr0, i32 %val1) #0 {
+define amdgpu_kernel void @merge_store_ptr32_i32(ptr addrspace(3) nocapture %a, ptr addrspace(3) %ptr0, i32 %val1) #0 {
 entry:
-  %a.cast = bitcast i32 addrspace(3)* %a to i8 addrspace(3)* addrspace(3)*
-  %a.1 = getelementptr inbounds i32, i32 addrspace(3)* %a, i32 1
+  %a.1 = getelementptr inbounds i32, ptr addrspace(3) %a, i32 1
 
-  store i8 addrspace(3)* %ptr0, i8 addrspace(3)* addrspace(3)* %a.cast
-  store i32 %val1, i32 addrspace(3)* %a.1
+  store ptr addrspace(3) %ptr0, ptr addrspace(3) %a
+  store i32 %val1, ptr addrspace(3) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_i32_ptr32(
-; CHECK: [[ELT1:%[^ ]+]] = ptrtoint i8 addrspace(3)* %ptr1 to i32
+; CHECK: [[ELT1:%[^ ]+]] = ptrtoint ptr addrspace(3) %ptr1 to i32
 ; CHECK: insertelement <2 x i32> %{{[^ ]+}}, i32 [[ELT1]], i32 1
 ; CHECK: store <2 x i32>
-define amdgpu_kernel void @merge_store_i32_ptr32(i8 addrspace(3)* addrspace(3)* nocapture %a, i32 %val0, i8 addrspace(3)* %ptr1) #0 {
+define amdgpu_kernel void @merge_store_i32_ptr32(ptr addrspace(3) nocapture %a, i32 %val0, ptr addrspace(3) %ptr1) #0 {
 entry:
-  %a.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %a, i32 1
-  %a.cast = bitcast i8 addrspace(3)* addrspace(3)* %a to i32 addrspace(3)*
+  %a.1 = getelementptr inbounds ptr addrspace(3), ptr addrspace(3) %a, i32 1
 
-  store i32 %val0, i32 addrspace(3)* %a.cast
-  store i8 addrspace(3)* %ptr1, i8 addrspace(3)* addrspace(3)* %a.1
+  store i32 %val0, ptr addrspace(3) %a
+  store ptr addrspace(3) %ptr1, ptr addrspace(3) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @no_merge_store_ptr32_i64(
-; CHECK: store i8 addrspace(3)*
+; CHECK: store ptr addrspace(3)
 ; CHECK: store i64
-define amdgpu_kernel void @no_merge_store_ptr32_i64(i64 addrspace(1)* nocapture %a, i8 addrspace(3)* %ptr0, i64 %val1) #0 {
+define amdgpu_kernel void @no_merge_store_ptr32_i64(ptr addrspace(1) nocapture %a, ptr addrspace(3) %ptr0, i64 %val1) #0 {
 entry:
-  %a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(3)* addrspace(1)*
-  %a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
+  %a.1 = getelementptr inbounds i64, ptr addrspace(1) %a, i64 1
 
 
-  store i8 addrspace(3)* %ptr0, i8 addrspace(3)* addrspace(1)* %a.cast
-  store i64 %val1, i64 addrspace(1)* %a.1
+  store ptr addrspace(3) %ptr0, ptr addrspace(1) %a
+  store i64 %val1, ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @no_merge_store_i64_ptr32(
 ; CHECK: store i64
-; CHECK: store i8 addrspace(3)*
-define amdgpu_kernel void @no_merge_store_i64_ptr32(i8 addrspace(3)* addrspace(1)* nocapture %a, i64 %val0, i8 addrspace(3)* %ptr1) #0 {
+; CHECK: store ptr addrspace(3)
+define amdgpu_kernel void @no_merge_store_i64_ptr32(ptr addrspace(1) nocapture %a, i64 %val0, ptr addrspace(3) %ptr1) #0 {
 entry:
-  %a.1 =  getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(1)* %a, i64 1
-  %a.cast = bitcast i8 addrspace(3)* addrspace(1)* %a to i64 addrspace(1)*
+  %a.1 =  getelementptr inbounds ptr addrspace(3), ptr addrspace(1) %a, i64 1
 
-  store i64 %val0, i64 addrspace(1)* %a.cast
-  store i8 addrspace(3)* %ptr1, i8 addrspace(3)* addrspace(1)* %a.1
+  store i64 %val0, ptr addrspace(1) %a
+  store ptr addrspace(3) %ptr1, ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @no_merge_load_i64_ptr32(
 ; CHECK: load i64,
-; CHECK: load i8 addrspace(3)*,
-define amdgpu_kernel void @no_merge_load_i64_ptr32(i64 addrspace(1)* nocapture %a) #0 {
+; CHECK: load ptr addrspace(3),
+define amdgpu_kernel void @no_merge_load_i64_ptr32(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
-  %a.1.cast = bitcast i64 addrspace(1)* %a.1 to i8 addrspace(3)* addrspace(1)*
+  %a.1 = getelementptr inbounds i64, ptr addrspace(1) %a, i64 1
 
-  %ld.0 = load i64, i64 addrspace(1)* %a
-  %ld.1 = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(1)* %a.1.cast
+  %ld.0 = load i64, ptr addrspace(1) %a
+  %ld.1 = load ptr addrspace(3), ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @no_merge_load_ptr32_i64(
-; CHECK: load i8 addrspace(3)*,
+; CHECK: load ptr addrspace(3),
 ; CHECK: load i64,
-define amdgpu_kernel void @no_merge_load_ptr32_i64(i64 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @no_merge_load_ptr32_i64(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(3)* addrspace(1)*
-  %a.1 =  getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
+  %a.1 =  getelementptr inbounds i64, ptr addrspace(1) %a, i64 1
 
-  %ld.0 = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(1)* %a.cast
-  %ld.1 = load i64, i64 addrspace(1)* %a.1
+  %ld.0 = load ptr addrspace(3), ptr addrspace(1) %a
+  %ld.1 = load i64, ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; XXX - This isn't merged for some reason
 ; CHECK-LABEL: @merge_v2p1i8_v2p1i8(
-; CHECK: load <2 x i8 addrspace(1)*>
-; CHECK: load <2 x i8 addrspace(1)*>
-; CHECK: store <2 x i8 addrspace(1)*>
-; CHECK: store <2 x i8 addrspace(1)*>
-define amdgpu_kernel void @merge_v2p1i8_v2p1i8(<2 x i8 addrspace(1)*> addrspace(1)* nocapture noalias %a, <2 x i8 addrspace(1)*> addrspace(1)* nocapture readonly noalias %b) #0 {
+; CHECK: load <2 x ptr addrspace(1)>
+; CHECK: load <2 x ptr addrspace(1)>
+; CHECK: store <2 x ptr addrspace(1)>
+; CHECK: store <2 x ptr addrspace(1)>
+define amdgpu_kernel void @merge_v2p1i8_v2p1i8(ptr addrspace(1) nocapture noalias %a, ptr addrspace(1) nocapture readonly noalias %b) #0 {
 entry:
-  %a.1 = getelementptr inbounds <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*> addrspace(1)* %a, i64 1
-  %b.1 = getelementptr inbounds <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*> addrspace(1)* %b, i64 1
+  %a.1 = getelementptr inbounds <2 x ptr addrspace(1)>, ptr addrspace(1) %a, i64 1
+  %b.1 = getelementptr inbounds <2 x ptr addrspace(1)>, ptr addrspace(1) %b, i64 1
 
-  %ld.c = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*> addrspace(1)* %b, align 4
-  %ld.c.idx.1 = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*> addrspace(1)* %b.1, align 4
+  %ld.c = load <2 x ptr addrspace(1)>, ptr addrspace(1) %b, align 4
+  %ld.c.idx.1 = load <2 x ptr addrspace(1)>, ptr addrspace(1) %b.1, align 4
 
-  store <2 x i8 addrspace(1)*> zeroinitializer, <2 x i8 addrspace(1)*> addrspace(1)* %a, align 4
-  store <2 x i8 addrspace(1)*> zeroinitializer, <2 x i8 addrspace(1)*> addrspace(1)* %a.1, align 4
+  store <2 x ptr addrspace(1)> zeroinitializer, ptr addrspace(1) %a, align 4
+  store <2 x ptr addrspace(1)> zeroinitializer, ptr addrspace(1) %a.1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @merge_load_ptr64_f64(
 ; CHECK: load <2 x i64>
 ; CHECK: [[ELT0:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 0
-; CHECK: [[ELT0_INT:%[^ ]+]] = inttoptr i64 [[ELT0]] to i8 addrspace(1)*
+; CHECK: [[ELT0_INT:%[^ ]+]] = inttoptr i64 [[ELT0]] to ptr addrspace(1)
 ; CHECK: [[ELT1_INT:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 1
 ; CHECK: bitcast i64 [[ELT1_INT]] to double
-define amdgpu_kernel void @merge_load_ptr64_f64(double addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_ptr64_f64(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.cast = bitcast double addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
-  %a.1 =  getelementptr inbounds double, double addrspace(1)* %a, i64 1
+  %a.1 =  getelementptr inbounds double, ptr addrspace(1) %a, i64 1
 
-  %ld.0 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a.cast
-  %ld.1 = load double, double addrspace(1)* %a.1
+  %ld.0 = load ptr addrspace(1), ptr addrspace(1) %a
+  %ld.1 = load double, ptr addrspace(1) %a.1
 
   ret void
 }
@@ -261,31 +248,29 @@ entry:
 ; CHECK: [[ELT0:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 0
 ; CHECK: bitcast i64 [[ELT0]] to double
 ; CHECK: [[ELT1:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 1
-; CHECK: inttoptr i64 [[ELT1]] to i8 addrspace(1)*
-define amdgpu_kernel void @merge_load_f64_ptr64(double addrspace(1)* nocapture %a) #0 {
+; CHECK: inttoptr i64 [[ELT1]] to ptr addrspace(1)
+define amdgpu_kernel void @merge_load_f64_ptr64(ptr addrspace(1) nocapture %a) #0 {
 entry:
-  %a.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
-  %a.1.cast = bitcast double addrspace(1)* %a.1 to i8 addrspace(1)* addrspace(1)*
+  %a.1 = getelementptr inbounds double, ptr addrspace(1) %a, i64 1
 
-  %ld.0 = load double, double addrspace(1)* %a
-  %ld.1 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a.1.cast
+  %ld.0 = load double, ptr addrspace(1) %a
+  %ld.1 = load ptr addrspace(1), ptr addrspace(1) %a.1
 
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_ptr64_f64(
-; CHECK: [[ELT0_INT:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr0 to i64
+; CHECK: [[ELT0_INT:%[^ ]+]] = ptrtoint ptr addrspace(1) %ptr0 to i64
 ; CHECK: insertelement <2 x i64> poison, i64 [[ELT0_INT]], i32 0
 ; CHECK: [[ELT1_INT:%[^ ]+]] = bitcast double %val1 to i64
 ; CHECK: insertelement <2 x i64> %{{[^ ]+}}, i64 [[ELT1_INT]], i32 1
 ; CHECK: store <2 x i64>
-define amdgpu_kernel void @merge_store_ptr64_f64(double addrspace(1)* nocapture %a, i8 addrspace(1)* %ptr0, double %val1) #0 {
+define amdgpu_kernel void @merge_store_ptr64_f64(ptr addrspace(1) nocapture %a, ptr addrspace(1) %ptr0, double %val1) #0 {
 entry:
-  %a.cast = bitcast double addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
-  %a.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
+  %a.1 = getelementptr inbounds double, ptr addrspace(1) %a, i64 1
 
-  store i8 addrspace(1)* %ptr0, i8 addrspace(1)* addrspace(1)* %a.cast
-  store double %val1, double addrspace(1)* %a.1
+  store ptr addrspace(1) %ptr0, ptr addrspace(1) %a
+  store double %val1, ptr addrspace(1) %a.1
 
   ret void
 }
@@ -293,16 +278,15 @@ entry:
 ; CHECK-LABEL: @merge_store_f64_ptr64(
 ; CHECK: [[ELT0_INT:%[^ ]+]] = bitcast double %val0 to i64
 ; CHECK: insertelement <2 x i64> poison, i64 [[ELT0_INT]], i32 0
-; CHECK: [[ELT1_INT:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr1 to i64
+; CHECK: [[ELT1_INT:%[^ ]+]] = ptrtoint ptr addrspace(1) %ptr1 to i64
 ; CHECK: insertelement <2 x i64> %{{[^ ]+}}, i64 [[ELT1_INT]], i32 1
 ; CHECK: store <2 x i64>
-define amdgpu_kernel void @merge_store_f64_ptr64(i8 addrspace(1)* addrspace(1)* nocapture %a, double %val0, i8 addrspace(1)* %ptr1) #0 {
+define amdgpu_kernel void @merge_store_f64_ptr64(ptr addrspace(1) nocapture %a, double %val0, ptr addrspace(1) %ptr1) #0 {
 entry:
-  %a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
-  %a.cast = bitcast i8 addrspace(1)* addrspace(1)* %a to double addrspace(1)*
+  %a.1 = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) %a, i64 1
 
-  store double %val0, double addrspace(1)* %a.cast
-  store i8 addrspace(1)* %ptr1, i8 addrspace(1)* addrspace(1)* %a.1
+  store double %val0, ptr addrspace(1) %a
+  store ptr addrspace(1) %ptr1, ptr addrspace(1) %a.1
 
   ret void
 }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
index 1f9aea29f9ced..78eddcb335632 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
@@ -2,28 +2,28 @@
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
-define void @base_case(i1 %cnd, i32 addrspace(1)* %a, i32 addrspace(1)* %b, <3 x i32> addrspace(1)* %out) {
+define void @base_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @base_case
 ; CHECK: load <3 x i32>
 entry:
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 1
-  %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 2
-  %gep4 = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 1
-  %gep5 = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 2
-  %selected = select i1 %cnd, i32 addrspace(1)* %a, i32 addrspace(1)* %b
-  %selected14 = select i1 %cnd, i32 addrspace(1)* %gep1, i32 addrspace(1)* %gep4
-  %selected25 = select i1 %cnd, i32 addrspace(1)* %gep2, i32 addrspace(1)* %gep5
-  %val0 = load i32, i32 addrspace(1)* %selected, align 4
-  %val1 = load i32, i32 addrspace(1)* %selected14, align 4
-  %val2 = load i32, i32 addrspace(1)* %selected25, align 4
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 1
+  %gep2 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 2
+  %gep4 = getelementptr inbounds i32, ptr addrspace(1) %b, i64 1
+  %gep5 = getelementptr inbounds i32, ptr addrspace(1) %b, i64 2
+  %selected = select i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b
+  %selected14 = select i1 %cnd, ptr addrspace(1) %gep1, ptr addrspace(1) %gep4
+  %selected25 = select i1 %cnd, ptr addrspace(1) %gep2, ptr addrspace(1) %gep5
+  %val0 = load i32, ptr addrspace(1) %selected, align 4
+  %val1 = load i32, ptr addrspace(1) %selected14, align 4
+  %val2 = load i32, ptr addrspace(1) %selected25, align 4
   %t0 = insertelement <3 x i32> poison, i32 %val0, i32 0
   %t1 = insertelement <3 x i32> %t0, i32 %val1, i32 1
   %t2 = insertelement <3 x i32> %t1, i32 %val2, i32 2
-  store <3 x i32> %t2, <3 x i32> addrspace(1)* %out
+  store <3 x i32> %t2, ptr addrspace(1) %out
   ret void
 }
 
-define void @scev_targeting_complex_case(i1 %cnd, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 %base, <2 x i32> addrspace(1)* %out) {
+define void @scev_targeting_complex_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, i32 %base, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @scev_targeting_complex_case
 ; CHECK: load <2 x i32>
 entry:
@@ -39,28 +39,23 @@ entry:
   %base.x16.p4 = shl i64 %zext.x4.p1, 2
   %base.x16.p8 = shl i64 %zext.x4.p2, 2
   %base.x16.p12 = mul i64 %zext.x4.p3, 4
-  %a.pi8 = bitcast i32 addrspace(1)* %a to i8 addrspace(1)*
-  %b.pi8 = bitcast i32 addrspace(1)* %b to i8 addrspace(1)*
-  %gep.a.base.x16 = getelementptr inbounds i8, i8 addrspace(1)* %a.pi8, i64 %base.x16
-  %gep.b.base.x16.p4 = getelementptr inbounds i8, i8 addrspace(1)* %b.pi8, i64 %base.x16.p4
-  %gep.a.base.x16.p8 = getelementptr inbounds i8, i8 addrspace(1)* %a.pi8, i64 %base.x16.p8
-  %gep.b.base.x16.p12 = getelementptr inbounds i8, i8 addrspace(1)* %b.pi8, i64 %base.x16.p12
-  %a.base.x16 = bitcast i8 addrspace(1)* %gep.a.base.x16 to i32 addrspace(1)*
-  %b.base.x16.p4 = bitcast i8 addrspace(1)* %gep.b.base.x16.p4 to i32 addrspace(1)*
-  %selected.base.x16.p0.or.4 = select i1 %cnd, i32 addrspace(1)* %a.base.x16, i32 addrspace(1)* %b.base.x16.p4
-  %gep.selected.base.x16.p8.or.12 = select i1 %cnd, i8 addrspace(1)* %gep.a.base.x16.p8, i8 addrspace(1)* %gep.b.base.x16.p12
-  %selected.base.x16.p8.or.12 = bitcast i8 addrspace(1)* %gep.selected.base.x16.p8.or.12 to i32 addrspace(1)*
-  %selected.base.x16.p40.or.44 = getelementptr inbounds i32, i32 addrspace(1)* %selected.base.x16.p0.or.4, i64 10
-  %selected.base.x16.p44.or.48 = getelementptr inbounds i32, i32 addrspace(1)* %selected.base.x16.p8.or.12, i64 9
-  %val0 = load i32, i32 addrspace(1)* %selected.base.x16.p40.or.44, align 4
-  %val1 = load i32, i32 addrspace(1)* %selected.base.x16.p44.or.48, align 4
+  %gep.a.base.x16 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %base.x16
+  %gep.b.base.x16.p4 = getelementptr inbounds i8, ptr addrspace(1) %b, i64 %base.x16.p4
+  %gep.a.base.x16.p8 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %base.x16.p8
+  %gep.b.base.x16.p12 = getelementptr inbounds i8, ptr addrspace(1) %b, i64 %base.x16.p12
+  %selected.base.x16.p0.or.4 = select i1 %cnd, ptr addrspace(1) %gep.a.base.x16, ptr addrspace(1) %gep.b.base.x16.p4
+  %gep.selected.base.x16.p8.or.12 = select i1 %cnd, ptr addrspace(1) %gep.a.base.x16.p8, ptr addrspace(1) %gep.b.base.x16.p12
+  %selected.base.x16.p40.or.44 = getelementptr inbounds i32, ptr addrspace(1) %selected.base.x16.p0.or.4, i64 10
+  %selected.base.x16.p44.or.48 = getelementptr inbounds i32, ptr addrspace(1) %gep.selected.base.x16.p8.or.12, i64 9
+  %val0 = load i32, ptr addrspace(1) %selected.base.x16.p40.or.44, align 4
+  %val1 = load i32, ptr addrspace(1) %selected.base.x16.p44.or.48, align 4
   %t0 = insertelement <2 x i32> poison, i32 %val0, i32 0
   %t1 = insertelement <2 x i32> %t0, i32 %val1, i32 1
-  store <2 x i32> %t1, <2 x i32> addrspace(1)* %out
+  store <2 x i32> %t1, ptr addrspace(1) %out
   ret void
 }
 
-define void @nested_selects(i1 %cnd0, i1 %cnd1, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 %base, <2 x i32> addrspace(1)* %out) {
+define void @nested_selects(i1 %cnd0, i1 %cnd1, ptr addrspace(1) %a, ptr addrspace(1) %b, i32 %base, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @nested_selects
 ; CHECK: load <2 x i32>
 entry:
@@ -76,20 +71,20 @@ entry:
   %sext.p3 = sext i32 %base.p3 to i64
   %sext.x4.p5 = sext i32 %base.x4.p5 to i64
   %sext.x4.p6 = sext i32 %base.x4.p6 to i64
-  %gep.a.base = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext
-  %gep.a.base.p1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.p1
-  %gep.a.base.p2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.p2
-  %gep.a.base.p3 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.p3
-  %gep.b.base.x4.p5 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.x4.p5
-  %gep.b.base.x4.p6 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.x4.p6
-  %selected.1.L = select i1 %cnd1, i32 addrspace(1)* %gep.a.base.p2, i32 addrspace(1)* %gep.b.base.x4.p5
-  %selected.1.R = select i1 %cnd1, i32 addrspace(1)* %gep.a.base.p3, i32 addrspace(1)* %gep.b.base.x4.p6
-  %selected.0.L = select i1 %cnd0, i32 addrspace(1)* %gep.a.base, i32 addrspace(1)* %selected.1.L
-  %selected.0.R = select i1 %cnd0, i32 addrspace(1)* %gep.a.base.p1, i32 addrspace(1)* %selected.1.R
-  %val0 = load i32, i32 addrspace(1)* %selected.0.L, align 4
-  %val1 = load i32, i32 addrspace(1)* %selected.0.R, align 4
+  %gep.a.base = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext
+  %gep.a.base.p1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.p1
+  %gep.a.base.p2 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.p2
+  %gep.a.base.p3 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.p3
+  %gep.b.base.x4.p5 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.x4.p5
+  %gep.b.base.x4.p6 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.x4.p6
+  %selected.1.L = select i1 %cnd1, ptr addrspace(1) %gep.a.base.p2, ptr addrspace(1) %gep.b.base.x4.p5
+  %selected.1.R = select i1 %cnd1, ptr addrspace(1) %gep.a.base.p3, ptr addrspace(1) %gep.b.base.x4.p6
+  %selected.0.L = select i1 %cnd0, ptr addrspace(1) %gep.a.base, ptr addrspace(1) %selected.1.L
+  %selected.0.R = select i1 %cnd0, ptr addrspace(1) %gep.a.base.p1, ptr addrspace(1) %selected.1.R
+  %val0 = load i32, ptr addrspace(1) %selected.0.L, align 4
+  %val1 = load i32, ptr addrspace(1) %selected.0.R, align 4
   %t0 = insertelement <2 x i32> poison, i32 %val0, i32 0
   %t1 = insertelement <2 x i32> %t0, i32 %val1, i32 1
-  store <2 x i32> %t1, <2 x i32> addrspace(1)* %out
+  store <2 x i32> %t1, ptr addrspace(1) %out
   ret void
 }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
index b1395d0e01790..7fa7ae27730d7 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
@@ -2,28 +2,28 @@
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
-define void @base_case(i1 %cnd, i32 addrspace(1)* %a, i32 addrspace(1)* %b, <3 x i32> addrspace(1)* %out) {
+define void @base_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @base_case
 ; CHECK: load <3 x i32>
 entry:
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 1
-  %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 2
-  %gep4 = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 1
-  %gep5 = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 2
-  %selected = select i1 %cnd, i32 addrspace(1)* %a, i32 addrspace(1)* %b
-  %selected14 = select i1 %cnd, i32 addrspace(1)* %gep1, i32 addrspace(1)* %gep4
-  %selected25 = select i1 %cnd, i32 addrspace(1)* %gep2, i32 addrspace(1)* %gep5
-  %val0 = load i32, i32 addrspace(1)* %selected, align 4
-  %val1 = load i32, i32 addrspace(1)* %selected14, align 4
-  %val2 = load i32, i32 addrspace(1)* %selected25, align 4
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 1
+  %gep2 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 2
+  %gep4 = getelementptr inbounds i32, ptr addrspace(1) %b, i64 1
+  %gep5 = getelementptr inbounds i32, ptr addrspace(1) %b, i64 2
+  %selected = select i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b
+  %selected14 = select i1 %cnd, ptr addrspace(1) %gep1, ptr addrspace(1) %gep4
+  %selected25 = select i1 %cnd, ptr addrspace(1) %gep2, ptr addrspace(1) %gep5
+  %val0 = load i32, ptr addrspace(1) %selected, align 4
+  %val1 = load i32, ptr addrspace(1) %selected14, align 4
+  %val2 = load i32, ptr addrspace(1) %selected25, align 4
   %t0 = insertelement <3 x i32> undef, i32 %val0, i32 0
   %t1 = insertelement <3 x i32> %t0, i32 %val1, i32 1
   %t2 = insertelement <3 x i32> %t1, i32 %val2, i32 2
-  store <3 x i32> %t2, <3 x i32> addrspace(1)* %out
+  store <3 x i32> %t2, ptr addrspace(1) %out
   ret void
 }
 
-define void @scev_targeting_complex_case(i1 %cnd, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 %base, <2 x i32> addrspace(1)* %out) {
+define void @scev_targeting_complex_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, i32 %base, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @scev_targeting_complex_case
 ; CHECK: load <2 x i32>
 entry:
@@ -39,28 +39,23 @@ entry:
   %base.x16.p4 = shl i64 %zext.x4.p1, 2
   %base.x16.p8 = shl i64 %zext.x4.p2, 2
   %base.x16.p12 = mul i64 %zext.x4.p3, 4
-  %a.pi8 = bitcast i32 addrspace(1)* %a to i8 addrspace(1)*
-  %b.pi8 = bitcast i32 addrspace(1)* %b to i8 addrspace(1)*
-  %gep.a.base.x16 = getelementptr inbounds i8, i8 addrspace(1)* %a.pi8, i64 %base.x16
-  %gep.b.base.x16.p4 = getelementptr inbounds i8, i8 addrspace(1)* %b.pi8, i64 %base.x16.p4
-  %gep.a.base.x16.p8 = getelementptr inbounds i8, i8 addrspace(1)* %a.pi8, i64 %base.x16.p8
-  %gep.b.base.x16.p12 = getelementptr inbounds i8, i8 addrspace(1)* %b.pi8, i64 %base.x16.p12
-  %a.base.x16 = bitcast i8 addrspace(1)* %gep.a.base.x16 to i32 addrspace(1)*
-  %b.base.x16.p4 = bitcast i8 addrspace(1)* %gep.b.base.x16.p4 to i32 addrspace(1)*
-  %selected.base.x16.p0.or.4 = select i1 %cnd, i32 addrspace(1)* %a.base.x16, i32 addrspace(1)* %b.base.x16.p4
-  %gep.selected.base.x16.p8.or.12 = select i1 %cnd, i8 addrspace(1)* %gep.a.base.x16.p8, i8 addrspace(1)* %gep.b.base.x16.p12
-  %selected.base.x16.p8.or.12 = bitcast i8 addrspace(1)* %gep.selected.base.x16.p8.or.12 to i32 addrspace(1)*
-  %selected.base.x16.p40.or.44 = getelementptr inbounds i32, i32 addrspace(1)* %selected.base.x16.p0.or.4, i64 10
-  %selected.base.x16.p44.or.48 = getelementptr inbounds i32, i32 addrspace(1)* %selected.base.x16.p8.or.12, i64 9
-  %val0 = load i32, i32 addrspace(1)* %selected.base.x16.p40.or.44, align 4
-  %val1 = load i32, i32 addrspace(1)* %selected.base.x16.p44.or.48, align 4
+  %gep.a.base.x16 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %base.x16
+  %gep.b.base.x16.p4 = getelementptr inbounds i8, ptr addrspace(1) %b, i64 %base.x16.p4
+  %gep.a.base.x16.p8 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %base.x16.p8
+  %gep.b.base.x16.p12 = getelementptr inbounds i8, ptr addrspace(1) %b, i64 %base.x16.p12
+  %selected.base.x16.p0.or.4 = select i1 %cnd, ptr addrspace(1) %gep.a.base.x16, ptr addrspace(1) %gep.b.base.x16.p4
+  %gep.selected.base.x16.p8.or.12 = select i1 %cnd, ptr addrspace(1) %gep.a.base.x16.p8, ptr addrspace(1) %gep.b.base.x16.p12
+  %selected.base.x16.p40.or.44 = getelementptr inbounds i32, ptr addrspace(1) %selected.base.x16.p0.or.4, i64 10
+  %selected.base.x16.p44.or.48 = getelementptr inbounds i32, ptr addrspace(1) %gep.selected.base.x16.p8.or.12, i64 9
+  %val0 = load i32, ptr addrspace(1) %selected.base.x16.p40.or.44, align 4
+  %val1 = load i32, ptr addrspace(1) %selected.base.x16.p44.or.48, align 4
   %t0 = insertelement <2 x i32> undef, i32 %val0, i32 0
   %t1 = insertelement <2 x i32> %t0, i32 %val1, i32 1
-  store <2 x i32> %t1, <2 x i32> addrspace(1)* %out
+  store <2 x i32> %t1, ptr addrspace(1) %out
   ret void
 }
 
-define void @nested_selects(i1 %cnd0, i1 %cnd1, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 %base, <2 x i32> addrspace(1)* %out) {
+define void @nested_selects(i1 %cnd0, i1 %cnd1, ptr addrspace(1) %a, ptr addrspace(1) %b, i32 %base, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @nested_selects
 ; CHECK: load <2 x i32>
 entry:
@@ -76,20 +71,20 @@ entry:
   %sext.p3 = sext i32 %base.p3 to i64
   %sext.x4.p5 = sext i32 %base.x4.p5 to i64
   %sext.x4.p6 = sext i32 %base.x4.p6 to i64
-  %gep.a.base = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext
-  %gep.a.base.p1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.p1
-  %gep.a.base.p2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.p2
-  %gep.a.base.p3 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.p3
-  %gep.b.base.x4.p5 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.x4.p5
-  %gep.b.base.x4.p6 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %sext.x4.p6
-  %selected.1.L = select i1 %cnd1, i32 addrspace(1)* %gep.a.base.p2, i32 addrspace(1)* %gep.b.base.x4.p5
-  %selected.1.R = select i1 %cnd1, i32 addrspace(1)* %gep.a.base.p3, i32 addrspace(1)* %gep.b.base.x4.p6
-  %selected.0.L = select i1 %cnd0, i32 addrspace(1)* %gep.a.base, i32 addrspace(1)* %selected.1.L
-  %selected.0.R = select i1 %cnd0, i32 addrspace(1)* %gep.a.base.p1, i32 addrspace(1)* %selected.1.R
-  %val0 = load i32, i32 addrspace(1)* %selected.0.L, align 4
-  %val1 = load i32, i32 addrspace(1)* %selected.0.R, align 4
+  %gep.a.base = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext
+  %gep.a.base.p1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.p1
+  %gep.a.base.p2 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.p2
+  %gep.a.base.p3 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.p3
+  %gep.b.base.x4.p5 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.x4.p5
+  %gep.b.base.x4.p6 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %sext.x4.p6
+  %selected.1.L = select i1 %cnd1, ptr addrspace(1) %gep.a.base.p2, ptr addrspace(1) %gep.b.base.x4.p5
+  %selected.1.R = select i1 %cnd1, ptr addrspace(1) %gep.a.base.p3, ptr addrspace(1) %gep.b.base.x4.p6
+  %selected.0.L = select i1 %cnd0, ptr addrspace(1) %gep.a.base, ptr addrspace(1) %selected.1.L
+  %selected.0.R = select i1 %cnd0, ptr addrspace(1) %gep.a.base.p1, ptr addrspace(1) %selected.1.R
+  %val0 = load i32, ptr addrspace(1) %selected.0.L, align 4
+  %val1 = load i32, ptr addrspace(1) %selected.0.R, align 4
   %t0 = insertelement <2 x i32> undef, i32 %val0, i32 0
   %t1 = insertelement <2 x i32> %t0, i32 %val1, i32 1
-  store <2 x i32> %t1, <2 x i32> addrspace(1)* %out
+  store <2 x i32> %t1, ptr addrspace(1) %out
   ret void
 }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
index 578670bccf153..8d6b286077647 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
@@ -11,48 +11,35 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; CHECK: store <4 x float>
 
 ; Function Attrs: nounwind
-define amdgpu_kernel void @store_vectorize_with_alias(i8 addrspace(1)* %a, i8 addrspace(1)* %b) #0 {
+define amdgpu_kernel void @store_vectorize_with_alias(ptr addrspace(1) %a, ptr addrspace(1) %b) #0 {
 bb:
-  %tmp = bitcast i8 addrspace(1)* %b to float addrspace(1)*
-  %tmp1 = load float, float addrspace(1)* %tmp, align 4
-
-  %tmp2 = bitcast i8 addrspace(1)* %a to float addrspace(1)*
-  store float %tmp1, float addrspace(1)* %tmp2, align 4
-  %tmp3 = getelementptr i8, i8 addrspace(1)* %a, i64 4
-  %tmp4 = bitcast i8 addrspace(1)* %tmp3 to float addrspace(1)*
-  store float %tmp1, float addrspace(1)* %tmp4, align 4
-  %tmp5 = getelementptr i8, i8 addrspace(1)* %a, i64 8
-  %tmp6 = bitcast i8 addrspace(1)* %tmp5 to float addrspace(1)*
-  store float %tmp1, float addrspace(1)* %tmp6, align 4
-  %tmp7 = getelementptr i8, i8 addrspace(1)* %a, i64 12
-  %tmp8 = bitcast i8 addrspace(1)* %tmp7 to float addrspace(1)*
-  store float %tmp1, float addrspace(1)* %tmp8, align 4
-
-  %tmp9 = getelementptr i8, i8 addrspace(1)* %b, i64 16
-  %tmp10 = bitcast i8 addrspace(1)* %tmp9 to float addrspace(1)*
-  %tmp11 = load float, float addrspace(1)* %tmp10, align 4
-  %tmp12 = getelementptr i8, i8 addrspace(1)* %b, i64 20
-  %tmp13 = bitcast i8 addrspace(1)* %tmp12 to float addrspace(1)*
-  %tmp14 = load float, float addrspace(1)* %tmp13, align 4
-  %tmp15 = getelementptr i8, i8 addrspace(1)* %b, i64 24
-  %tmp16 = bitcast i8 addrspace(1)* %tmp15 to float addrspace(1)*
-  %tmp17 = load float, float addrspace(1)* %tmp16, align 4
-  %tmp18 = getelementptr i8, i8 addrspace(1)* %b, i64 28
-  %tmp19 = bitcast i8 addrspace(1)* %tmp18 to float addrspace(1)*
-  %tmp20 = load float, float addrspace(1)* %tmp19, align 4
-
-  %tmp21 = getelementptr i8, i8 addrspace(1)* %a, i64 16
-  %tmp22 = bitcast i8 addrspace(1)* %tmp21 to float addrspace(1)*
-  store float %tmp11, float addrspace(1)* %tmp22, align 4
-  %tmp23 = getelementptr i8, i8 addrspace(1)* %a, i64 20
-  %tmp24 = bitcast i8 addrspace(1)* %tmp23 to float addrspace(1)*
-  store float %tmp14, float addrspace(1)* %tmp24, align 4
-  %tmp25 = getelementptr i8, i8 addrspace(1)* %a, i64 24
-  %tmp26 = bitcast i8 addrspace(1)* %tmp25 to float addrspace(1)*
-  store float %tmp17, float addrspace(1)* %tmp26, align 4
-  %tmp27 = getelementptr i8, i8 addrspace(1)* %a, i64 28
-  %tmp28 = bitcast i8 addrspace(1)* %tmp27 to float addrspace(1)*
-  store float %tmp20, float addrspace(1)* %tmp28, align 4
+  %tmp1 = load float, ptr addrspace(1) %b, align 4
+
+  store float %tmp1, ptr addrspace(1) %a, align 4
+  %tmp3 = getelementptr i8, ptr addrspace(1) %a, i64 4
+  store float %tmp1, ptr addrspace(1) %tmp3, align 4
+  %tmp5 = getelementptr i8, ptr addrspace(1) %a, i64 8
+  store float %tmp1, ptr addrspace(1) %tmp5, align 4
+  %tmp7 = getelementptr i8, ptr addrspace(1) %a, i64 12
+  store float %tmp1, ptr addrspace(1) %tmp7, align 4
+
+  %tmp9 = getelementptr i8, ptr addrspace(1) %b, i64 16
+  %tmp11 = load float, ptr addrspace(1) %tmp9, align 4
+  %tmp12 = getelementptr i8, ptr addrspace(1) %b, i64 20
+  %tmp14 = load float, ptr addrspace(1) %tmp12, align 4
+  %tmp15 = getelementptr i8, ptr addrspace(1) %b, i64 24
+  %tmp17 = load float, ptr addrspace(1) %tmp15, align 4
+  %tmp18 = getelementptr i8, ptr addrspace(1) %b, i64 28
+  %tmp20 = load float, ptr addrspace(1) %tmp18, align 4
+
+  %tmp21 = getelementptr i8, ptr addrspace(1) %a, i64 16
+  store float %tmp11, ptr addrspace(1) %tmp21, align 4
+  %tmp23 = getelementptr i8, ptr addrspace(1) %a, i64 20
+  store float %tmp14, ptr addrspace(1) %tmp23, align 4
+  %tmp25 = getelementptr i8, ptr addrspace(1) %a, i64 24
+  store float %tmp17, ptr addrspace(1) %tmp25, align 4
+  %tmp27 = getelementptr i8, ptr addrspace(1) %a, i64 28
+  store float %tmp20, ptr addrspace(1) %tmp27, align 4
 
   ret void
 }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
index b513523507082..8a4e4f00ab02c 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
@@ -7,89 +7,82 @@ target datalayout = "e-p:64:64-p1:64:64-p5:32:32"
 ; The p5:32:32 portion of the data layout is critical for the test.
 
 ; CHECK-LABEL: @cast_to_ptr
-; CHECK: store i32* undef, i32** %tmp9, align 8
-; CHECK: store i32* undef, i32** %tmp7, align 8
+; CHECK: store ptr undef, ptr %tmp9, align 8
+; CHECK: store ptr undef, ptr %tmp7, align 8
 define void @cast_to_ptr() {
 entry:
-  %ascast = addrspacecast i32* addrspace(5)* null to i32**
+  %ascast = addrspacecast ptr addrspace(5) null to ptr
   %tmp4 = icmp eq i32 undef, 0
-  %tmp6 = select i1 false, i32** undef, i32** undef
-  %tmp7 = select i1 %tmp4, i32** null, i32** %tmp6
-  %tmp9 = select i1 %tmp4, i32** %ascast, i32** null
-  store i32* undef, i32** %tmp9, align 8
-  store i32* undef, i32** %tmp7, align 8
+  %tmp6 = select i1 false, ptr undef, ptr undef
+  %tmp7 = select i1 %tmp4, ptr null, ptr %tmp6
+  %tmp9 = select i1 %tmp4, ptr %ascast, ptr null
+  store ptr undef, ptr %tmp9, align 8
+  store ptr undef, ptr %tmp7, align 8
   unreachable
 }
 
 ; CHECK-LABEL: @cast_to_cast
-; CHECK: %tmp4 = load i32*, i32** %tmp1, align 8
-; CHECK: %tmp5 = load i32*, i32** %tmp3, align 8
+; CHECK: %tmp4 = load ptr, ptr %tmp1, align 8
+; CHECK: %tmp5 = load ptr, ptr %tmp3, align 8
 define void @cast_to_cast() {
 entry:
-  %a.ascast = addrspacecast i32* addrspace(5)* undef to i32**
-  %b.ascast = addrspacecast i32* addrspace(5)* null to i32**
-  %tmp1 = select i1 false, i32** %a.ascast, i32** undef
-  %tmp3 = select i1 false, i32** %b.ascast, i32** undef
-  %tmp4 = load i32*, i32** %tmp1, align 8
-  %tmp5 = load i32*, i32** %tmp3, align 8
+  %a.ascast = addrspacecast ptr addrspace(5) undef to ptr
+  %b.ascast = addrspacecast ptr addrspace(5) null to ptr
+  %tmp1 = select i1 false, ptr %a.ascast, ptr undef
+  %tmp3 = select i1 false, ptr %b.ascast, ptr undef
+  %tmp4 = load ptr, ptr %tmp1, align 8
+  %tmp5 = load ptr, ptr %tmp3, align 8
   unreachable
 }
 
 ; CHECK-LABEL: @all_to_cast
 ; CHECK: load <4 x float>
-define void @all_to_cast(i8* nocapture readonly align 16 dereferenceable(16) %alloc1) {
+define void @all_to_cast(ptr nocapture readonly align 16 dereferenceable(16) %alloc1) {
 entry:
-  %alloc16 = addrspacecast i8* %alloc1 to i8 addrspace(1)*
-  %tmp = bitcast i8 addrspace(1)* %alloc16 to float addrspace(1)*
-  %tmp1 = load float, float addrspace(1)* %tmp, align 16, !invariant.load !0
-  %tmp6 = getelementptr inbounds i8, i8 addrspace(1)* %alloc16, i64 4
-  %tmp7 = bitcast i8 addrspace(1)* %tmp6 to float addrspace(1)*
-  %tmp8 = load float, float addrspace(1)* %tmp7, align 4, !invariant.load !0
-  %tmp15 = getelementptr inbounds i8, i8 addrspace(1)* %alloc16, i64 8
-  %tmp16 = bitcast i8 addrspace(1)* %tmp15 to float addrspace(1)*
-  %tmp17 = load float, float addrspace(1)* %tmp16, align 8, !invariant.load !0
-  %tmp24 = getelementptr inbounds i8, i8 addrspace(1)* %alloc16, i64 12
-  %tmp25 = bitcast i8 addrspace(1)* %tmp24 to float addrspace(1)*
-  %tmp26 = load float, float addrspace(1)* %tmp25, align 4, !invariant.load !0
+  %alloc16 = addrspacecast ptr %alloc1 to ptr addrspace(1)
+  %tmp1 = load float, ptr addrspace(1) %alloc16, align 16, !invariant.load !0
+  %tmp6 = getelementptr inbounds i8, ptr addrspace(1) %alloc16, i64 4
+  %tmp8 = load float, ptr addrspace(1) %tmp6, align 4, !invariant.load !0
+  %tmp15 = getelementptr inbounds i8, ptr addrspace(1) %alloc16, i64 8
+  %tmp17 = load float, ptr addrspace(1) %tmp15, align 8, !invariant.load !0
+  %tmp24 = getelementptr inbounds i8, ptr addrspace(1) %alloc16, i64 12
+  %tmp26 = load float, ptr addrspace(1) %tmp24, align 4, !invariant.load !0
   ret void
 }
 
 ; CHECK-LABEL: @ext_ptr
 ; CHECK: load <2 x i32>
-define void @ext_ptr(i32 addrspace(5)* %p) {
+define void @ext_ptr(ptr addrspace(5) %p) {
 entry:
-  %gep1 = getelementptr inbounds i32, i32 addrspace(5)* %p, i64 0
-  %gep2 = getelementptr inbounds i32, i32 addrspace(5)* %p, i64 1
-  %a.ascast = addrspacecast i32 addrspace(5)* %gep1 to i32*
-  %b.ascast = addrspacecast i32 addrspace(5)* %gep2 to i32*
-  %tmp1 = load i32, i32* %a.ascast, align 8
-  %tmp2 = load i32, i32* %b.ascast, align 8
+  %gep2 = getelementptr inbounds i32, ptr addrspace(5) %p, i64 1
+  %a.ascast = addrspacecast ptr addrspace(5) %p to ptr
+  %b.ascast = addrspacecast ptr addrspace(5) %gep2 to ptr
+  %tmp1 = load i32, ptr %a.ascast, align 8
+  %tmp2 = load i32, ptr %b.ascast, align 8
   unreachable
 }
 
 ; CHECK-LABEL: @shrink_ptr
 ; CHECK: load <2 x i32>
-define void @shrink_ptr(i32* %p) {
+define void @shrink_ptr(ptr %p) {
 entry:
-  %gep1 = getelementptr inbounds i32, i32* %p, i64 0
-  %gep2 = getelementptr inbounds i32, i32* %p, i64 1
-  %a.ascast = addrspacecast i32* %gep1 to i32 addrspace(5)*
-  %b.ascast = addrspacecast i32* %gep2 to i32 addrspace(5)*
-  %tmp1 = load i32, i32 addrspace(5)* %a.ascast, align 8
-  %tmp2 = load i32, i32 addrspace(5)* %b.ascast, align 8
+  %gep2 = getelementptr inbounds i32, ptr %p, i64 1
+  %a.ascast = addrspacecast ptr %p to ptr addrspace(5)
+  %b.ascast = addrspacecast ptr %gep2 to ptr addrspace(5)
+  %tmp1 = load i32, ptr addrspace(5) %a.ascast, align 8
+  %tmp2 = load i32, ptr addrspace(5) %b.ascast, align 8
   unreachable
 }
 
 ; CHECK-LABEL: @ext_ptr_wrap
 ; CHECK: load <2 x i8>
-define void @ext_ptr_wrap(i8 addrspace(5)* %p) {
+define void @ext_ptr_wrap(ptr addrspace(5) %p) {
 entry:
-  %gep1 = getelementptr inbounds i8, i8 addrspace(5)* %p, i64 0
-  %gep2 = getelementptr inbounds i8, i8 addrspace(5)* %p, i64 4294967295
-  %a.ascast = addrspacecast i8 addrspace(5)* %gep1 to i8*
-  %b.ascast = addrspacecast i8 addrspace(5)* %gep2 to i8*
-  %tmp1 = load i8, i8* %a.ascast, align 1
-  %tmp2 = load i8, i8* %b.ascast, align 1
+  %gep2 = getelementptr inbounds i8, ptr addrspace(5) %p, i64 4294967295
+  %a.ascast = addrspacecast ptr addrspace(5) %p to ptr
+  %b.ascast = addrspacecast ptr addrspace(5) %gep2 to ptr
+  %tmp1 = load i8, ptr %a.ascast, align 1
+  %tmp2 = load i8, ptr %b.ascast, align 1
   unreachable
 }
 

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
index 2b0624bb7e81e..8ab1a22259b75 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
@@ -18,52 +18,50 @@ declare void @use_v2i9(<2 x i9>)
 ; CHECK-LABEL: @merge_store_2_constants_i1(
 ; CHECK: store i1
 ; CHECK: store i1
-define amdgpu_kernel void @merge_store_2_constants_i1(i1 addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr i1, i1 addrspace(1)* %out, i32 1
-  store i1 true, i1 addrspace(1)* %out.gep.1
-  store i1 false, i1 addrspace(1)* %out
+define amdgpu_kernel void @merge_store_2_constants_i1(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i1, ptr addrspace(1) %out, i32 1
+  store i1 true, ptr addrspace(1) %out.gep.1
+  store i1 false, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_2_constants_i2(
 ; CHECK: store i2 1
 ; CHECK: store i2 -1
-define amdgpu_kernel void @merge_store_2_constants_i2(i2 addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr i2, i2 addrspace(1)* %out, i32 1
-  store i2 1, i2 addrspace(1)* %out.gep.1
-  store i2 -1, i2 addrspace(1)* %out
+define amdgpu_kernel void @merge_store_2_constants_i2(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i2, ptr addrspace(1) %out, i32 1
+  store i2 1, ptr addrspace(1) %out.gep.1
+  store i2 -1, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @merge_different_store_sizes_i1_i8(
 ; CHECK: store i1 true
 ; CHECK: store i8 123
-define amdgpu_kernel void @merge_different_store_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
-  %out.i1 = bitcast i8 addrspace(1)* %out to i1 addrspace(1)*
-  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
-  store i1 true, i1 addrspace(1)* %out.i1
-  store i8 123, i8 addrspace(1)* %out.gep.1
+define amdgpu_kernel void @merge_different_store_sizes_i1_i8(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i8, ptr addrspace(1) %out, i32 1
+  store i1 true, ptr addrspace(1) %out
+  store i8 123, ptr addrspace(1) %out.gep.1
   ret void
 }
 
 ; CHECK-LABEL: @merge_different_store_sizes_i8_i1(
 ; CHECK: store i8 123
 ; CHECK: store i1 true
-define amdgpu_kernel void @merge_different_store_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
-  %out.i8 = bitcast i1 addrspace(1)* %out to i8 addrspace(1)*
-  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out.i8, i32 1
-  store i8 123, i8 addrspace(1)* %out.gep.1
-  store i1 true, i1 addrspace(1)* %out
+define amdgpu_kernel void @merge_different_store_sizes_i8_i1(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i8, ptr addrspace(1) %out, i32 1
+  store i8 123, ptr addrspace(1) %out.gep.1
+  store i1 true, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @merge_store_2_constant_structs(
 ; CHECK: store %struct.foo
 ; CHECK: store %struct.foo
-define amdgpu_kernel void @merge_store_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr %struct.foo, %struct.foo addrspace(1)* %out, i32 1
-  store %struct.foo { i32 12, i8 3 }, %struct.foo addrspace(1)* %out.gep.1
-  store %struct.foo { i32 92, i8 9 }, %struct.foo addrspace(1)* %out
+define amdgpu_kernel void @merge_store_2_constant_structs(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr %struct.foo, ptr addrspace(1) %out, i32 1
+  store %struct.foo { i32 12, i8 3 }, ptr addrspace(1) %out.gep.1
+  store %struct.foo { i32 92, i8 9 }, ptr addrspace(1) %out
   ret void
 }
 
@@ -71,10 +69,10 @@ define amdgpu_kernel void @merge_store_2_constant_structs(%struct.foo addrspace(
 ; CHECK-LABEL: @merge_store_2_constants_v2i2(
 ; CHECK: store <2 x i2>
 ; CHECK: store <2 x i2>
-define amdgpu_kernel void @merge_store_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr <2 x i2>, <2 x i2> addrspace(1)* %out, i32 1
-  store <2 x i2> <i2 1, i2 -1>, <2 x i2> addrspace(1)* %out.gep.1
-  store <2 x i2> <i2 -1, i2 1>, <2 x i2> addrspace(1)* %out
+define amdgpu_kernel void @merge_store_2_constants_v2i2(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr <2 x i2>, ptr addrspace(1) %out, i32 1
+  store <2 x i2> <i2 1, i2 -1>, ptr addrspace(1) %out.gep.1
+  store <2 x i2> <i2 -1, i2 1>, ptr addrspace(1) %out
   ret void
 }
 
@@ -83,20 +81,20 @@ define amdgpu_kernel void @merge_store_2_constants_v2i2(<2 x i2> addrspace(1)* %
 ; CHECK-LABEL: @merge_store_2_constants_v4i2(
 ; CHECK: store <4 x i2>
 ; CHECK: store <4 x i2>
-define amdgpu_kernel void @merge_store_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr <4 x i2>, <4 x i2> addrspace(1)* %out, i32 1
-  store <4 x i2> <i2 1, i2 -1, i2 1, i2 -1>, <4 x i2> addrspace(1)* %out.gep.1
-  store <4 x i2> <i2 -1, i2 1, i2 -1, i2 1>, <4 x i2> addrspace(1)* %out
+define amdgpu_kernel void @merge_store_2_constants_v4i2(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr <4 x i2>, ptr addrspace(1) %out, i32 1
+  store <4 x i2> <i2 1, i2 -1, i2 1, i2 -1>, ptr addrspace(1) %out.gep.1
+  store <4 x i2> <i2 -1, i2 1, i2 -1, i2 1>, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @merge_load_2_constants_i1(
 ; CHECK: load i1
 ; CHECK: load i1
-define amdgpu_kernel void @merge_load_2_constants_i1(i1 addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr i1, i1 addrspace(1)* %out, i32 1
-  %x = load i1, i1 addrspace(1)* %out.gep.1
-  %y = load i1, i1 addrspace(1)* %out
+define amdgpu_kernel void @merge_load_2_constants_i1(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i1, ptr addrspace(1) %out, i32 1
+  %x = load i1, ptr addrspace(1) %out.gep.1
+  %y = load i1, ptr addrspace(1) %out
   call void @use_i1(i1 %x)
   call void @use_i1(i1 %y)
   ret void
@@ -105,10 +103,10 @@ define amdgpu_kernel void @merge_load_2_constants_i1(i1 addrspace(1)* %out) #0 {
 ; CHECK-LABEL: @merge_load_2_constants_i2(
 ; CHECK: load i2
 ; CHECK: load i2
-define amdgpu_kernel void @merge_load_2_constants_i2(i2 addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr i2, i2 addrspace(1)* %out, i32 1
-  %x = load i2, i2 addrspace(1)* %out.gep.1
-  %y = load i2, i2 addrspace(1)* %out
+define amdgpu_kernel void @merge_load_2_constants_i2(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i2, ptr addrspace(1) %out, i32 1
+  %x = load i2, ptr addrspace(1) %out.gep.1
+  %y = load i2, ptr addrspace(1) %out
   call void @use_i2(i2 %x)
   call void @use_i2(i2 %y)
   ret void
@@ -117,11 +115,10 @@ define amdgpu_kernel void @merge_load_2_constants_i2(i2 addrspace(1)* %out) #0 {
 ; CHECK-LABEL: @merge_different_load_sizes_i1_i8(
 ; CHECK: load i1
 ; CHECK: load i8
-define amdgpu_kernel void @merge_different_load_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
-  %out.i1 = bitcast i8 addrspace(1)* %out to i1 addrspace(1)*
-  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
-  %x = load i1, i1 addrspace(1)* %out.i1
-  %y = load i8, i8 addrspace(1)* %out.gep.1
+define amdgpu_kernel void @merge_different_load_sizes_i1_i8(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i8, ptr addrspace(1) %out, i32 1
+  %x = load i1, ptr addrspace(1) %out
+  %y = load i8, ptr addrspace(1) %out.gep.1
   call void @use_i1(i1 %x)
   call void @use_i8(i8 %y)
   ret void
@@ -130,11 +127,10 @@ define amdgpu_kernel void @merge_different_load_sizes_i1_i8(i8 addrspace(1)* %ou
 ; CHECK-LABEL: @merge_different_load_sizes_i8_i1(
 ; CHECK: load i8
 ; CHECK: load i1
-define amdgpu_kernel void @merge_different_load_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
-  %out.i8 = bitcast i1 addrspace(1)* %out to i8 addrspace(1)*
-  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out.i8, i32 1
-  %x = load i8, i8 addrspace(1)* %out.gep.1
-  %y = load i1, i1 addrspace(1)* %out
+define amdgpu_kernel void @merge_different_load_sizes_i8_i1(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i8, ptr addrspace(1) %out, i32 1
+  %x = load i8, ptr addrspace(1) %out.gep.1
+  %y = load i1, ptr addrspace(1) %out
   call void @use_i8(i8 %x)
   call void @use_i1(i1 %y)
   ret void
@@ -143,10 +139,10 @@ define amdgpu_kernel void @merge_different_load_sizes_i8_i1(i1 addrspace(1)* %ou
 ; CHECK-LABEL: @merge_load_2_constant_structs(
 ; CHECK: load %struct.foo
 ; CHECK: load %struct.foo
-define amdgpu_kernel void @merge_load_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr %struct.foo, %struct.foo addrspace(1)* %out, i32 1
-  %x = load %struct.foo, %struct.foo addrspace(1)* %out.gep.1
-  %y = load %struct.foo, %struct.foo addrspace(1)* %out
+define amdgpu_kernel void @merge_load_2_constant_structs(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr %struct.foo, ptr addrspace(1) %out, i32 1
+  %x = load %struct.foo, ptr addrspace(1) %out.gep.1
+  %y = load %struct.foo, ptr addrspace(1) %out
   call void @use_foo(%struct.foo %x)
   call void @use_foo(%struct.foo %y)
   ret void
@@ -155,10 +151,10 @@ define amdgpu_kernel void @merge_load_2_constant_structs(%struct.foo addrspace(1
 ; CHECK-LABEL: @merge_load_2_constants_v2i2(
 ; CHECK: load <2 x i2>
 ; CHECK: load <2 x i2>
-define amdgpu_kernel void @merge_load_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr <2 x i2>, <2 x i2> addrspace(1)* %out, i32 1
-  %x = load <2 x i2>, <2 x i2> addrspace(1)* %out.gep.1
-  %y = load <2 x i2>, <2 x i2> addrspace(1)* %out
+define amdgpu_kernel void @merge_load_2_constants_v2i2(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr <2 x i2>, ptr addrspace(1) %out, i32 1
+  %x = load <2 x i2>, ptr addrspace(1) %out.gep.1
+  %y = load <2 x i2>, ptr addrspace(1) %out
   call void @use_v2i2(<2 x i2> %x)
   call void @use_v2i2(<2 x i2> %y)
   ret void
@@ -167,10 +163,10 @@ define amdgpu_kernel void @merge_load_2_constants_v2i2(<2 x i2> addrspace(1)* %o
 ; CHECK-LABEL: @merge_load_2_constants_v4i2(
 ; CHECK: load <4 x i2>
 ; CHECK: load <4 x i2>
-define amdgpu_kernel void @merge_load_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr <4 x i2>, <4 x i2> addrspace(1)* %out, i32 1
-  %x = load <4 x i2>, <4 x i2> addrspace(1)* %out.gep.1
-  %y = load <4 x i2>, <4 x i2> addrspace(1)* %out
+define amdgpu_kernel void @merge_load_2_constants_v4i2(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr <4 x i2>, ptr addrspace(1) %out, i32 1
+  %x = load <4 x i2>, ptr addrspace(1) %out.gep.1
+  %y = load <4 x i2>, ptr addrspace(1) %out
   call void @use_v4i2(<4 x i2> %x)
   call void @use_v4i2(<4 x i2> %y)
   ret void
@@ -179,20 +175,20 @@ define amdgpu_kernel void @merge_load_2_constants_v4i2(<4 x i2> addrspace(1)* %o
 ; CHECK-LABEL: @merge_store_2_constants_i9(
 ; CHECK: store i9 3
 ; CHECK: store i9 -5
-define amdgpu_kernel void @merge_store_2_constants_i9(i9 addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr i9, i9 addrspace(1)* %out, i32 1
-  store i9 3, i9 addrspace(1)* %out.gep.1
-  store i9 -5, i9 addrspace(1)* %out
+define amdgpu_kernel void @merge_store_2_constants_i9(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr i9, ptr addrspace(1) %out, i32 1
+  store i9 3, ptr addrspace(1) %out.gep.1
+  store i9 -5, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @merge_load_2_constants_v2i9(
 ; CHECK: load <2 x i9>
 ; CHECK: load <2 x i9>
-define amdgpu_kernel void @merge_load_2_constants_v2i9(<2 x i9> addrspace(1)* %out) #0 {
-  %out.gep.1 = getelementptr <2 x i9>, <2 x i9> addrspace(1)* %out, i32 1
-  %x = load <2 x i9>, <2 x i9> addrspace(1)* %out.gep.1
-  %y = load <2 x i9>, <2 x i9> addrspace(1)* %out
+define amdgpu_kernel void @merge_load_2_constants_v2i9(ptr addrspace(1) %out) #0 {
+  %out.gep.1 = getelementptr <2 x i9>, ptr addrspace(1) %out, i32 1
+  %x = load <2 x i9>, ptr addrspace(1) %out.gep.1
+  %y = load <2 x i9>, ptr addrspace(1) %out
   call void @use_v2i9(<2 x i9> %x)
   call void @use_v2i9(<2 x i9> %y)
   ret void

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll
index 627ba807d8db5..92efbb238fe95 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll
@@ -1,33 +1,29 @@
 ; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-define void @ldg_f16(half* nocapture align 16 %rd0) {
-  %in1b = bitcast half* %rd0 to <2 x half>*
-  %load1 = load <2 x half>, <2 x half>* %in1b, align 4
+define void @ldg_f16(ptr nocapture align 16 %rd0) {
+  %load1 = load <2 x half>, ptr %rd0, align 4
   %p1 = fcmp ogt <2 x half> %load1, zeroinitializer
   %s1 = select <2 x i1> %p1, <2 x half> %load1, <2 x half> zeroinitializer
-  store <2 x half> %s1, <2 x half>* %in1b, align 4
-  %in2 = getelementptr half, half* %rd0, i64 2
-  %in2b = bitcast half* %in2 to <2 x half>*
-  %load2 = load <2 x half>, <2 x half>* %in2b, align 4
+  store <2 x half> %s1, ptr %rd0, align 4
+  %in2 = getelementptr half, ptr %rd0, i64 2
+  %load2 = load <2 x half>, ptr %in2, align 4
   %p2 = fcmp ogt <2 x half> %load2, zeroinitializer
   %s2 = select <2 x i1> %p2, <2 x half> %load2, <2 x half> zeroinitializer
-  store <2 x half> %s2, <2 x half>* %in2b, align 4
-  %in3 = getelementptr half, half* %rd0, i64 4
-  %in3b = bitcast half* %in3 to <2 x half>*
-  %load3 = load <2 x half>, <2 x half>* %in3b, align 4
+  store <2 x half> %s2, ptr %in2, align 4
+  %in3 = getelementptr half, ptr %rd0, i64 4
+  %load3 = load <2 x half>, ptr %in3, align 4
   %p3 = fcmp ogt <2 x half> %load3, zeroinitializer
   %s3 = select <2 x i1> %p3, <2 x half> %load3, <2 x half> zeroinitializer
-  store <2 x half> %s3, <2 x half>* %in3b, align 4
-  %in4 = getelementptr half, half* %rd0, i64 6
-  %in4b = bitcast half* %in4 to <2 x half>*
-  %load4 = load <2 x half>, <2 x half>* %in4b, align 4
+  store <2 x half> %s3, ptr %in3, align 4
+  %in4 = getelementptr half, ptr %rd0, i64 6
+  %load4 = load <2 x half>, ptr %in4, align 4
   %p4 = fcmp ogt <2 x half> %load4, zeroinitializer
   %s4 = select <2 x i1> %p4, <2 x half> %load4, <2 x half> zeroinitializer
-  store <2 x half> %s4, <2 x half>* %in4b, align 4
+  store <2 x half> %s4, ptr %in4, align 4
   ret void
 
 ; CHECK-LABEL: @ldg_f16
-; CHECK: %[[LD:.*]] = load <8 x half>, <8 x half>*
+; CHECK: %[[LD:.*]] = load <8 x half>, ptr
 ; CHECK: shufflevector <8 x half> %[[LD]], <8 x half> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK: shufflevector <8 x half> %[[LD]], <8 x half> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK: shufflevector <8 x half> %[[LD]], <8 x half> poison, <2 x i32> <i32 4, i32 5>
@@ -35,60 +31,52 @@ define void @ldg_f16(half* nocapture align 16 %rd0) {
 ; CHECK: store <8 x half>
 }
 
-define void @no_nonpow2_vector(half* nocapture align 16 %rd0) {
-  %in1b = bitcast half* %rd0 to <3 x half>*
-  %load1 = load <3 x half>, <3 x half>* %in1b, align 4
+define void @no_nonpow2_vector(ptr nocapture align 16 %rd0) {
+  %load1 = load <3 x half>, ptr %rd0, align 4
   %p1 = fcmp ogt <3 x half> %load1, zeroinitializer
   %s1 = select <3 x i1> %p1, <3 x half> %load1, <3 x half> zeroinitializer
-  store <3 x half> %s1, <3 x half>* %in1b, align 4
-  %in2 = getelementptr half, half* %rd0, i64 3
-  %in2b = bitcast half* %in2 to <3 x half>*
-  %load2 = load <3 x half>, <3 x half>* %in2b, align 4
+  store <3 x half> %s1, ptr %rd0, align 4
+  %in2 = getelementptr half, ptr %rd0, i64 3
+  %load2 = load <3 x half>, ptr %in2, align 4
   %p2 = fcmp ogt <3 x half> %load2, zeroinitializer
   %s2 = select <3 x i1> %p2, <3 x half> %load2, <3 x half> zeroinitializer
-  store <3 x half> %s2, <3 x half>* %in2b, align 4
-  %in3 = getelementptr half, half* %rd0, i64 6
-  %in3b = bitcast half* %in3 to <3 x half>*
-  %load3 = load <3 x half>, <3 x half>* %in3b, align 4
+  store <3 x half> %s2, ptr %in2, align 4
+  %in3 = getelementptr half, ptr %rd0, i64 6
+  %load3 = load <3 x half>, ptr %in3, align 4
   %p3 = fcmp ogt <3 x half> %load3, zeroinitializer
   %s3 = select <3 x i1> %p3, <3 x half> %load3, <3 x half> zeroinitializer
-  store <3 x half> %s3, <3 x half>* %in3b, align 4
-  %in4 = getelementptr half, half* %rd0, i64 9
-  %in4b = bitcast half* %in4 to <3 x half>*
-  %load4 = load <3 x half>, <3 x half>* %in4b, align 4
+  store <3 x half> %s3, ptr %in3, align 4
+  %in4 = getelementptr half, ptr %rd0, i64 9
+  %load4 = load <3 x half>, ptr %in4, align 4
   %p4 = fcmp ogt <3 x half> %load4, zeroinitializer
   %s4 = select <3 x i1> %p4, <3 x half> %load4, <3 x half> zeroinitializer
-  store <3 x half> %s4, <3 x half>* %in4b, align 4
+  store <3 x half> %s4, ptr %in4, align 4
   ret void
 
 ; CHECK-LABEL: @no_nonpow2_vector
 ; CHECK-NOT: shufflevector
 }
 
-define void @no_pointer_vector(half** nocapture align 16 %rd0) {
-  %in1b = bitcast half** %rd0 to <2 x half*>*
-  %load1 = load <2 x half*>, <2 x half*>* %in1b, align 4
-  %p1 = icmp ne <2 x half*> %load1, zeroinitializer
-  %s1 = select <2 x i1> %p1, <2 x half*> %load1, <2 x half*> zeroinitializer
-  store <2 x half*> %s1, <2 x half*>* %in1b, align 4
-  %in2 = getelementptr half*, half** %rd0, i64 2
-  %in2b = bitcast half** %in2 to <2 x half*>*
-  %load2 = load <2 x half*>, <2 x half*>* %in2b, align 4
-  %p2 = icmp ne <2 x half*> %load2, zeroinitializer
-  %s2 = select <2 x i1> %p2, <2 x half*> %load2, <2 x half*> zeroinitializer
-  store <2 x half*> %s2, <2 x half*>* %in2b, align 4
-  %in3 = getelementptr half*, half** %rd0, i64 4
-  %in3b = bitcast half** %in3 to <2 x half*>*
-  %load3 = load <2 x half*>, <2 x half*>* %in3b, align 4
-  %p3 = icmp ne <2 x half*> %load3, zeroinitializer
-  %s3 = select <2 x i1> %p3, <2 x half*> %load3, <2 x half*> zeroinitializer
-  store <2 x half*> %s3, <2 x half*>* %in3b, align 4
-  %in4 = getelementptr half*, half** %rd0, i64 6
-  %in4b = bitcast half** %in4 to <2 x half*>*
-  %load4 = load <2 x half*>, <2 x half*>* %in4b, align 4
-  %p4 = icmp ne <2 x half*> %load4, zeroinitializer
-  %s4 = select <2 x i1> %p4, <2 x half*> %load4, <2 x half*> zeroinitializer
-  store <2 x half*> %s4, <2 x half*>* %in4b, align 4
+define void @no_pointer_vector(ptr nocapture align 16 %rd0) {
+  %load1 = load <2 x ptr>, ptr %rd0, align 4
+  %p1 = icmp ne <2 x ptr> %load1, zeroinitializer
+  %s1 = select <2 x i1> %p1, <2 x ptr> %load1, <2 x ptr> zeroinitializer
+  store <2 x ptr> %s1, ptr %rd0, align 4
+  %in2 = getelementptr ptr, ptr %rd0, i64 2
+  %load2 = load <2 x ptr>, ptr %in2, align 4
+  %p2 = icmp ne <2 x ptr> %load2, zeroinitializer
+  %s2 = select <2 x i1> %p2, <2 x ptr> %load2, <2 x ptr> zeroinitializer
+  store <2 x ptr> %s2, ptr %in2, align 4
+  %in3 = getelementptr ptr, ptr %rd0, i64 4
+  %load3 = load <2 x ptr>, ptr %in3, align 4
+  %p3 = icmp ne <2 x ptr> %load3, zeroinitializer
+  %s3 = select <2 x i1> %p3, <2 x ptr> %load3, <2 x ptr> zeroinitializer
+  store <2 x ptr> %s3, ptr %in3, align 4
+  %in4 = getelementptr ptr, ptr %rd0, i64 6
+  %load4 = load <2 x ptr>, ptr %in4, align 4
+  %p4 = icmp ne <2 x ptr> %load4, zeroinitializer
+  %s4 = select <2 x i1> %p4, <2 x ptr> %load4, <2 x ptr> zeroinitializer
+  store <2 x ptr> %s4, ptr %in4, align 4
   ret void
 
 ; CHECK-LABEL: @no_pointer_vector

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/merge-across-side-effects.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/merge-across-side-effects.ll
index 4f69d1a552b44..782dba25f063d 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/merge-across-side-effects.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/merge-across-side-effects.ll
@@ -19,12 +19,12 @@ declare void @fn_readnone() #5
 ; CHECK: load
 ; CHECK: call void @fn()
 ; CHECK: load
-define void @load_fn(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn()
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
@@ -32,12 +32,12 @@ define void @load_fn(i32* %p) #0 {
 ; CHECK: load
 ; CHECK: call void @fn_nounwind()
 ; CHECK: load
-define void @load_fn_nounwind(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn_nounwind(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn_nounwind() #0
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
@@ -45,24 +45,24 @@ define void @load_fn_nounwind(i32* %p) #0 {
 ; CHECK: load
 ; CHECK: call void @fn_nounwind_writeonly()
 ; CHECK: load
-define void @load_fn_nounwind_writeonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn_nounwind_writeonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn_nounwind_writeonly() #1
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @load_fn_nounwind_readonly
 ; CHECK-DAG: load <2 x i32>
 ; CHECK-DAG: call void @fn_nounwind_readonly()
-define void @load_fn_nounwind_readonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn_nounwind_readonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn_nounwind_readonly() #2
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
@@ -70,12 +70,12 @@ define void @load_fn_nounwind_readonly(i32* %p) #0 {
 ; CHECK: load
 ; CHECK: call void @fn_readonly
 ; CHECK: load
-define void @load_fn_readonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn_readonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn_readonly() #4
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
@@ -83,24 +83,24 @@ define void @load_fn_readonly(i32* %p) #0 {
 ; CHECK: load
 ; CHECK: call void @fn_writeonly()
 ; CHECK: load
-define void @load_fn_writeonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn_writeonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn_writeonly() #3
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @load_fn_readnone
 ; CHECK-DAG: load <2 x i32>
 ; CHECK-DAG: call void @fn_readnone()
-define void @load_fn_readnone(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @load_fn_readnone(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  %v0 = load i32, i32* %p, align 8
+  %v0 = load i32, ptr %p, align 8
   call void @fn_readnone() #5
-  %v1 = load i32, i32* %p.1, align 4
+  %v1 = load i32, ptr %p.1, align 4
   ret void
 }
 
@@ -112,12 +112,12 @@ define void @load_fn_readnone(i32* %p) #0 {
 ; CHECK: store
 ; CHECK: call void @fn()
 ; CHECK: store
-define void @store_fn(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   call void @fn()
-  store i32 0, i32* %p.1
+  store i32 0, ptr %p.1
   ret void
 }
 
@@ -125,12 +125,12 @@ define void @store_fn(i32* %p) #0 {
 ; CHECK: store
 ; CHECK: call void @fn_nounwind()
 ; CHECK: store
-define void @store_fn_nounwind(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn_nounwind(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   call void @fn_nounwind() #0
-  store i32 0, i32* %p.1
+  store i32 0, ptr %p.1
   ret void
 }
 
@@ -138,12 +138,12 @@ define void @store_fn_nounwind(i32* %p) #0 {
 ; CHECK: store
 ; CHECK: call void @fn_nounwind_writeonly()
 ; CHECK: store
-define void @store_fn_nounwind_writeonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn_nounwind_writeonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   call void @fn_nounwind_writeonly() #1
-  store i32 0, i32* %p.1
+  store i32 0, ptr %p.1
   ret void
 }
 
@@ -151,12 +151,12 @@ define void @store_fn_nounwind_writeonly(i32* %p) #0 {
 ; CHECK: store
 ; CHECK: call void @fn_nounwind_readonly()
 ; CHECK: store
-define void @store_fn_nounwind_readonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn_nounwind_readonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   call void @fn_nounwind_readonly() #2
-  store i32 0, i32* %p.1
+  store i32 0, ptr %p.1
   ret void
 }
 
@@ -164,12 +164,12 @@ define void @store_fn_nounwind_readonly(i32* %p) #0 {
 ; CHECK: store
 ; CHECK: call void @fn_readonly
 ; CHECK: store
-define void @store_fn_readonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn_readonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   call void @fn_readonly() #4
-  store i32 0, i32* %p.1
+  store i32 0, ptr %p.1
   ret void
 }
 
@@ -177,12 +177,12 @@ define void @store_fn_readonly(i32* %p) #0 {
 ; CHECK: store
 ; CHECK: call void @fn_writeonly()
 ; CHECK: store
-define void @store_fn_writeonly(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn_writeonly(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   call void @fn_writeonly() #3
-  store i32 0, i32* %p.1
+  store i32 0, ptr %p.1
   ret void
 }
 
@@ -190,12 +190,12 @@ define void @store_fn_writeonly(i32* %p) #0 {
 ; CHECK-LABEL: @store_fn_readnone
 ; CHECK-DAG: store <2 x i32>
 ; CHECK-DAG: call void @fn_readnone()
-define void @store_fn_readnone(i32* %p) #0 {
-  %p.1 = getelementptr i32, i32* %p, i32 1
+define void @store_fn_readnone(ptr %p) #0 {
+  %p.1 = getelementptr i32, ptr %p, i32 1
 
-  store i32 0, i32* %p, align 8
+  store i32 0, ptr %p, align 8
   call void @fn_readnone() #5
-  store i32 0, i32* %p.1, align 8
+  store i32 0, ptr %p.1, align 8
   ret void
 }
 

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/non-instr-bitcast.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/non-instr-bitcast.ll
index e8558b59e9839..de8a9904cb582 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/non-instr-bitcast.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/non-instr-bitcast.ll
@@ -6,9 +6,9 @@
 
 define void @foo() {
   ; CHECK: load <4 x float>
-  %a = load float, float addrspace(1)* getelementptr inbounds ([4 x float], [4 x float] addrspace(1)* @global, i64 0, i64 0), align 16
-  %b = load float, float addrspace(1)* getelementptr inbounds ([4 x float], [4 x float] addrspace(1)* @global, i64 0, i64 1), align 4
-  %c = load float, float addrspace(1)* getelementptr inbounds ([4 x float], [4 x float] addrspace(1)* @global, i64 0, i64 2), align 4
-  %d = load float, float addrspace(1)* getelementptr inbounds ([4 x float], [4 x float] addrspace(1)* @global, i64 0, i64 3), align 4
+  %a = load float, ptr addrspace(1) @global, align 16
+  %b = load float, ptr addrspace(1) getelementptr inbounds ([4 x float], ptr addrspace(1) @global, i64 0, i64 1), align 4
+  %c = load float, ptr addrspace(1) getelementptr inbounds ([4 x float], ptr addrspace(1) @global, i64 0, i64 2), align 4
+  %d = load float, ptr addrspace(1) getelementptr inbounds ([4 x float], ptr addrspace(1) @global, i64 0, i64 3), align 4
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
index 9aea9b1d88753..ff8a804beeb51 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
@@ -18,12 +18,10 @@ entry:
   %mul331 = and i32 %base, -4
   %add350.4 = add i32 4, %mul331
   %idx351.4 = zext i32 %add350.4 to i64
-  %arrayidx352.4 = getelementptr inbounds { %union, [2000 x i8] }, { %union, [2000 x i8] }* @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.4
-  %tmp296.4 = bitcast float* %arrayidx352.4 to i32*
+  %arrayidx352.4 = getelementptr inbounds { %union, [2000 x i8] }, ptr @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.4
   %add350.5 = add i32 5, %mul331
   %idx351.5 = zext i32 %add350.5 to i64
-  %arrayidx352.5 = getelementptr inbounds { %union, [2000 x i8] }, { %union, [2000 x i8] }* @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.5
-  %tmp296.5 = bitcast float* %arrayidx352.5 to i32*
+  %arrayidx352.5 = getelementptr inbounds { %union, [2000 x i8] }, ptr @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.5
   %cnd = icmp ult i32 %base, 1000
   br i1 %cnd, label %loads, label %exit
 
@@ -31,8 +29,8 @@ loads:
  ; If and only if the loads are in a different BB from the GEPs codegenprepare
   ; would try to turn the GEPs into math, which makes LoadStoreVectorizer's job
   ; harder
-  %tmp297.4 = load i32, i32* %tmp296.4, align 4, !tbaa !0
-  %tmp297.5 = load i32, i32* %tmp296.5, align 4, !tbaa !0
+  %tmp297.4 = load i32, ptr %arrayidx352.4, align 4, !tbaa !0
+  %tmp297.5 = load i32, ptr %arrayidx352.5, align 4, !tbaa !0
   br label %exit
 
 exit:
@@ -56,15 +54,13 @@ entry:
 
 loads:                                            ; preds = %entry
   %sunkaddr = mul i64 %idx351.4, 4
-  %sunkaddr1 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @global_pointer to i8*), i64 %sunkaddr
-  %sunkaddr2 = getelementptr inbounds i8, i8* %sunkaddr1, i64 4096
-  %0 = bitcast i8* %sunkaddr2 to i32*
-  %tmp297.4 = load i32, i32* %0, align 4, !tbaa !0
+  %sunkaddr1 = getelementptr inbounds i8, ptr @global_pointer, i64 %sunkaddr
+  %sunkaddr2 = getelementptr inbounds i8, ptr %sunkaddr1, i64 4096
+  %tmp297.4 = load i32, ptr %sunkaddr2, align 4, !tbaa !0
   %sunkaddr3 = mul i64 %idx351.5, 4
-  %sunkaddr4 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @global_pointer to i8*), i64 %sunkaddr3
-  %sunkaddr5 = getelementptr inbounds i8, i8* %sunkaddr4, i64 4096
-  %1 = bitcast i8* %sunkaddr5 to i32*
-  %tmp297.5 = load i32, i32* %1, align 4, !tbaa !0
+  %sunkaddr4 = getelementptr inbounds i8, ptr @global_pointer, i64 %sunkaddr3
+  %sunkaddr5 = getelementptr inbounds i8, ptr %sunkaddr4, i64 4096
+  %tmp297.5 = load i32, ptr %sunkaddr5, align 4, !tbaa !0
   br label %exit
 
 exit:                                             ; preds = %loads, %entry

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll
index 769814f535819..4051ed4554be7 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll
@@ -20,12 +20,11 @@ target triple = "x86_64--"
 ; Function Attrs: nounwind
 define void @main() local_unnamed_addr #0 {
 ; CHECK-LABEL: @main()
-; CHECK: [[PTR:%[0-9]+]] = bitcast float* %preheader.load0.address to <2 x float>*
-; CHECK:  = load <2 x float>, <2 x float>* [[PTR]]
+; CHECK:  = load <2 x float>, ptr %preheader.load0.address
 ; CHECK-LABEL: for.body23:
 entry:
-  %tmp = load i32, i32* @global_value0, !range !0
-  %tmp2 = load i32, i32* @global_value1
+  %tmp = load i32, ptr @global_value0, !range !0
+  %tmp2 = load i32, ptr @global_value1
   %and.i.i = and i32 %tmp2, 2
   %add.nuw.nsw.i.i = add nuw nsw i32 %and.i.i, 0
   %mul.i.i = shl nuw nsw i32 %add.nuw.nsw.i.i, 1
@@ -38,29 +37,29 @@ entry:
   %add7.i.7 = add nuw nsw i32 %reass.mul347.7, 0
   %preheader.address0.idx = add nuw nsw i32 %add7.i.7, %mul.i.i
   %preheader.address0.idx.zext = zext i32 %preheader.address0.idx to i64
-  %preheader.load0.address = getelementptr inbounds float, float* @other_value, i64 %preheader.address0.idx.zext
-  %preheader.load0. = load float, float* %preheader.load0.address, align 4, !tbaa !1
+  %preheader.load0.address = getelementptr inbounds float, ptr @other_value, i64 %preheader.address0.idx.zext
+  %preheader.load0. = load float, ptr %preheader.load0.address, align 4, !tbaa !1
   %common.address.idx = add nuw nsw i32 %add7.i.7, %conv3.i42.i
   %preheader.header.common.address.idx.zext = zext i32 %common.address.idx to i64
-  %preheader.load1.address = getelementptr inbounds float, float* @other_value, i64 %preheader.header.common.address.idx.zext
-  %preheader.load1. = load float, float* %preheader.load1.address, align 4, !tbaa !1
+  %preheader.load1.address = getelementptr inbounds float, ptr @other_value, i64 %preheader.header.common.address.idx.zext
+  %preheader.load1. = load float, ptr %preheader.load1.address, align 4, !tbaa !1
   br label %for.body23
 
 for.body23:                                       ; preds = %for.body23, %entry
-  %loop.header.load0.address = getelementptr inbounds float, float* @other_value, i64 %preheader.header.common.address.idx.zext
-  %loop.header.load0. = load float, float* %loop.header.load0.address, align 4, !tbaa !1
+  %loop.header.load0.address = getelementptr inbounds float, ptr @other_value, i64 %preheader.header.common.address.idx.zext
+  %loop.header.load0. = load float, ptr %loop.header.load0.address, align 4, !tbaa !1
   %reass.mul343.7 = mul nuw nsw i32 %reass.add346.7, 72
   %add7.i286.7.7 = add nuw nsw i32 %reass.mul343.7, 56
   %add9.i288.7.7 = add nuw nsw i32 %add7.i286.7.7, %mul.i.i
   %loop.header.address1.idx = add nuw nsw i32 %add9.i288.7.7, 1
   %loop.header.address1.idx.zext = zext i32 %loop.header.address1.idx to i64
-  %loop.header.load1.address = getelementptr inbounds float, float* @other_value, i64 %loop.header.address1.idx.zext
-  %loop.header.load1. = load float, float* %loop.header.load1.address, align 4, !tbaa !1
-  store float %preheader.load0., float* @a, align 4, !tbaa !1
-  store float %preheader.load1., float* @b, align 4, !tbaa !1
-  store float %loop.header.load0., float* @c, align 4, !tbaa !1
-  store float %loop.header.load1., float* @d, align 4, !tbaa !1
-  %loaded.cnd = load i8, i8* @cnd
+  %loop.header.load1.address = getelementptr inbounds float, ptr @other_value, i64 %loop.header.address1.idx.zext
+  %loop.header.load1. = load float, ptr %loop.header.load1.address, align 4, !tbaa !1
+  store float %preheader.load0., ptr @a, align 4, !tbaa !1
+  store float %preheader.load1., ptr @b, align 4, !tbaa !1
+  store float %loop.header.load0., ptr @c, align 4, !tbaa !1
+  store float %loop.header.load1., ptr @d, align 4, !tbaa !1
+  %loaded.cnd = load i8, ptr @cnd
   %condition = trunc i8 %loaded.cnd to i1
   br i1 %condition, label %for.body23, label %exit
 

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width-inseltpoison.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width-inseltpoison.ll
index eed99d4032d01..a38aacfc3ce0e 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width-inseltpoison.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width-inseltpoison.ll
@@ -3,20 +3,19 @@
 ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu haswell -S -o - %s | FileCheck --check-prefix=CHECK-HSW %s
 ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu knl -S -o - %s | FileCheck --check-prefix=CHECK-KNL %s
 
-define <8 x double> @loadwidth_insert_extract(double* %ptr) {
-    %a = bitcast double* %ptr to <2 x double> *
-    %b = getelementptr <2 x double>, <2 x double>* %a, i32 1
-    %c = getelementptr <2 x double>, <2 x double>* %a, i32 2
-    %d = getelementptr <2 x double>, <2 x double>* %a, i32 3
+define <8 x double> @loadwidth_insert_extract(ptr %ptr) {
+    %b = getelementptr <2 x double>, ptr %ptr, i32 1
+    %c = getelementptr <2 x double>, ptr %ptr, i32 2
+    %d = getelementptr <2 x double>, ptr %ptr, i32 3
 ; CHECK-HSW: load <4 x double>
 ; CHECK-HSW: load <4 x double>
 ; CHECK-HSW-NOT: load
 ; CHECK-KNL: load <8 x double>
 ; CHECK-KNL-NOT: load
-    %la = load <2 x double>, <2 x double> *%a
-    %lb = load <2 x double>, <2 x double> *%b
-    %lc = load <2 x double>, <2 x double> *%c
-    %ld = load <2 x double>, <2 x double> *%d
+    %la = load <2 x double>, ptr %ptr
+    %lb = load <2 x double>, ptr %b
+    %lc = load <2 x double>, ptr %c
+    %ld = load <2 x double>, ptr %d
     ; Scalarize everything - Explicitly not a shufflevector to test this code
     ; path in the LSV
     %v1 = extractelement <2 x double> %la, i32 0

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll
index 257bfbb871991..f225762d43801 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll
@@ -3,20 +3,19 @@
 ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu haswell -S -o - %s | FileCheck --check-prefix=CHECK-HSW %s
 ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu knl -S -o - %s | FileCheck --check-prefix=CHECK-KNL %s
 
-define <8 x double> @loadwidth_insert_extract(double* %ptr) {
-    %a = bitcast double* %ptr to <2 x double> *
-    %b = getelementptr <2 x double>, <2 x double>* %a, i32 1
-    %c = getelementptr <2 x double>, <2 x double>* %a, i32 2
-    %d = getelementptr <2 x double>, <2 x double>* %a, i32 3
+define <8 x double> @loadwidth_insert_extract(ptr %ptr) {
+    %b = getelementptr <2 x double>, ptr %ptr, i32 1
+    %c = getelementptr <2 x double>, ptr %ptr, i32 2
+    %d = getelementptr <2 x double>, ptr %ptr, i32 3
 ; CHECK-HSW: load <4 x double>
 ; CHECK-HSW: load <4 x double>
 ; CHECK-HSW-NOT: load
 ; CHECK-KNL: load <8 x double>
 ; CHECK-KNL-NOT: load
-    %la = load <2 x double>, <2 x double> *%a
-    %lb = load <2 x double>, <2 x double> *%b
-    %lc = load <2 x double>, <2 x double> *%c
-    %ld = load <2 x double>, <2 x double> *%d
+    %la = load <2 x double>, ptr %ptr
+    %lb = load <2 x double>, ptr %b
+    %lc = load <2 x double>, ptr %c
+    %ld = load <2 x double>, ptr %d
     ; Scalarize everything - Explicitly not a shufflevector to test this code
     ; path in the LSV
     %v1 = extractelement <2 x double> %la, i32 0

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll
index 629a8479c7196..20e1fa8633b07 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll
@@ -20,19 +20,18 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 ;   p->i -= 1;
 ;   return p->f;
 ; }
-define float @foo(%struct.S* %p) {
+define float @foo(ptr %p) {
 entry:
 ; CHECK-LABEL: foo
 ; CHECK: load <2 x i32>, {{.*}}, !tbaa [[TAG_char:!.*]]
 ; CHECK: store <2 x i32> {{.*}}, !tbaa [[TAG_char]]
-  %f = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 0
-  %0 = load float, float* %f, align 4, !tbaa !2
+  %0 = load float, ptr %p, align 4, !tbaa !2
   %sub = fadd float %0, -1.000000e+00
-  store float %sub, float* %f, align 4, !tbaa !2
-  %i = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1
-  %1 = load i32, i32* %i, align 4, !tbaa !8
+  store float %sub, ptr %p, align 4, !tbaa !2
+  %i = getelementptr inbounds %struct.S, ptr %p, i64 0, i32 1
+  %1 = load i32, ptr %i, align 4, !tbaa !8
   %sub1 = add nsw i32 %1, -1
-  store i32 %sub1, i32* %i, align 4, !tbaa !8
+  store i32 %sub1, ptr %i, align 4, !tbaa !8
   ret float %sub
 }
 

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
index f8e96740e35fb..897c39a8cbb5a 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
@@ -3,9 +3,9 @@
 
 target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
 
-%struct.buffer_t = type { i32, i8* }
+%struct.buffer_t = type { i32, ptr }
 
-; Check an i32 and i8* get vectorized, and that the two accesses
+; Check an i32 and ptr get vectorized, and that the two accesses
 ; (load into buff.val and store to buff.p) preserve their order.
 ; Vectorized loads should be inserted at the position of the first load,
 ; and instructions which were between the first and last load should be
@@ -15,14 +15,13 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
 ; CHECK: load <2 x i32>
 ; CHECK: %buff.val = load i8
 ; CHECK: store i8 0
-define void @preserve_order_32(%struct.buffer_t* noalias %buff) #0 {
+define void @preserve_order_32(ptr noalias %buff) #0 {
 entry:
-  %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i32 0, i32 1
-  %buff.p = load i8*, i8** %tmp1
-  %buff.val = load i8, i8* %buff.p
-  store i8 0, i8* %buff.p, align 8
-  %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i32 0, i32 0
-  %buff.int = load i32, i32* %tmp0, align 8
+  %tmp1 = getelementptr inbounds %struct.buffer_t, ptr %buff, i32 0, i32 1
+  %buff.p = load ptr, ptr %tmp1
+  %buff.val = load i8, ptr %buff.p
+  store i8 0, ptr %buff.p, align 8
+  %buff.int = load i32, ptr %buff, align 8
   ret void
 }
 

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
index 42cb3ecfc988b..d5b91fea5da36 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
@@ -3,10 +3,10 @@
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 
-%struct.buffer_t = type { i64, i8* }
+%struct.buffer_t = type { i64, ptr }
 %struct.nested.buffer = type { %struct.buffer_t, %struct.buffer_t }
 
-; Check an i64 and i8* get vectorized, and that the two accesses
+; Check an i64 and ptr get vectorized, and that the two accesses
 ; (load into buff.val and store to buff.p) preserve their order.
 ; Vectorized loads should be inserted at the position of the first load,
 ; and instructions which were between the first and last load should be
@@ -16,14 +16,13 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 ; CHECK: load <2 x i64>
 ; CHECK: %buff.val = load i8
 ; CHECK: store i8 0
-define void @preserve_order_64(%struct.buffer_t* noalias %buff) #0 {
+define void @preserve_order_64(ptr noalias %buff) #0 {
 entry:
-  %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 1
-  %buff.p = load i8*, i8** %tmp1
-  %buff.val = load i8, i8* %buff.p
-  store i8 0, i8* %buff.p, align 8
-  %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 0
-  %buff.int = load i64, i64* %tmp0, align 16
+  %tmp1 = getelementptr inbounds %struct.buffer_t, ptr %buff, i64 0, i32 1
+  %buff.p = load ptr, ptr %tmp1
+  %buff.val = load i8, ptr %buff.p
+  store i8 0, ptr %buff.p, align 8
+  %buff.int = load i64, ptr %buff, align 16
   ret void
 }
 
@@ -33,39 +32,35 @@ entry:
 ; CHECK: load <2 x i64>
 ; CHECK: %buff.val = load i8
 ; CHECK: store i8 0
-define void @transitive_reorder(%struct.buffer_t* noalias %buff, %struct.nested.buffer* noalias %nest) #0 {
+define void @transitive_reorder(ptr noalias %buff, ptr noalias %nest) #0 {
 entry:
-  %nest0_0 = getelementptr inbounds %struct.nested.buffer, %struct.nested.buffer* %nest, i64 0, i32 0
-  %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %nest0_0, i64 0, i32 1
-  %buff.p = load i8*, i8** %tmp1
-  %buff.val = load i8, i8* %buff.p
-  store i8 0, i8* %buff.p, align 8
-  %nest1_0 = getelementptr inbounds %struct.nested.buffer, %struct.nested.buffer* %nest, i64 0, i32 0
-  %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %nest1_0, i64 0, i32 0
-  %buff.int = load i64, i64* %tmp0, align 16
+  %tmp1 = getelementptr inbounds %struct.buffer_t, ptr %nest, i64 0, i32 1
+  %buff.p = load ptr, ptr %tmp1
+  %buff.val = load i8, ptr %buff.p
+  store i8 0, ptr %buff.p, align 8
+  %buff.int = load i64, ptr %nest, align 16
   ret void
 }
 
 ; Check for no vectorization over phi node
 
 ; CHECK-LABEL: @no_vect_phi(
-; CHECK: load i8*
+; CHECK: load ptr
 ; CHECK: load i8
 ; CHECK: store i8 0
 ; CHECK: load i64
-define void @no_vect_phi(i32* noalias %ptr, %struct.buffer_t* noalias %buff) {
+define void @no_vect_phi(ptr noalias %ptr, ptr noalias %buff) {
 entry:
-  %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 1
-  %buff.p = load i8*, i8** %tmp1
-  %buff.val = load i8, i8* %buff.p
-  store i8 0, i8* %buff.p, align 8
+  %tmp1 = getelementptr inbounds %struct.buffer_t, ptr %buff, i64 0, i32 1
+  %buff.p = load ptr, ptr %tmp1
+  %buff.val = load i8, ptr %buff.p
+  store i8 0, ptr %buff.p, align 8
   br label %"for something"
 
 "for something":
   %index = phi i64 [ 0, %entry ], [ %index.next, %"for something" ]
 
-  %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 0
-  %buff.int = load i64, i64* %tmp0, align 16
+  %buff.int = load i64, ptr %buff, align 16
 
   %index.next = add i64 %index, 8
   %cmp_res = icmp eq i64 %index.next, 8

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll
index a4ad70f04e1e1..d1a1fa45c7410 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll
@@ -6,10 +6,9 @@
 ; CHECK-LABEL: @vector_scalar(
 ; CHECK: store double
 ; CHECK: store <1 x double>
-define void @vector_scalar(double* %ptr, double %a, <1 x double> %b) {
-  %1 = bitcast double* %ptr to <1 x double>*
-  %2 = getelementptr <1 x double>, <1 x double>* %1, i32 1
-  store double %a, double* %ptr, align 8
-  store <1 x double> %b, <1 x double>* %2, align 8
+define void @vector_scalar(ptr %ptr, double %a, <1 x double> %b) {
+  %1 = getelementptr <1 x double>, ptr %ptr, i32 1
+  store double %a, ptr %ptr, align 8
+  store <1 x double> %b, ptr %1, align 8
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll
index ae01ddc0bdedb..37e9ec7f0f461 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll
@@ -8,15 +8,14 @@
 
 target triple = "x86_64--"
 
-define void @ld_v4i8_add_nsw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_nsw(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_nsw(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nsw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -25,46 +24,45 @@ define void @ld_v4i8_add_nsw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]], align 4
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nsw i32 %v0, -1
   %tmp1 = add nsw i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add nsw i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add nsw i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add nsw i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> poison, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
-define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_nuw(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nuw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nuw i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -73,93 +71,93 @@ define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]], align 4
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nuw i32 %v0, -1
   %tmp1 = add nuw i32 %v1, %tmp
   %tmp2 = zext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add nuw i32 %v1, %v0
   %tmp6 = zext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nuw i32 %v0, 1
   %tmp10 = add nuw i32 %v1, %tmp9
   %tmp11 = zext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nuw i32 %v0, 2
   %tmp15 = add nuw i32 %v1, %tmp14
   %tmp16 = zext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> poison, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
 ; Make sure we don't vectorize the loads below because the source of
 ; sext instructions doesn't have the nsw flag.
 
-define void @ld_v4i8_add_not_safe(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_not_safe(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_not_safe(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nsw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[TMP7]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP9:%.*]] = add nsw i32 [[V0]], 1
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i32 [[V1]], [[TMP9]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
-; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP11]]
-; CHECK-NEXT:    [[TMP13:%.*]] = load i8, i8* [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i8, ptr [[TMP12]], align 1
 ; CHECK-NEXT:    [[TMP14:%.*]] = add nsw i32 [[V0]], 2
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[V1]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP16]]
-; CHECK-NEXT:    [[TMP18:%.*]] = load i8, i8* [[TMP17]], align 1
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = load i8, ptr [[TMP17]], align 1
 ; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <4 x i8> poison, i8 [[TMP4]], i32 0
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP8]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP13]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP18]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]], align 4
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nsw i32 %v0, -1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> poison, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll
index 72da9e4ed07bb..c931a6b181acd 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll
@@ -8,15 +8,14 @@
 
 target triple = "x86_64--"
 
-define void @ld_v4i8_add_nsw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_nsw(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_nsw(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nsw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -25,46 +24,45 @@ define void @ld_v4i8_add_nsw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nsw i32 %v0, -1
   %tmp1 = add nsw i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add nsw i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add nsw i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add nsw i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
-define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_nuw(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nuw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nuw i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -73,47 +71,46 @@ define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nuw i32 %v0, -1
   %tmp1 = add nuw i32 %v1, %tmp
   %tmp2 = zext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add nuw i32 %v1, %v0
   %tmp6 = zext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nuw i32 %v0, 1
   %tmp10 = add nuw i32 %v1, %tmp9
   %tmp11 = zext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nuw i32 %v0, 2
   %tmp15 = add nuw i32 %v1, %tmp14
   %tmp16 = zext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
 ; Apply different operand orders for the nested add sequences
-define void @ld_v4i8_add_nsw_operand_orders(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_nsw_operand_orders(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_nsw_operand_orders(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nsw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -122,47 +119,46 @@ define void @ld_v4i8_add_nsw_operand_orders(i32 %v0, i32 %v1, i8* %src, <4 x i8>
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nsw i32 %v0, -1
   %tmp1 = add nsw i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add nsw i32 %v0, %v1
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add nsw i32 %tmp9, %v1
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add nsw i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
 ; Apply different operand orders for the nested add sequences
-define void @ld_v4i8_add_nuw_operand_orders(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_nuw_operand_orders(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_nuw_operand_orders(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nuw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nuw i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -171,38 +167,38 @@ define void @ld_v4i8_add_nuw_operand_orders(i32 %v0, i32 %v1, i8* %src, <4 x i8>
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nuw i32 %v0, -1
   %tmp1 = add nuw i32 %v1, %tmp
   %tmp2 = zext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add nuw i32 %v0, %v1
   %tmp6 = zext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nuw i32 %v0, 1
   %tmp10 = add nuw i32 %tmp9, %v1
   %tmp11 = zext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nuw i32 %v0, 2
   %tmp15 = add nuw i32 %v1, %tmp14
   %tmp16 = zext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
-define void @ld_v4i8_add_known_bits(i32 %ind0, i32 %ind1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_known_bits(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_known_bits(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 4
@@ -210,13 +206,12 @@ define void @ld_v4i8_add_known_bits(i32 %ind0, i32 %ind1, i8* %src, <4 x i8>* %d
 ; CHECK-NEXT:    [[TMP:%.*]] = add i32 [[V0]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[V1]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <3 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i8>, <3 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <3 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP132:%.*]] = extractelement <3 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP183:%.*]] = extractelement <3 x i8> [[TMP1]], i32 2
@@ -224,7 +219,7 @@ define void @ld_v4i8_add_known_bits(i32 %ind0, i32 %ind1, i8* %src, <4 x i8>* %d
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -233,40 +228,39 @@ bb:
   %tmp = add i32 %v0, -1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
-define void @ld_v4i8_add_known_bits1(i32 %ind0, i32 %ind1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_known_bits1(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_known_bits1(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 4
 ; CHECK-NEXT:    [[V1:%.*]] = mul i32 [[IND1:%.*]], 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP132:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP183:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -275,7 +269,7 @@ define void @ld_v4i8_add_known_bits1(i32 %ind0, i32 %ind1, i8* %src, <4 x i8>* %
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -284,31 +278,31 @@ bb:
   %tmp = add i32 %v0, 3
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
-define void @ld_v4i8_add_known_bits_by_assume(i32 %ind0, i32 %ind1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_known_bits_by_assume(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_known_bits_by_assume(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 3
@@ -321,9 +315,8 @@ define void @ld_v4i8_add_known_bits_by_assume(i32 %ind0, i32 %ind1, i8* %src, <4
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_I_1]])
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP132:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP183:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -332,7 +325,7 @@ define void @ld_v4i8_add_known_bits_by_assume(i32 %ind0, i32 %ind1, i8* %src, <4
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -347,33 +340,33 @@ bb:
   %tmp = add i32 %v0, 3
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
 declare void @llvm.assume(i1)
 
-define void @ld_v4i8_add_assume_on_arg(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_assume_on_arg(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_assume_on_arg(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[AND_I:%.*]] = and i32 [[V0:%.*]], 3
@@ -385,13 +378,12 @@ define void @ld_v4i8_add_assume_on_arg(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %ds
 ; CHECK-NEXT:    [[TMP:%.*]] = add nsw i32 [[V0]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[V1]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <3 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i8>, <3 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <3 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP132:%.*]] = extractelement <3 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP183:%.*]] = extractelement <3 x i8> [[TMP1]], i32 2
@@ -399,7 +391,7 @@ define void @ld_v4i8_add_assume_on_arg(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %ds
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -412,31 +404,31 @@ bb:
   %tmp = add nsw i32 %v0, -1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
-define void @ld_v4i8_add_assume_on_arg1(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_assume_on_arg1(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_assume_on_arg1(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[AND_I:%.*]] = and i32 [[V0:%.*]], 3
@@ -447,9 +439,8 @@ define void @ld_v4i8_add_assume_on_arg1(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %d
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_I_1]])
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP132:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP183:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
@@ -458,7 +449,7 @@ define void @ld_v4i8_add_assume_on_arg1(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %d
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -471,34 +462,34 @@ bb:
   %tmp = add nsw i32 %v0, 3
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }
 
 ; Address computations are partly separated by control flow, with llvm.assume placed
 ; in the second basic block
 
-define void @ld_v2i8_add_different_contexts(i32 %ind0, i32 %ind1, i8* %src, <2 x i8>* %dst) {
+define void @ld_v2i8_add_different_contexts(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v2i8_add_different_contexts(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 4
@@ -509,14 +500,13 @@ define void @ld_v2i8_add_different_contexts(i32 %ind0, i32 %ind1, i8* %src, <2 x
 ; CHECK:       bb.loads:
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[BIT_COND]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <2 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <2 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x i8> undef, i8 [[TMP42]], i32 0
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x i8> [[TMP19]], i8 [[TMP81]], i32 1
-; CHECK-NEXT:    store <2 x i8> [[TMP20]], <2 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <2 x i8> [[TMP20]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    br label [[BB_SKIP]]
 ; CHECK:       bb.skip:
 ; CHECK-NEXT:    ret void
@@ -533,14 +523,14 @@ bb.loads:
   %tmp = add nsw i32 %v0, 1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp19 = insertelement <2 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <2 x i8> %tmp19, i8 %tmp8, i32 1
-  store <2 x i8> %tmp20, <2 x i8>* %dst
+  store <2 x i8> %tmp20, ptr %dst
   br label %bb.skip
 
 bb.skip:
@@ -549,7 +539,7 @@ bb.skip:
 
 ; Same as ld_v2i8_add_different_contexts but with llvm.assume placed between loads
 
-define void @ld_v2i8_add_different_contexts1(i32 %ind0, i32 %ind1, i8* %src, <2 x i8>* %dst) {
+define void @ld_v2i8_add_different_contexts1(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v2i8_add_different_contexts1(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 4
@@ -559,15 +549,14 @@ define void @ld_v2i8_add_different_contexts1(i32 %ind0, i32 %ind1, i8* %src, <2
 ; CHECK-NEXT:    br i1 [[BIT_COND]], label [[BB_LOADS:%.*]], label [[BB_SKIP:%.*]]
 ; CHECK:       bb.loads:
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <2 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <2 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[BIT_COND]])
 ; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x i8> undef, i8 [[TMP42]], i32 0
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x i8> [[TMP19]], i8 [[TMP81]], i32 1
-; CHECK-NEXT:    store <2 x i8> [[TMP20]], <2 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <2 x i8> [[TMP20]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    br label [[BB_SKIP]]
 ; CHECK:       bb.skip:
 ; CHECK-NEXT:    ret void
@@ -581,17 +570,17 @@ bb:
 
 bb.loads:
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   call void @llvm.assume(i1 %bit_cond)
   %tmp = add nsw i32 %v0, 1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp19 = insertelement <2 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <2 x i8> %tmp19, i8 %tmp8, i32 1
-  store <2 x i8> %tmp20, <2 x i8>* %dst
+  store <2 x i8> %tmp20, ptr %dst
   br label %bb.skip
 
 bb.skip:
@@ -600,23 +589,22 @@ bb.skip:
 
 ; llvm.assume is placed between loads in a single basic block
 
-define void @ld_v2i8_add_context(i32 %ind0, i32 %ind1, i8* %src, <2 x i8>* %dst) {
+define void @ld_v2i8_add_context(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v2i8_add_context(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 4
 ; CHECK-NEXT:    [[V1:%.*]] = mul i32 [[IND1:%.*]], 3
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <2 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <2 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[BIT_COND:%.*]] = icmp eq i32 [[TMP5]], 0
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[BIT_COND]])
 ; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x i8> undef, i8 [[TMP42]], i32 0
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x i8> [[TMP19]], i8 [[TMP81]], i32 1
-; CHECK-NEXT:    store <2 x i8> [[TMP20]], <2 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <2 x i8> [[TMP20]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -624,38 +612,37 @@ bb:
   %v1 = mul i32 %ind1, 3
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %bit_cond = icmp eq i32 %tmp5, 0
   call void @llvm.assume(i1 %bit_cond)
   %tmp = add nsw i32 %v0, 1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp19 = insertelement <2 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <2 x i8> %tmp19, i8 %tmp8, i32 1
-  store <2 x i8> %tmp20, <2 x i8>* %dst
+  store <2 x i8> %tmp20, ptr %dst
   ret void
 }
 
 ; Placing llvm.assume after all the loads and stores in the basic block still works
 
-define void @ld_v2i8_add_context1(i32 %ind0, i32 %ind1, i8* %src, <2 x i8>* %dst) {
+define void @ld_v2i8_add_context1(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v2i8_add_context1(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[V0:%.*]] = mul i32 [[IND0:%.*]], 4
 ; CHECK-NEXT:    [[V1:%.*]] = mul i32 [[IND1:%.*]], 3
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[TMP7]] to <2 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <2 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x i8> undef, i8 [[TMP42]], i32 0
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x i8> [[TMP19]], i8 [[TMP81]], i32 1
-; CHECK-NEXT:    store <2 x i8> [[TMP20]], <2 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <2 x i8> [[TMP20]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    [[BIT_COND:%.*]] = icmp eq i32 [[TMP5]], 0
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[BIT_COND]])
 ; CHECK-NEXT:    ret void
@@ -665,16 +652,16 @@ bb:
   %v1 = mul i32 %ind1, 3
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp = add nsw i32 %v0, 1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp19 = insertelement <2 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <2 x i8> %tmp19, i8 %tmp8, i32 1
-  store <2 x i8> %tmp20, <2 x i8>* %dst
+  store <2 x i8> %tmp20, ptr %dst
   %bit_cond = icmp eq i32 %tmp5, 0
   call void @llvm.assume(i1 %bit_cond)
   ret void
@@ -684,59 +671,59 @@ bb:
 ; sext instructions doesn't have the nsw flag or known bits allowing
 ; to apply the vectorization.
 
-define void @ld_v4i8_add_not_safe(i32 %v0, i32 %v1, i8* %src, <4 x i8>* %dst) {
+define void @ld_v4i8_add_not_safe(i32 %v0, i32 %v1, ptr %src, ptr %dst) {
 ; CHECK-LABEL: @ld_v4i8_add_not_safe(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = add nsw i32 [[V0:%.*]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[V1:%.*]], [[TMP]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[V1]], [[V0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[TMP7]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
 ; CHECK-NEXT:    [[TMP9:%.*]] = add nsw i32 [[V0]], 1
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i32 [[V1]], [[TMP9]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
-; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP11]]
-; CHECK-NEXT:    [[TMP13:%.*]] = load i8, i8* [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i8, ptr [[TMP12]], align 1
 ; CHECK-NEXT:    [[TMP14:%.*]] = add nsw i32 [[V0]], 2
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[V1]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[TMP16]]
-; CHECK-NEXT:    [[TMP18:%.*]] = load i8, i8* [[TMP17]], align 1
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = load i8, ptr [[TMP17]], align 1
 ; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <4 x i8> undef, i8 [[TMP4]], i32 0
 ; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP8]], i32 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP13]], i32 2
 ; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP18]], i32 3
-; CHECK-NEXT:    store <4 x i8> [[TMP22]], <4 x i8>* [[DST:%.*]]
+; CHECK-NEXT:    store <4 x i8> [[TMP22]], ptr [[DST:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = add nsw i32 %v0, -1
   %tmp1 = add i32 %v1, %tmp
   %tmp2 = sext i32 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %src, i64 %tmp2
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = add i32 %v1, %v0
   %tmp6 = sext i32 %tmp5 to i64
-  %tmp7 = getelementptr inbounds i8, i8* %src, i64 %tmp6
-  %tmp8 = load i8, i8* %tmp7, align 1
+  %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6
+  %tmp8 = load i8, ptr %tmp7, align 1
   %tmp9 = add nsw i32 %v0, 1
   %tmp10 = add i32 %v1, %tmp9
   %tmp11 = sext i32 %tmp10 to i64
-  %tmp12 = getelementptr inbounds i8, i8* %src, i64 %tmp11
-  %tmp13 = load i8, i8* %tmp12, align 1
+  %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11
+  %tmp13 = load i8, ptr %tmp12, align 1
   %tmp14 = add nsw i32 %v0, 2
   %tmp15 = add i32 %v1, %tmp14
   %tmp16 = sext i32 %tmp15 to i64
-  %tmp17 = getelementptr inbounds i8, i8* %src, i64 %tmp16
-  %tmp18 = load i8, i8* %tmp17, align 1
+  %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16
+  %tmp18 = load i8, ptr %tmp17, align 1
   %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0
   %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1
   %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2
   %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3
-  store <4 x i8> %tmp22, <4 x i8>* %dst
+  store <4 x i8> %tmp22, ptr %dst
   ret void
 }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll b/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll
index 3d3dd1701c945..8479ce8327c7f 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll
@@ -6,11 +6,9 @@ declare void @llvm.sideeffect()
 
 ; load-store vectorization across a @llvm.sideeffect.
 
-define void @test_sideeffect(float* %p) {
+define void @test_sideeffect(ptr %p) {
 ; CHECK-LABEL: @test_sideeffect(
-; CHECK-NEXT:    [[P0:%.*]] = getelementptr float, float* [[P:%.*]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[P0]] to <4 x float>*
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
 ; CHECK-NEXT:    [[L01:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[L12:%.*]] = extractelement <4 x float> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[L23:%.*]] = extractelement <4 x float> [[TMP2]], i32 2
@@ -21,34 +19,30 @@ define void @test_sideeffect(float* %p) {
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x float> [[TMP3]], float [[L12]], i32 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x float> [[TMP4]], float [[L23]], i32 2
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[L34]], i32 3
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast float* [[P0]] to <4 x float>*
-; CHECK-NEXT:    store <4 x float> [[TMP6]], <4 x float>* [[TMP7]], align 16
+; CHECK-NEXT:    store <4 x float> [[TMP6]], ptr [[P]], align 16
 ; CHECK-NEXT:    ret void
 ;
-  %p0 = getelementptr float, float* %p, i64 0
-  %p1 = getelementptr float, float* %p, i64 1
-  %p2 = getelementptr float, float* %p, i64 2
-  %p3 = getelementptr float, float* %p, i64 3
-  %l0 = load float, float* %p0, align 16
-  %l1 = load float, float* %p1
-  %l2 = load float, float* %p2
+  %p1 = getelementptr float, ptr %p, i64 1
+  %p2 = getelementptr float, ptr %p, i64 2
+  %p3 = getelementptr float, ptr %p, i64 3
+  %l0 = load float, ptr %p, align 16
+  %l1 = load float, ptr %p1
+  %l2 = load float, ptr %p2
   call void @llvm.sideeffect()
-  %l3 = load float, float* %p3
-  store float %l0, float* %p0, align 16
+  %l3 = load float, ptr %p3
+  store float %l0, ptr %p, align 16
   call void @llvm.sideeffect()
-  store float %l1, float* %p1
-  store float %l2, float* %p2
-  store float %l3, float* %p3
+  store float %l1, ptr %p1
+  store float %l2, ptr %p2
+  store float %l3, ptr %p3
   ret void
 }
 
 declare void @foo()
 
-define void @test_inaccessiblememonly_nounwind_willreturn(float* %p) {
+define void @test_inaccessiblememonly_nounwind_willreturn(ptr %p) {
 ; CHECK-LABEL: @test_inaccessiblememonly_nounwind_willreturn(
-; CHECK-NEXT:    [[P0:%.*]] = getelementptr float, float* [[P:%.*]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[P0]] to <4 x float>*
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
 ; CHECK-NEXT:    [[L01:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[L12:%.*]] = extractelement <4 x float> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[L23:%.*]] = extractelement <4 x float> [[TMP2]], i32 2
@@ -59,93 +53,87 @@ define void @test_inaccessiblememonly_nounwind_willreturn(float* %p) {
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x float> [[TMP3]], float [[L12]], i32 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x float> [[TMP4]], float [[L23]], i32 2
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[L34]], i32 3
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast float* [[P0]] to <4 x float>*
-; CHECK-NEXT:    store <4 x float> [[TMP6]], <4 x float>* [[TMP7]], align 16
+; CHECK-NEXT:    store <4 x float> [[TMP6]], ptr [[P]], align 16
 ; CHECK-NEXT:    ret void
 ;
-  %p0 = getelementptr float, float* %p, i64 0
-  %p1 = getelementptr float, float* %p, i64 1
-  %p2 = getelementptr float, float* %p, i64 2
-  %p3 = getelementptr float, float* %p, i64 3
-  %l0 = load float, float* %p0, align 16
-  %l1 = load float, float* %p1
-  %l2 = load float, float* %p2
+  %p1 = getelementptr float, ptr %p, i64 1
+  %p2 = getelementptr float, ptr %p, i64 2
+  %p3 = getelementptr float, ptr %p, i64 3
+  %l0 = load float, ptr %p, align 16
+  %l1 = load float, ptr %p1
+  %l2 = load float, ptr %p2
   call void @foo() inaccessiblememonly nounwind willreturn
-  %l3 = load float, float* %p3
-  store float %l0, float* %p0, align 16
+  %l3 = load float, ptr %p3
+  store float %l0, ptr %p, align 16
   call void @foo() inaccessiblememonly nounwind willreturn
-  store float %l1, float* %p1
-  store float %l2, float* %p2
-  store float %l3, float* %p3
+  store float %l1, ptr %p1
+  store float %l2, ptr %p2
+  store float %l3, ptr %p3
   ret void
 }
 
-define void @test_inaccessiblememonly_not_willreturn(float* %p) {
+define void @test_inaccessiblememonly_not_willreturn(ptr %p) {
 ; CHECK-LABEL: @test_inaccessiblememonly_not_willreturn(
-; CHECK-NEXT:    [[P0:%.*]] = getelementptr float, float* [[P:%.*]], i64 0
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr float, float* [[P]], i64 1
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr float, float* [[P]], i64 2
-; CHECK-NEXT:    [[P3:%.*]] = getelementptr float, float* [[P]], i64 3
-; CHECK-NEXT:    [[L0:%.*]] = load float, float* [[P0]], align 16
-; CHECK-NEXT:    [[L1:%.*]] = load float, float* [[P1]], align 4
-; CHECK-NEXT:    [[L2:%.*]] = load float, float* [[P2]], align 4
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr float, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr float, ptr [[P]], i64 2
+; CHECK-NEXT:    [[P3:%.*]] = getelementptr float, ptr [[P]], i64 3
+; CHECK-NEXT:    [[L0:%.*]] = load float, ptr [[P]], align 16
+; CHECK-NEXT:    [[L1:%.*]] = load float, ptr [[P1]], align 4
+; CHECK-NEXT:    [[L2:%.*]] = load float, ptr [[P2]], align 4
 ; CHECK-NEXT:    call void @foo() #[[ATTR2:[0-9]+]]
-; CHECK-NEXT:    [[L3:%.*]] = load float, float* [[P3]], align 4
-; CHECK-NEXT:    store float [[L0]], float* [[P0]], align 16
+; CHECK-NEXT:    [[L3:%.*]] = load float, ptr [[P3]], align 4
+; CHECK-NEXT:    store float [[L0]], ptr [[P]], align 16
 ; CHECK-NEXT:    call void @foo() #[[ATTR2]]
-; CHECK-NEXT:    store float [[L1]], float* [[P1]], align 4
-; CHECK-NEXT:    store float [[L2]], float* [[P2]], align 4
-; CHECK-NEXT:    store float [[L3]], float* [[P3]], align 4
+; CHECK-NEXT:    store float [[L1]], ptr [[P1]], align 4
+; CHECK-NEXT:    store float [[L2]], ptr [[P2]], align 4
+; CHECK-NEXT:    store float [[L3]], ptr [[P3]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %p0 = getelementptr float, float* %p, i64 0
-  %p1 = getelementptr float, float* %p, i64 1
-  %p2 = getelementptr float, float* %p, i64 2
-  %p3 = getelementptr float, float* %p, i64 3
-  %l0 = load float, float* %p0, align 16
-  %l1 = load float, float* %p1
-  %l2 = load float, float* %p2
+  %p1 = getelementptr float, ptr %p, i64 1
+  %p2 = getelementptr float, ptr %p, i64 2
+  %p3 = getelementptr float, ptr %p, i64 3
+  %l0 = load float, ptr %p, align 16
+  %l1 = load float, ptr %p1
+  %l2 = load float, ptr %p2
   call void @foo() inaccessiblememonly nounwind
-  %l3 = load float, float* %p3
-  store float %l0, float* %p0, align 16
+  %l3 = load float, ptr %p3
+  store float %l0, ptr %p, align 16
   call void @foo() inaccessiblememonly nounwind
-  store float %l1, float* %p1
-  store float %l2, float* %p2
-  store float %l3, float* %p3
+  store float %l1, ptr %p1
+  store float %l2, ptr %p2
+  store float %l3, ptr %p3
   ret void
 }
 
-define void @test_inaccessiblememonly_not_nounwind(float* %p) {
+define void @test_inaccessiblememonly_not_nounwind(ptr %p) {
 ; CHECK-LABEL: @test_inaccessiblememonly_not_nounwind(
-; CHECK-NEXT:    [[P0:%.*]] = getelementptr float, float* [[P:%.*]], i64 0
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr float, float* [[P]], i64 1
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr float, float* [[P]], i64 2
-; CHECK-NEXT:    [[P3:%.*]] = getelementptr float, float* [[P]], i64 3
-; CHECK-NEXT:    [[L0:%.*]] = load float, float* [[P0]], align 16
-; CHECK-NEXT:    [[L1:%.*]] = load float, float* [[P1]], align 4
-; CHECK-NEXT:    [[L2:%.*]] = load float, float* [[P2]], align 4
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr float, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr float, ptr [[P]], i64 2
+; CHECK-NEXT:    [[P3:%.*]] = getelementptr float, ptr [[P]], i64 3
+; CHECK-NEXT:    [[L0:%.*]] = load float, ptr [[P]], align 16
+; CHECK-NEXT:    [[L1:%.*]] = load float, ptr [[P1]], align 4
+; CHECK-NEXT:    [[L2:%.*]] = load float, ptr [[P2]], align 4
 ; CHECK-NEXT:    call void @foo() #[[ATTR3:[0-9]+]]
-; CHECK-NEXT:    [[L3:%.*]] = load float, float* [[P3]], align 4
-; CHECK-NEXT:    store float [[L0]], float* [[P0]], align 16
+; CHECK-NEXT:    [[L3:%.*]] = load float, ptr [[P3]], align 4
+; CHECK-NEXT:    store float [[L0]], ptr [[P]], align 16
 ; CHECK-NEXT:    call void @foo() #[[ATTR3]]
-; CHECK-NEXT:    store float [[L1]], float* [[P1]], align 4
-; CHECK-NEXT:    store float [[L2]], float* [[P2]], align 4
-; CHECK-NEXT:    store float [[L3]], float* [[P3]], align 4
+; CHECK-NEXT:    store float [[L1]], ptr [[P1]], align 4
+; CHECK-NEXT:    store float [[L2]], ptr [[P2]], align 4
+; CHECK-NEXT:    store float [[L3]], ptr [[P3]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %p0 = getelementptr float, float* %p, i64 0
-  %p1 = getelementptr float, float* %p, i64 1
-  %p2 = getelementptr float, float* %p, i64 2
-  %p3 = getelementptr float, float* %p, i64 3
-  %l0 = load float, float* %p0, align 16
-  %l1 = load float, float* %p1
-  %l2 = load float, float* %p2
+  %p1 = getelementptr float, ptr %p, i64 1
+  %p2 = getelementptr float, ptr %p, i64 2
+  %p3 = getelementptr float, ptr %p, i64 3
+  %l0 = load float, ptr %p, align 16
+  %l1 = load float, ptr %p1
+  %l2 = load float, ptr %p2
   call void @foo() inaccessiblememonly willreturn
-  %l3 = load float, float* %p3
-  store float %l0, float* %p0, align 16
+  %l3 = load float, ptr %p3
+  store float %l0, ptr %p, align 16
   call void @foo() inaccessiblememonly willreturn
-  store float %l1, float* %p1
-  store float %l2, float* %p2
-  store float %l3, float* %p3
+  store float %l1, ptr %p1
+  store float %l2, ptr %p2
+  store float %l3, ptr %p3
   ret void
 }

