[llvm] 2d69827 - [Transforms] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits@lists.llvm.org>
Mon Feb 5 02:57:42 PST 2024


Author: Nikita Popov
Date: 2024-02-05T11:57:34+01:00
New Revision: 2d69827c5c754f0eca98e497ecf0e52ed54b4fd3

URL: https://github.com/llvm/llvm-project/commit/2d69827c5c754f0eca98e497ecf0e52ed54b4fd3
DIFF: https://github.com/llvm/llvm-project/commit/2d69827c5c754f0eca98e497ecf0e52ed54b4fd3.diff

LOG: [Transforms] Convert tests to opaque pointers (NFC)
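
The rewrite is mechanical: typed pointer spellings in the test IR become
the opaque ptr type, with no functional change to what the tests exercise.
A representative hunk from the change below:

  -  store i8 10, i8* %p
  +  store i8 10, ptr %p

The same pattern covers loads, GEPs, icmps, and bitcasts; pointer-typed
intrinsic mangling is updated (llvm.memcpy.p0i8.p0i8.i64 becomes
llvm.memcpy.p0.p0.i64), and constant GEPs that reduce to a zero offset are
folded away (getelementptr inbounds (%struct.S, ptr @Vs1, i64 0, i32 0)
becomes @Vs1).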

Added: 
    

Modified: 
    llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
    llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
    llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll
    llvm/test/Transforms/ArgumentPromotion/store-into-inself.ll
    llvm/test/Transforms/Attributor/convergent.ll
    llvm/test/Transforms/Attributor/dereferenceable-2-inseltpoison.ll
    llvm/test/Transforms/Attributor/dereferenceable-2.ll
    llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
    llvm/test/Transforms/CodeGenPrepare/AArch64/combine-address-mode.ll
    llvm/test/Transforms/CodeGenPrepare/X86/sink-addrmode-base.ll
    llvm/test/Transforms/ConstraintElimination/reproducer-remarks.ll
    llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
    llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
    llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll
    llvm/test/Transforms/Coroutines/coro-async-end-bug.ll
    llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll
    llvm/test/Transforms/Coroutines/coro-async-phi.ll
    llvm/test/Transforms/Coroutines/coro-async-unreachable.ll
    llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
    llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll
    llvm/test/Transforms/CorrelatedValuePropagation/range.ll
    llvm/test/Transforms/CorrelatedValuePropagation/select.ll
    llvm/test/Transforms/CorrelatedValuePropagation/sub.ll
    llvm/test/Transforms/DeadArgElim/byref.ll
    llvm/test/Transforms/DeadArgElim/fct_ptr.ll
    llvm/test/Transforms/GVN/condprop-memdep-invalidation.ll
    llvm/test/Transforms/GVN/pr17732.ll
    llvm/test/Transforms/GVNHoist/hoist-recursive-geps.ll
    llvm/test/Transforms/GVNHoist/infinite-loop-direct.ll
    llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
    llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll
    llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
    llvm/test/Transforms/GlobalOpt/GSROA-section.ll
    llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
    llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-other-constexpr.ll
    llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-ptrtoint-add-constexpr.ll
    llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
    llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
    llvm/test/Transforms/GlobalOpt/globalsra.ll
    llvm/test/Transforms/GlobalOpt/invariant.ll
    llvm/test/Transforms/GlobalOpt/malloc-promote-opaque-ptr.ll
    llvm/test/Transforms/GlobalOpt/sra-many-stores-initializers.ll
    llvm/test/Transforms/GlobalOpt/sra-many-stores-once.ll
    llvm/test/Transforms/GlobalOpt/sra-many-stores.ll
    llvm/test/Transforms/IROutliner/nooutline-attribute.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
    llvm/test/Transforms/Inline/call-intrinsic-objectsize.ll
    llvm/test/Transforms/Inline/inline-byval-bonus.ll
    llvm/test/Transforms/Inline/inlined-loop-metadata-inseltpoison.ll
    llvm/test/Transforms/Inline/inlined-loop-metadata.ll
    llvm/test/Transforms/InstCombine/alloca.ll
    llvm/test/Transforms/InstCombine/call.ll
    llvm/test/Transforms/InstCombine/fmul.ll
    llvm/test/Transforms/InstCombine/memchr-8.ll
    llvm/test/Transforms/InstCombine/scalable-vector-struct.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/opaque_ptr.ll
    llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
    llvm/test/Transforms/LoopFlatten/loop-flatten-negative.ll
    llvm/test/Transforms/LoopFlatten/loop-flatten-version.ll
    llvm/test/Transforms/LoopFlatten/widen-iv.ll
    llvm/test/Transforms/LoopIdiom/lir-heurs-multi-block-loop.ll
    llvm/test/Transforms/LoopInterchange/profitability.ll
    llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
    llvm/test/Transforms/LoopSimplify/do-preheader-dbg-inseltpoison.ll
    llvm/test/Transforms/LoopSimplify/do-preheader-dbg.ll
    llvm/test/Transforms/LoopStrengthReduce/Power/memory-intrinsic.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
    llvm/test/Transforms/LoopStrengthReduce/lsr-term-fold-negative-testcase.ll
    llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
    llvm/test/Transforms/LoopUnroll/peel-loop-conditions.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
    llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
    llvm/test/Transforms/LoopVectorize/lcssa-crashes.ll
    llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
    llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
    llvm/test/Transforms/MoveAutoInit/clobber.ll
    llvm/test/Transforms/NewGVN/flags-simplify.ll
    llvm/test/Transforms/NewGVN/no_speculative_loads_with_asan.ll
    llvm/test/Transforms/NewGVN/pr17732.ll
    llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
    llvm/test/Transforms/PGOProfile/coverage.ll
    llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll
    llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
    llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-vectorized.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
    llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
    llvm/test/Transforms/SLPVectorizer/X86/opaque-ptr.ll
    llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll
    llvm/test/Transforms/SROA/invariant-group.ll
    llvm/test/Transforms/SROA/phi-gep.ll
    llvm/test/Transforms/SROA/scalable-vector-struct.ll
    llvm/test/Transforms/SROA/sroa-common-type-fail-promotion.ll
    llvm/test/Transforms/SROA/vector-promotion.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
    llvm/test/Transforms/Util/pr49185.ll
    llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
    llvm/test/Transforms/VectorCombine/X86/load-widening.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll b/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
index fe0a1bcd3870d..1400ee7f703ca 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
@@ -172,7 +172,7 @@ define i32 @loadCombine_4consecutive_alias(ptr %p) {
   %p2 = getelementptr i8, ptr %p, i32 2
   %p3 = getelementptr i8, ptr %p, i32 3
   %l1 = load i8, ptr %p
-  store i8 10, i8* %p
+  store i8 10, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
   %l4 = load i8, ptr %p3
@@ -223,7 +223,7 @@ define i32 @loadCombine_4consecutive_alias_BE(ptr %p) {
   %p2 = getelementptr i8, ptr %p, i32 2
   %p3 = getelementptr i8, ptr %p, i32 3
   %l1 = load i8, ptr %p
-  store i8 10, i8* %p
+  store i8 10, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
   %l4 = load i8, ptr %p3
@@ -271,7 +271,7 @@ define i32 @loadCombine_4consecutive_alias2(ptr %p, ptr %pstr) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %pstr
+  store i8 10, ptr %pstr
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -317,7 +317,7 @@ define i32 @loadCombine_4consecutive_alias2_BE(ptr %p, ptr %pstr) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %pstr
+  store i8 10, ptr %pstr
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -364,8 +364,8 @@ define i32 @loadCombine_4consecutive_alias3(ptr %p) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %p3
-  store i8 5, i8* %p
+  store i8 10, ptr %p3
+  store i8 5, ptr %p
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -412,8 +412,8 @@ define i32 @loadCombine_4consecutive_alias3_BE(ptr %p) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %p3
-  store i8 5, i8* %p
+  store i8 10, ptr %p3
+  store i8 5, ptr %p
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -466,13 +466,13 @@ define i32 @loadCombine_4consecutive_with_alias4(ptr %p, ptr %ps) {
   %ps2 = getelementptr i8, ptr %ps, i32 2
   %ps3 = getelementptr i8, ptr %ps, i32 3
   %l1 = load i8, ptr %p
-  store i8 10, i8* %ps
+  store i8 10, ptr %ps
   %l2 = load i8, ptr %p1
-  store i8 10, i8* %ps1
+  store i8 10, ptr %ps1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %ps2
+  store i8 10, ptr %ps2
   %l4 = load i8, ptr %p3
-  store i8 10, i8* %ps3
+  store i8 10, ptr %ps3
 
   %e1 = zext i8 %l1 to i32
   %e2 = zext i8 %l2 to i32

diff --git a/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll b/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
index 88f92baa63ef0..0aa6f9ecdf884 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
@@ -180,7 +180,7 @@ define i32 @loadCombine_4consecutive_alias(ptr %p) {
   %p2 = getelementptr i8, ptr %p, i32 2
   %p3 = getelementptr i8, ptr %p, i32 3
   %l1 = load i8, ptr %p
-  store i8 10, i8* %p
+  store i8 10, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
   %l4 = load i8, ptr %p3
@@ -231,7 +231,7 @@ define i32 @loadCombine_4consecutive_alias_BE(ptr %p) {
   %p2 = getelementptr i8, ptr %p, i32 2
   %p3 = getelementptr i8, ptr %p, i32 3
   %l1 = load i8, ptr %p
-  store i8 10, i8* %p
+  store i8 10, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
   %l4 = load i8, ptr %p3
@@ -295,7 +295,7 @@ define i32 @loadCombine_4consecutive_alias2(ptr %p, ptr %pstr) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %pstr
+  store i8 10, ptr %pstr
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -357,7 +357,7 @@ define i32 @loadCombine_4consecutive_alias2_BE(ptr %p, ptr %pstr) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %pstr
+  store i8 10, ptr %pstr
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -421,8 +421,8 @@ define i32 @loadCombine_4consecutive_alias3(ptr %p) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %p3
-  store i8 5, i8* %p
+  store i8 10, ptr %p3
+  store i8 5, ptr %p
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -486,8 +486,8 @@ define i32 @loadCombine_4consecutive_alias3_BE(ptr %p) {
   %l1 = load i8, ptr %p
   %l2 = load i8, ptr %p1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %p3
-  store i8 5, i8* %p
+  store i8 10, ptr %p3
+  store i8 5, ptr %p
   %l4 = load i8, ptr %p3
 
   %e1 = zext i8 %l1 to i32
@@ -540,13 +540,13 @@ define i32 @loadCombine_4consecutive_with_alias4(ptr %p, ptr %ps) {
   %ps2 = getelementptr i8, ptr %ps, i32 2
   %ps3 = getelementptr i8, ptr %ps, i32 3
   %l1 = load i8, ptr %p
-  store i8 10, i8* %ps
+  store i8 10, ptr %ps
   %l2 = load i8, ptr %p1
-  store i8 10, i8* %ps1
+  store i8 10, ptr %ps1
   %l3 = load i8, ptr %p2
-  store i8 10, i8* %ps2
+  store i8 10, ptr %ps2
   %l4 = load i8, ptr %p3
-  store i8 10, i8* %ps3
+  store i8 10, ptr %ps3
 
   %e1 = zext i8 %l1 to i32
   %e2 = zext i8 %l2 to i32

diff --git a/llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll b/llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll
index 15ba83b22dde6..2195e437bc863 100644
--- a/llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll
@@ -12,7 +12,7 @@ target triple = "i386-pc-windows-msvc19.11.0"
 
 %struct.a = type { i8 }
 
-define internal x86_thiscallcc void @internalfun(ptr %this, <{ %struct.a }>* inalloca(<{ %struct.a }>)) {
+define internal x86_thiscallcc void @internalfun(ptr %this, ptr inalloca(<{ %struct.a }>)) {
 ; ARGPROMOTION-LABEL: define {{[^@]+}}@internalfun
 ; ARGPROMOTION-SAME: (ptr [[THIS:%.*]], ptr inalloca(<{ [[STRUCT_A:%.*]] }>) [[TMP0:%.*]]) {
 ; ARGPROMOTION-NEXT:  entry:
@@ -34,11 +34,11 @@ define internal x86_thiscallcc void @internalfun(ptr %this, <{ %struct.a }>* ina
 ; GLOBALOPT_ARGPROMOTION-NEXT:    ret void
 ;
 entry:
-  %a = getelementptr inbounds <{ %struct.a }>, <{ %struct.a }>* %0, i32 0, i32 0
+  %a = getelementptr inbounds <{ %struct.a }>, ptr %0, i32 0, i32 0
   %argmem = alloca inalloca <{ %struct.a }>, align 4
-  %1 = getelementptr inbounds <{ %struct.a }>, <{ %struct.a }>* %argmem, i32 0, i32 0
+  %1 = getelementptr inbounds <{ %struct.a }>, ptr %argmem, i32 0, i32 0
   %call = call x86_thiscallcc ptr @copy_ctor(ptr %1, ptr dereferenceable(1) %a)
-  call void @ext(<{ %struct.a }>* inalloca(<{ %struct.a }>) %argmem)
+  call void @ext(ptr inalloca(<{ %struct.a }>) %argmem)
   ret void
 }
 
@@ -62,12 +62,12 @@ define void @exportedfun(ptr %a) {
 ;
   %inalloca.save = tail call ptr @llvm.stacksave()
   %argmem = alloca inalloca <{ %struct.a }>, align 4
-  call x86_thiscallcc void @internalfun(ptr %a, <{ %struct.a }>* inalloca(<{ %struct.a }>) %argmem)
+  call x86_thiscallcc void @internalfun(ptr %a, ptr inalloca(<{ %struct.a }>) %argmem)
   call void @llvm.stackrestore(ptr %inalloca.save)
   ret void
 }
 
 declare x86_thiscallcc ptr @copy_ctor(ptr returned, ptr dereferenceable(1))
-declare void @ext(<{ %struct.a }>* inalloca(<{ %struct.a }>))
+declare void @ext(ptr inalloca(<{ %struct.a }>))
 declare ptr @llvm.stacksave()
 declare void @llvm.stackrestore(ptr)

diff --git a/llvm/test/Transforms/ArgumentPromotion/store-into-inself.ll b/llvm/test/Transforms/ArgumentPromotion/store-into-inself.ll
index be94af6a0bd03..0db42a97841f4 100644
--- a/llvm/test/Transforms/ArgumentPromotion/store-into-inself.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/store-into-inself.ll
@@ -88,10 +88,10 @@ define i32 @main() nounwind  {
 ;
 entry:
   %S = alloca %struct.ss, align 32
-  %temp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0
-  store i32 1, i32* %temp1, align 4
-  %temp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
-  store i64 2, i64* %temp4, align 8
+  %temp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0
+  store i32 1, ptr %temp1, align 4
+  %temp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1
+  store i64 2, ptr %temp4, align 8
   call void @f(ptr byval(ptr) align 4 %S) nounwind
   call void @g(ptr byval(ptr) align 4 %S) nounwind
   call void @h(ptr byval(ptr) align 4 %S) nounwind

diff --git a/llvm/test/Transforms/Attributor/convergent.ll b/llvm/test/Transforms/Attributor/convergent.ll
index 74db2b2ee12a8..ccb606a64187a 100644
--- a/llvm/test/Transforms/Attributor/convergent.ll
+++ b/llvm/test/Transforms/Attributor/convergent.ll
@@ -88,26 +88,26 @@ define i32 @calls_defined_with_asm(i32 %a, i32 %b) convergent {
   ret i32 %c
 }
 
-declare void @llvm.convergent.copy.p0i8.p0i8.i64(ptr %dest, ptr %src, i64 %size, i1 %isVolatile) #0
+declare void @llvm.convergent.copy.p0.p0.i64(ptr %dest, ptr %src, i64 %size, i1 %isVolatile) #0
 
 define void @calls_convergent_intrinsic(ptr %dest, ptr %src, i64 %size) convergent {
 ; TUNIT: Function Attrs: convergent mustprogress nofree nosync nounwind willreturn memory(argmem: readwrite)
 ; TUNIT-LABEL: define {{[^@]+}}@calls_convergent_intrinsic
 ; TUNIT-SAME: (ptr nofree [[DEST:%.*]], ptr nofree [[SRC:%.*]], i64 [[SIZE:%.*]]) #[[ATTR3:[0-9]+]] {
-; TUNIT-NEXT:    call void @llvm.convergent.copy.p0i8.p0i8.i64(ptr nofree [[DEST]], ptr nofree [[SRC]], i64 [[SIZE]], i1 noundef false) #[[ATTR5:[0-9]+]]
+; TUNIT-NEXT:    call void @llvm.convergent.copy.p0.p0.i64(ptr nofree [[DEST]], ptr nofree [[SRC]], i64 [[SIZE]], i1 noundef false) #[[ATTR5:[0-9]+]]
 ; TUNIT-NEXT:    ret void
 ;
 ; CGSCC: Function Attrs: convergent mustprogress nofree nosync nounwind willreturn memory(argmem: readwrite)
 ; CGSCC-LABEL: define {{[^@]+}}@calls_convergent_intrinsic
 ; CGSCC-SAME: (ptr nofree [[DEST:%.*]], ptr nofree [[SRC:%.*]], i64 [[SIZE:%.*]]) #[[ATTR4:[0-9]+]] {
-; CGSCC-NEXT:    call void @llvm.convergent.copy.p0i8.p0i8.i64(ptr nofree [[DEST]], ptr nofree [[SRC]], i64 [[SIZE]], i1 noundef false) #[[ATTR7:[0-9]+]]
+; CGSCC-NEXT:    call void @llvm.convergent.copy.p0.p0.i64(ptr nofree [[DEST]], ptr nofree [[SRC]], i64 [[SIZE]], i1 noundef false) #[[ATTR7:[0-9]+]]
 ; CGSCC-NEXT:    ret void
 ;
-  call void @llvm.convergent.copy.p0i8.p0i8.i64(ptr %dest, ptr %src, i64 %size, i1 false)
+  call void @llvm.convergent.copy.p0.p0.i64(ptr %dest, ptr %src, i64 %size, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(ptr %dest, ptr %src, i64 %size, i1 %isVolatile) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 %size, i1 %isVolatile) #0
 
 define void @calls_intrinsic(ptr %dest, ptr %src, i64 %size) convergent {
 ; TUNIT: Function Attrs: convergent mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
@@ -122,7 +122,7 @@ define void @calls_intrinsic(ptr %dest, ptr %src, i64 %size) convergent {
 ; CGSCC-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr nocapture nofree writeonly [[DEST]], ptr nocapture nofree readonly [[SRC]], i64 [[SIZE]], i1 noundef false) #[[ATTR7]]
 ; CGSCC-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(ptr %dest, ptr %src, i64 %size, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 %size, i1 false)
   ret void
 }
 

diff --git a/llvm/test/Transforms/Attributor/dereferenceable-2-inseltpoison.ll b/llvm/test/Transforms/Attributor/dereferenceable-2-inseltpoison.ll
index 67f1f1be1646c..c7f76a291b836 100644
--- a/llvm/test/Transforms/Attributor/dereferenceable-2-inseltpoison.ll
+++ b/llvm/test/Transforms/Attributor/dereferenceable-2-inseltpoison.ll
@@ -286,7 +286,7 @@ define void @multi_index_gep(ptr %ptr) {
 ; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]]) #[[ATTR2]] {
 ; CHECK-NEXT:    ret void
 ;
-  %arrayidx00 = getelementptr <4 x i8>, <4 x i8>* %ptr, i64 0, i64 0
+  %arrayidx00 = getelementptr <4 x i8>, ptr %ptr, i64 0, i64 0
   %t0 = load i8, ptr %arrayidx00
   ret void
 }

diff --git a/llvm/test/Transforms/Attributor/dereferenceable-2.ll b/llvm/test/Transforms/Attributor/dereferenceable-2.ll
index 4f48703e35de7..435544dc8e844 100644
--- a/llvm/test/Transforms/Attributor/dereferenceable-2.ll
+++ b/llvm/test/Transforms/Attributor/dereferenceable-2.ll
@@ -286,7 +286,7 @@ define void @multi_index_gep(ptr %ptr) {
 ; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]]) #[[ATTR2]] {
 ; CHECK-NEXT:    ret void
 ;
-  %arrayidx00 = getelementptr <4 x i8>, <4 x i8>* %ptr, i64 0, i64 0
+  %arrayidx00 = getelementptr <4 x i8>, ptr %ptr, i64 0, i64 0
   %t0 = load i8, ptr %arrayidx00
   ret void
 }

diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
index 99a4c0aac7a23..7a35b5c856097 100644
--- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
@@ -787,7 +787,7 @@ define i32 @test_range_merge1() {
 ;
   store <2 x i32> <i32 1, i32 1>, ptr @Vs1
   store float 2.000000e+00, ptr getelementptr inbounds (%struct.S, ptr @Vs1, i64 0, i32 4)
-  %l0 = load i32, ptr getelementptr inbounds (%struct.S, ptr @Vs1, i64 0, i32 0)
+  %l0 = load i32, ptr @Vs1
   %l1 = load i32, ptr getelementptr inbounds (%struct.S, ptr @Vs1, i64 0, i32 1)
   %add = add i32 %l0, %l1
   ret i32 %add
@@ -814,7 +814,7 @@ define i32 @test_range_merge2() {
 ;
   store <2 x i32> <i32 3, i32 4>, ptr @Vs2
   store float 2.000000e+00, ptr getelementptr inbounds (%struct.S, ptr @Vs2, i64 0, i32 4)
-  %l0 = load i32, ptr getelementptr inbounds (%struct.S, ptr @Vs2, i64 0, i32 0)
+  %l0 = load i32, ptr @Vs2
   %l1 = load i32, ptr getelementptr inbounds (%struct.S, ptr @Vs2, i64 0, i32 1)
   %add = add i32 %l0, %l1
   ret i32 %add

diff --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/combine-address-mode.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/combine-address-mode.ll
index 25d4492f4c169..a38cfbd97a447 100644
--- a/llvm/test/Transforms/CodeGenPrepare/AArch64/combine-address-mode.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/combine-address-mode.ll
@@ -56,8 +56,8 @@ bb9:                                              ; preds = %bb1
   br label %bb6
 
 bb10:                                             ; preds = %bb6, %bb
-  %i11 = phi ptr [ getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0), %bb ], [ %i7, %bb6 ]
-  %i12 = phi ptr [ getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0), %bb ], [ %i8, %bb6 ]
+  %i11 = phi ptr [ @_MergedGlobals, %bb ], [ %i7, %bb6 ]
+  %i12 = phi ptr [ @_MergedGlobals, %bb ], [ %i8, %bb6 ]
   br label %bb13
 
 bb13:                                             ; preds = %bb18, %bb10

diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrmode-base.ll b/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrmode-base.ll
index 08e822f7e2112..61107a9b70163 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrmode-base.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrmode-base.ll
@@ -480,34 +480,34 @@ fallthrough:
 }
 
 ; Different types but null is the first?
-define i32 @test19(i1 %cond1, i1 %cond2, i64* %b2, i8* %b1) {
+define i32 @test19(i1 %cond1, i1 %cond2, ptr %b2, ptr %b1) {
 ; CHECK-LABEL: @test19
 entry:
-  %g1 = getelementptr inbounds i64, i64* %b2, i64 5
-  %bc1 = bitcast i64* %g1 to i32*
+  %g1 = getelementptr inbounds i64, ptr %b2, i64 5
+  %bc1 = bitcast ptr %g1 to ptr
   br i1 %cond1, label %if.then1, label %if.then2
 
 if.then1:
-  %g2 = getelementptr inbounds i8, i8* %b1, i64 40
-  %bc2 = bitcast i8* %g2 to i32*
+  %g2 = getelementptr inbounds i8, ptr %b1, i64 40
+  %bc2 = bitcast ptr %g2 to ptr
   br label %fallthrough
 
 if.then2:
-  %bc1_1 = bitcast i64* %g1 to i32*
+  %bc1_1 = bitcast ptr %g1 to ptr
   br i1 %cond2, label %fallthrough, label %if.then3
 
 if.then3:
-  %g3 = getelementptr inbounds i64, i64* null, i64 5
-  %bc1_2 = bitcast i64* %g3 to i32*
+  %g3 = getelementptr inbounds i64, ptr null, i64 5
+  %bc1_2 = bitcast ptr %g3 to ptr
   br label %fallthrough
 
 fallthrough:
 ; CHECK-NOT: sunk_phi
-  %c = phi i32* [%bc2, %if.then1], [%bc1_1, %if.then2], [%bc1_2, %if.then3]
-  %v1 = load i32, i32* %c, align 4
-  %g1_1 = getelementptr inbounds i64, i64* %b2, i64 5
-  %bc1_1_1 = bitcast i64* %g1_1 to i32*
-  %v2 = load i32, i32* %bc1_1_1, align 4
+  %c = phi ptr [%bc2, %if.then1], [%bc1_1, %if.then2], [%bc1_2, %if.then3]
+  %v1 = load i32, ptr %c, align 4
+  %g1_1 = getelementptr inbounds i64, ptr %b2, i64 5
+  %bc1_1_1 = bitcast ptr %g1_1 to ptr
+  %v2 = load i32, ptr %bc1_1_1, align 4
   %v = add i32 %v1, %v2
   ret i32 %v
 }

diff --git a/llvm/test/Transforms/ConstraintElimination/reproducer-remarks.ll b/llvm/test/Transforms/ConstraintElimination/reproducer-remarks.ll
index f912abfc24a8f..63e1826ece5d7 100644
--- a/llvm/test/Transforms/ConstraintElimination/reproducer-remarks.ll
+++ b/llvm/test/Transforms/ConstraintElimination/reproducer-remarks.ll
@@ -19,7 +19,7 @@ define i1 @test_no_known_facts(ptr %dst) {
 entry:
   %dst.0 = getelementptr inbounds ptr, ptr %dst, i64 0
   %upper = getelementptr inbounds ptr, ptr %dst, i64 2
-  %c = icmp ult i32* %dst.0, %upper
+  %c = icmp ult ptr %dst.0, %upper
   ret i1 %c
 }
 

diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
index 83212a79d69b6..07b3bd8fa94ac 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
@@ -16,7 +16,7 @@ declare void @my_other_async_function(ptr %async.ctxt)
   <{ i32 trunc ( ; Relative pointer to async function
        i64 sub (
          i64 ptrtoint (ptr @my_async_function to i64),
-         i64 ptrtoint (ptr getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @my_async_function_fp, i32 0, i32 1) to i64)
+         i64 ptrtoint (ptr getelementptr inbounds (<{ i32, i32 }>, ptr @my_async_function_fp, i32 0, i32 1) to i64)
        )
      to i32),
      i32 128    ; Initial async context size without space for frame
@@ -47,7 +47,7 @@ entry:
   %escaped_addr = alloca i64
 
   %id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
-          ptr bitcast (<{i32, i32}>* @my_async_function_fp to ptr))
+          ptr @my_async_function_fp)
   %hdl = call ptr @llvm.coro.begin(token %id, ptr null)
   call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
   call void @escape(ptr %escaped_addr)

diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
index 6a6d839fae361..2306b72a0055f 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
@@ -16,7 +16,7 @@ declare void @my_other_async_function(ptr %async.ctxt)
   <{ i32 trunc ( ; Relative pointer to async function
        i64 sub (
          i64 ptrtoint (ptr @my_async_function to i64),
-         i64 ptrtoint (ptr getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @my_async_function_fp, i32 0, i32 1) to i64)
+         i64 ptrtoint (ptr getelementptr inbounds (<{ i32, i32 }>, ptr @my_async_function_fp, i32 0, i32 1) to i64)
        )
      to i32),
      i32 128    ; Initial async context size without space for frame
@@ -41,7 +41,7 @@ entry:
   %escaped_addr = alloca i64
 
   %id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
-          ptr bitcast (<{i32, i32}>* @my_async_function_fp to ptr))
+          ptr @my_async_function_fp)
   %hdl = call ptr @llvm.coro.begin(token %id, ptr null)
   call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
   br label %callblock

diff --git a/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll
index 3c9cdf91a02d4..3a2201f4d30c0 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll
@@ -13,7 +13,7 @@ entry:
   %3 = call ptr @llvm.coro.begin(token %2, ptr null)
   store ptr %0, ptr %1, align 8
   %4 = load ptr, ptr %1, align 8
-  %5 = getelementptr inbounds <{ ptr, ptr }>, <{ ptr, ptr }>* %4, i32 0, i32 1
+  %5 = getelementptr inbounds <{ ptr, ptr }>, ptr %4, i32 0, i32 1
   %6 = load ptr, ptr %5, align 8
   %7 = load ptr, ptr %1, align 8
   %8 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @repo.0, ptr %6, ptr %7)

diff --git a/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll
index 3c523dce0abc7..0daa4b0c3da64 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll
@@ -24,7 +24,7 @@ entry:
   store ptr %0, ptr %5, align 8
   %8 = call swiftcc i1 %3(ptr noalias nocapture %1, ptr noalias nocapture %2, ptr swiftself %4) #2
   %9 = load ptr, ptr %5, align 8
-  %10 = getelementptr inbounds <{ ptr, ptr }>, <{ ptr, ptr }>* %9, i32 0, i32 1
+  %10 = getelementptr inbounds <{ ptr, ptr }>, ptr %9, i32 0, i32 1
   %11 = load ptr, ptr %10, align 8
   %12 = load ptr, ptr %5, align 8
   %13 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %7, i1 false, ptr @repo.0, ptr %11, ptr %12, i1 %8, ptr null)

diff --git a/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll b/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll
index aac3cc970962a..c898a1b0c2983 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll
@@ -32,7 +32,7 @@ entry:
                                                                            ptr @callee,
                                                                            ptr %5, i64 0, i64 0, ptr %0)
   %7 = load ptr, ptr %1, align 8
-  %8 = getelementptr inbounds <{ ptr, ptr }>, <{ ptr, ptr }>* %7, i32 0, i32 1
+  %8 = getelementptr inbounds <{ ptr, ptr }>, ptr %7, i32 0, i32 1
   %9 = load ptr, ptr %8, align 8
   %10 = load ptr, ptr %1, align 8
 

diff --git a/llvm/test/Transforms/Coroutines/coro-async-phi.ll b/llvm/test/Transforms/Coroutines/coro-async-phi.ll
index a60ffa6feedd2..25be1eaa059eb 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-phi.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-phi.ll
@@ -47,11 +47,11 @@ bb30:                                             ; preds = %bb
   %i47 = inttoptr i64 %i46 to ptr
   %i52 = call swiftcc ptr @swift_task_alloc(i64 24) #1
   %i54 = load ptr, ptr %i, align 8
-  %i55 = getelementptr inbounds <{ ptr, ptr, i32 }>, <{ ptr, ptr, i32 }>* %i52, i32 0, i32 0
+  %i55 = getelementptr inbounds <{ ptr, ptr, i32 }>, ptr %i52, i32 0, i32 0
   store ptr %i54, ptr %i55, align 8
   %i56 = call ptr @llvm.coro.async.resume()
   call void @use(ptr %i56)
-  %i58 = getelementptr inbounds <{ ptr, ptr, i32 }>, <{ ptr, ptr, i32 }>* %i52, i32 0, i32 1
+  %i58 = getelementptr inbounds <{ ptr, ptr, i32 }>, ptr %i52, i32 0, i32 1
   store ptr %i56, ptr %i58, align 8
   %i61 = call { ptr, ptr } (i32, ptr, ptr, ...) @llvm.coro.suspend.async.sl_p0i8p0s_swift.errorss(i32 256, ptr %i56, ptr @__swift_async_resume_project_context, ptr @__swift_suspend_dispatch_4, ptr %i47, ptr %i52, i64 %i31, i64 0, ptr %arg3)
   %i62 = extractvalue { ptr, ptr } %i61, 0

diff --git a/llvm/test/Transforms/Coroutines/coro-async-unreachable.ll b/llvm/test/Transforms/Coroutines/coro-async-unreachable.ll
index a718c4033cffb..79ef8939b0ecc 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-unreachable.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-unreachable.ll
@@ -32,7 +32,7 @@ entry:
   <{ i32 trunc ( ; Relative pointer to async function
        i64 sub (
          i64 ptrtoint (ptr @unreachable to i64),
-         i64 ptrtoint (ptr getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @unreachable_fp, i32 0, i32 1) to i64)
+         i64 ptrtoint (ptr getelementptr inbounds (<{ i32, i32 }>, ptr @unreachable_fp, i32 0, i32 1) to i64)
        )
      to i32),
      i32 128    ; Initial async context size without space for frame
@@ -45,7 +45,7 @@ entry:
   %proj.2 = getelementptr inbounds { i64, i64 }, ptr %tmp, i64 0, i32 1
 
   %id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
-          ptr bitcast (<{i32, i32}>* @unreachable_fp to ptr))
+          ptr @unreachable_fp)
   %hdl = call ptr @llvm.coro.begin(token %id, ptr null)
   store i64 0, ptr %proj.1, align 8
   store i64 1, ptr %proj.2, align 8
@@ -53,7 +53,7 @@ entry:
 
 	; Begin lowering: apply %my_other_async_function(%args...)
   ; setup callee context
-  %arg1 = bitcast <{ i32, i32}>* @my_other_async_function_fp to ptr
+  %arg1 = bitcast ptr @my_other_async_function_fp to ptr
   %callee_context = call ptr @llvm.coro.async.context.alloc(ptr %task, ptr %arg1)
   ; store the return continuation
   %callee_context.return_to_caller.addr = getelementptr inbounds %async.ctxt, ptr %callee_context, i32 0, i32 1

diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
index 9941d4c070d39..93ed008022e21 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
@@ -537,7 +537,7 @@ define i1 @arg_attribute(ptr nonnull %a) {
 ; CHECK-LABEL: @arg_attribute(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp eq i8* %a, null
+  %cmp = icmp eq ptr %a, null
   ret i1 %cmp
 }
 
@@ -547,7 +547,7 @@ define i1 @call_attribute() {
 ; CHECK-NEXT:    [[A:%.*]] = call ptr @return_nonnull()
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = call i8* @return_nonnull()
+  %a = call ptr @return_nonnull()
   %cmp = icmp eq ptr %a, null
   ret i1 %cmp
 }

diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll b/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll
index bd8497c9634ca..a13ec50bd053a 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll
@@ -81,7 +81,7 @@ define void @test_abs1(ptr %p) {
 ; CHECK-NEXT:    call void @use(i1 [[C2]])
 ; CHECK-NEXT:    ret void
 ;
-  %x = load i32, i32* %p, !range !{i32 -15, i32 10}
+  %x = load i32, ptr %p, !range !{i32 -15, i32 10}
   %a = call i32 @llvm.abs.i32(i32 %x, i1 false)
   %c1 = icmp ule i32 %a, 15
   call void @use(i1 %c1)

diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/range.ll b/llvm/test/Transforms/CorrelatedValuePropagation/range.ll
index 17d979dcff23d..cc66cbe7fce61 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/range.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/range.ll
@@ -243,7 +243,7 @@ define i1 @test8(ptr %p) {
 ; CHECK-NEXT:    [[A:%.*]] = load i64, ptr [[P:%.*]], align 4, !range [[RNG0:![0-9]+]]
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = load i64, i64* %p, !range !{i64 4, i64 255}
+  %a = load i64, ptr %p, !range !{i64 4, i64 255}
   %res = icmp eq i64 %a, 0
   ret i1 %res
 }
@@ -253,7 +253,7 @@ define i1 @test9(ptr %p) {
 ; CHECK-NEXT:    [[A:%.*]] = load i64, ptr [[P:%.*]], align 4, !range [[RNG1:![0-9]+]]
 ; CHECK-NEXT:    ret i1 true
 ;
-  %a = load i64, i64* %p, !range !{i64 0, i64 1}
+  %a = load i64, ptr %p, !range !{i64 0, i64 1}
   %res = icmp eq i64 %a, 0
   ret i1 %res
 }
@@ -263,7 +263,7 @@ define i1 @test10(ptr %p) {
 ; CHECK-NEXT:    [[A:%.*]] = load i64, ptr [[P:%.*]], align 4, !range [[RNG2:![0-9]+]]
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = load i64, i64* %p, !range !{i64 4, i64 8, i64 15, i64 20}
+  %a = load i64, ptr %p, !range !{i64 4, i64 8, i64 15, i64 20}
   %res = icmp eq i64 %a, 0
   ret i1 %res
 }
@@ -278,7 +278,7 @@ define i1 @test11() {
 ; CHECK:       next:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %positive = load i32, i32* @g, !range !{i32 1, i32 2048}
+  %positive = load i32, ptr @g, !range !{i32 1, i32 2048}
   %add = add i32 %positive, 1
   %test = icmp sgt i32 %add, 0
   br label %next
@@ -661,7 +661,7 @@ define i1 @test15(i32 %a) {
 ; CHECK:       else:
 ; CHECK-NEXT:    ret i1 false
 ;
-  %limit = load i32, i32* @limit, !range !{i32 0, i32 256}
+  %limit = load i32, ptr @limit, !range !{i32 0, i32 256}
   %cmp = icmp ult i32 %a, %limit
   br i1 %cmp, label %then, label %else
 

diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/select.ll b/llvm/test/Transforms/CorrelatedValuePropagation/select.ll
index 9842328db7020..deece3696cab3 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/select.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/select.ll
@@ -204,7 +204,7 @@ define i1 @test1(ptr %p, i1 %unknown) {
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %pval = load i32, i32* %p
+  %pval = load i32, ptr %p
   %cmp1 = icmp slt i32 %pval, 255
   br i1 %cmp1, label %next, label %exit
 
@@ -231,7 +231,7 @@ define i1 @test2(ptr %p, i32 %qval, i1 %unknown) {
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %pval = load i32, i32* %p
+  %pval = load i32, ptr %p
   %cmp1 = icmp slt i32 %pval, 255
   br i1 %cmp1, label %next, label %exit
 
@@ -258,7 +258,7 @@ define i1 @test3(ptr %p, i32 %qval, i1 %unknown) {
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %pval = load i32, i32* %p
+  %pval = load i32, ptr %p
   %cmp1 = icmp slt i32 %pval, 255
   br i1 %cmp1, label %next, label %exit
 
@@ -288,7 +288,7 @@ define i1 @test4(ptr %p, i32 %qval, i1 %unknown) {
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %pval = load i32, i32* %p
+  %pval = load i32, ptr %p
   %cmp1 = icmp slt i32 %pval, 255
   br i1 %cmp1, label %next, label %exit
 
@@ -317,7 +317,7 @@ define i1 @test5(ptr %p, i1 %unknown) {
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %pval = load i32, i32* %p, !noundef !0
+  %pval = load i32, ptr %p, !noundef !0
   %cmp1 = icmp slt i32 %pval, 255
   br i1 %cmp1, label %next, label %exit
 
@@ -344,7 +344,7 @@ define i1 @test6(ptr %p, i1 %unknown) {
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i1 true
 ;
-  %pval = load i32, i32* %p, !noundef !0
+  %pval = load i32, ptr %p, !noundef !0
   %cmp1 = icmp ult i32 %pval, 255
   br i1 %cmp1, label %next, label %exit
 

diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll b/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll
index 8b67574c06cd9..353b4b0977999 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll
@@ -312,7 +312,7 @@ define i32 @test11(ptr %p, i32 %i) {
 ; CHECK:       else:
 ; CHECK-NEXT:    ret i32 0
 ;
-  %limit = load i32, i32* %p, !range !{i32 0, i32 2147483647}
+  %limit = load i32, ptr %p, !range !{i32 0, i32 2147483647}
   %within.1 = icmp slt i32 %limit, %i
   %i.minus.7 = add i32 %i, -7
   %within.2 = icmp slt i32 %limit, %i.minus.7

diff --git a/llvm/test/Transforms/DeadArgElim/byref.ll b/llvm/test/Transforms/DeadArgElim/byref.ll
index bcfa7a5d9612f..d8981d65980f0 100644
--- a/llvm/test/Transforms/DeadArgElim/byref.ll
+++ b/llvm/test/Transforms/DeadArgElim/byref.ll
@@ -17,6 +17,6 @@ define void @dont_replace_by_poison(ptr %ptr) {
 ; CHECK-NEXT:    call void @unused_byref_arg(ptr byref(i32) poison)
 ; CHECK-NEXT:    ret void
 ;
-  call void @unused_byref_arg(i32* byref(i32) %ptr)
+  call void @unused_byref_arg(ptr byref(i32) %ptr)
   ret void
 }

diff --git a/llvm/test/Transforms/DeadArgElim/fct_ptr.ll b/llvm/test/Transforms/DeadArgElim/fct_ptr.ll
index d23712813c23e..bf54fb2e8b728 100644
--- a/llvm/test/Transforms/DeadArgElim/fct_ptr.ll
+++ b/llvm/test/Transforms/DeadArgElim/fct_ptr.ll
@@ -28,7 +28,7 @@ define i32 @call_indirect(ptr readnone %fct_ptr, i32 %arg1, i32 %arg2, i32 %arg3
 ; CHECK-NEXT:    [[FINAL_RES:%.*]] = phi i32 [ [[RES1]], [[CALL_EXT]] ], [ [[RES2]], [[CALL_INT]] ], [ [[RES3]], [[CALL_OTHER]] ]
 ; CHECK-NEXT:    ret i32 [[FINAL_RES]]
 ;
-  %cmp0 = icmp eq i32 (i32, i32, i32)* %fct_ptr, @external_fct
+  %cmp0 = icmp eq ptr %fct_ptr, @external_fct
   br i1 %cmp0, label %call_ext, label %chk2
 
 call_ext:

diff --git a/llvm/test/Transforms/GVN/condprop-memdep-invalidation.ll b/llvm/test/Transforms/GVN/condprop-memdep-invalidation.ll
index c077f934c2cbb..6feef3d3b4018 100644
--- a/llvm/test/Transforms/GVN/condprop-memdep-invalidation.ll
+++ b/llvm/test/Transforms/GVN/condprop-memdep-invalidation.ll
@@ -60,7 +60,7 @@ loop.1.header:
 then.1:
   %_tmp18 = add i16 %iv.1, -1
   %gep.1 = getelementptr [4 x i16], ptr %ub.16, i16 1, i16 %_tmp18
-  %l.1 = load i16, i16* %gep.1, align 2
+  %l.1 = load i16, ptr %gep.1, align 2
   br label %cont.1
 
 cont.1:

diff --git a/llvm/test/Transforms/GVN/pr17732.ll b/llvm/test/Transforms/GVN/pr17732.ll
index 2aa8f6753bee6..c6ebd7aac18ac 100644
--- a/llvm/test/Transforms/GVN/pr17732.ll
+++ b/llvm/test/Transforms/GVN/pr17732.ll
@@ -14,10 +14,10 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i32 @main() {
 entry:
-  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 getelementptr inbounds ({ [2 x i8], i32, i8, [3 x i8] }, ptr @main.obj_with_array, i64 0, i32 0, i64 0), i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 @main.obj_with_array, i64 12, i1 false)
   %0 = load i8, ptr getelementptr inbounds (%struct.with_array, ptr @array_with_zeroinit, i64 0, i32 2), align 4
 
-  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @vector_with_zeroinit, ptr align 4 getelementptr inbounds ({ <2 x i8>, i32, i8, [3 x i8] }, ptr @main.obj_with_vector, i64 0, i32 0, i64 0), i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @vector_with_zeroinit, ptr align 4 @main.obj_with_vector, i64 12, i1 false)
   %1 = load i8, ptr getelementptr inbounds (%struct.with_vector, ptr @vector_with_zeroinit, i64 0, i32 2), align 4
   %conv0 = sext i8 %0 to i32
   %conv1 = sext i8 %1 to i32

diff --git a/llvm/test/Transforms/GVNHoist/hoist-recursive-geps.ll b/llvm/test/Transforms/GVNHoist/hoist-recursive-geps.ll
index 3ad64bcebc1e3..b45db4e32569d 100644
--- a/llvm/test/Transforms/GVNHoist/hoist-recursive-geps.ll
+++ b/llvm/test/Transforms/GVNHoist/hoist-recursive-geps.ll
@@ -20,27 +20,27 @@
 %0 = type { double, double, double }
 %1 = type { double, double, double }
 %2 = type { %3, %1, %1 }
-%3 = type { i32 (...)**, %4, %10*, %11, %11, %11, %11, %11, %11, %11, %11, %11 }
+%3 = type { ptr, %4, ptr, %11, %11, %11, %11, %11, %11, %11, %11, %11 }
 %4 = type { %5 }
 %5 = type { %6 }
 %6 = type { %7 }
 %7 = type { %8 }
 %8 = type { %9 }
-%9 = type { i64, i64, i8* }
-%10 = type <{ i32 (...)**, i32, [4 x i8] }>
+%9 = type { i64, i64, ptr }
+%10 = type <{ ptr, i32, [4 x i8] }>
 %11 = type { [4 x [4 x double]] }
 %12 = type <{ %1, %0, i32, [4 x i8] }>
-%13 = type { %1, %0, %12, %3*, %14* }
+%13 = type { %1, %0, %12, ptr, ptr }
 %14 = type opaque
 
 @d = external global %0, align 8
 @p = external global %1, align 8
 
-define zeroext i1 @fun(%2*, %12* dereferenceable(56), double*, %13*) {
-  %5 = alloca %2*, align 8
-  %6 = alloca %12*, align 8
-  %7 = alloca double*, align 8
-  %8 = alloca %13*, align 8
+define zeroext i1 @fun(ptr, ptr dereferenceable(56), ptr, ptr) {
+  %5 = alloca ptr, align 8
+  %6 = alloca ptr, align 8
+  %7 = alloca ptr, align 8
+  %8 = alloca ptr, align 8
   %9 = alloca double, align 8
   %10 = alloca double, align 8
   %11 = alloca double, align 8
@@ -48,59 +48,59 @@ define zeroext i1 @fun(%2*, %12* dereferenceable(56), double*, %13*) {
   %13 = alloca double, align 8
   %14 = alloca double, align 8
   %15 = alloca double, align 8
-  store %2* %0, %2** %5, align 8
-  store %12* %1, %12** %6, align 8
-  store double* %2, double** %7, align 8
-  store %13* %3, %13** %8, align 8
-  %16 = load %2*, %2** %5, align 8
-  %17 = load double, double* getelementptr inbounds (%0, %0* @d, i32 0, i32 0), align 8
+  store ptr %0, ptr %5, align 8
+  store ptr %1, ptr %6, align 8
+  store ptr %2, ptr %7, align 8
+  store ptr %3, ptr %8, align 8
+  %16 = load ptr, ptr %5, align 8
+  %17 = load double, ptr @d, align 8
   %18 = fdiv double 1.000000e+00, %17
-  store double %18, double* %15, align 8
-  %19 = load double, double* %15, align 8
+  store double %18, ptr %15, align 8
+  %19 = load double, ptr %15, align 8
   %20 = fcmp oge double %19, 0.000000e+00
   br i1 %20, label %21, label %36
 
 ; <label>:21:                                     ; preds = %4
-  %22 = getelementptr inbounds %2, %2* %16, i32 0, i32 1
-  %23 = getelementptr inbounds %1, %1* %22, i32 0, i32 0
-  %24 = load double, double* %23, align 8
-  %25 = load double, double* getelementptr inbounds (%1, %1* @p, i32 0, i32 0), align 8
+  %22 = getelementptr inbounds %2, ptr %16, i32 0, i32 1
+  %23 = getelementptr inbounds %1, ptr %22, i32 0, i32 0
+  %24 = load double, ptr %23, align 8
+  %25 = load double, ptr @p, align 8
   %26 = fsub double %24, %25
-  %27 = load double, double* %15, align 8
+  %27 = load double, ptr %15, align 8
   %28 = fmul double %26, %27
-  store double %28, double* %9, align 8
-  %29 = getelementptr inbounds %2, %2* %16, i32 0, i32 2
-  %30 = getelementptr inbounds %1, %1* %29, i32 0, i32 0
-  %31 = load double, double* %30, align 8
-  %32 = load double, double* getelementptr inbounds (%1, %1* @p, i32 0, i32 0), align 8
+  store double %28, ptr %9, align 8
+  %29 = getelementptr inbounds %2, ptr %16, i32 0, i32 2
+  %30 = getelementptr inbounds %1, ptr %29, i32 0, i32 0
+  %31 = load double, ptr %30, align 8
+  %32 = load double, ptr @p, align 8
   %33 = fsub double %31, %32
-  %34 = load double, double* %15, align 8
+  %34 = load double, ptr %15, align 8
   %35 = fmul double %33, %34
-  store double %35, double* %12, align 8
+  store double %35, ptr %12, align 8
   br label %51
 
 ; <label>:36:                                     ; preds = %4
-  %37 = getelementptr inbounds %2, %2* %16, i32 0, i32 2
-  %38 = getelementptr inbounds %1, %1* %37, i32 0, i32 0
-  %39 = load double, double* %38, align 8
-  %40 = load double, double* getelementptr inbounds (%1, %1* @p, i32 0, i32 0), align 8
+  %37 = getelementptr inbounds %2, ptr %16, i32 0, i32 2
+  %38 = getelementptr inbounds %1, ptr %37, i32 0, i32 0
+  %39 = load double, ptr %38, align 8
+  %40 = load double, ptr @p, align 8
   %41 = fsub double %39, %40
-  %42 = load double, double* %15, align 8
+  %42 = load double, ptr %15, align 8
   %43 = fmul double %41, %42
-  store double %43, double* %9, align 8
-  %44 = getelementptr inbounds %2, %2* %16, i32 0, i32 1
-  %45 = getelementptr inbounds %1, %1* %44, i32 0, i32 0
-  %46 = load double, double* %45, align 8
-  %47 = load double, double* getelementptr inbounds (%1, %1* @p, i32 0, i32 0), align 8
+  store double %43, ptr %9, align 8
+  %44 = getelementptr inbounds %2, ptr %16, i32 0, i32 1
+  %45 = getelementptr inbounds %1, ptr %44, i32 0, i32 0
+  %46 = load double, ptr %45, align 8
+  %47 = load double, ptr @p, align 8
   %48 = fsub double %46, %47
-  %49 = load double, double* %15, align 8
+  %49 = load double, ptr %15, align 8
   %50 = fmul double %48, %49
-  store double %50, double* %12, align 8
+  store double %50, ptr %12, align 8
   br label %51
 
 ; <label>:51:                                     ; preds = %36, %21
-  %52 = load double, double* %12, align 8
-  %53 = load double, double* %9, align 8
+  %52 = load double, ptr %12, align 8
+  %53 = load double, ptr %9, align 8
   %54 = fcmp olt double %52, %53
   ret i1 %54
 }

diff --git a/llvm/test/Transforms/GVNHoist/infinite-loop-direct.ll b/llvm/test/Transforms/GVNHoist/infinite-loop-direct.ll
index 2b179e0bf4daf..e9d574a2966e7 100644
--- a/llvm/test/Transforms/GVNHoist/infinite-loop-direct.ll
+++ b/llvm/test/Transforms/GVNHoist/infinite-loop-direct.ll
@@ -11,39 +11,39 @@
 ; CHECK: bitcast
 ; CHECK-NEXT: load
 
-%class.bar = type { i8*, %class.base* }
-%class.base = type { i32 (...)** }
+%class.bar = type { ptr, ptr }
+%class.base = type { ptr }
 
 ; Function Attrs: noreturn nounwind uwtable
 define void @bazv1() local_unnamed_addr {
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x.sroa.2.0..sroa_idx2 = getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  store %class.base* null, %class.base** %x.sroa.2.0..sroa_idx2, align 8
-  call void @_Z3foo3bar(%class.bar* nonnull %agg.tmp)
-  %0 = load %class.base*, %class.base** %x.sroa.2.0..sroa_idx2, align 8
-  %1 = bitcast %class.bar* %agg.tmp to %class.base*
-  %cmp.i = icmp eq %class.base* %0, %1
+  %x.sroa.2.0..sroa_idx2 = getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  store ptr null, ptr %x.sroa.2.0..sroa_idx2, align 8
+  call void @_Z3foo3bar(ptr nonnull %agg.tmp)
+  %0 = load ptr, ptr %x.sroa.2.0..sroa_idx2, align 8
+  %1 = bitcast ptr %agg.tmp to ptr
+  %cmp.i = icmp eq ptr %0, %1
   br i1 %cmp.i, label %if.then.i, label %if.else.i
 
 if.then.i:                                        ; preds = %entry
-  %2 = bitcast %class.base* %0 to void (%class.base*)***
-  %vtable.i = load void (%class.base*)**, void (%class.base*)*** %2, align 8
-  %vfn.i = getelementptr inbounds void (%class.base*)*, void (%class.base*)** %vtable.i, i64 2
-  %3 = load void (%class.base*)*, void (%class.base*)** %vfn.i, align 8
-  call void %3(%class.base* %0)
+  %2 = bitcast ptr %0 to ptr
+  %vtable.i = load ptr, ptr %2, align 8
+  %vfn.i = getelementptr inbounds ptr, ptr %vtable.i, i64 2
+  %3 = load ptr, ptr %vfn.i, align 8
+  call void %3(ptr %0)
   br label %while.cond.preheader
 
 if.else.i:                                        ; preds = %entry
-  %tobool.i = icmp eq %class.base* %0, null
+  %tobool.i = icmp eq ptr %0, null
   br i1 %tobool.i, label %while.cond.preheader, label %if.then4.i
 
 if.then4.i:                                       ; preds = %if.else.i
-  %4 = bitcast %class.base* %0 to void (%class.base*)***
-  %vtable6.i = load void (%class.base*)**, void (%class.base*)*** %4, align 8
-  %vfn7.i = getelementptr inbounds void (%class.base*)*, void (%class.base*)** %vtable6.i, i64 3
-  %5 = load void (%class.base*)*, void (%class.base*)** %vfn7.i, align 8
-  call void %5(%class.base* nonnull %0)
+  %4 = bitcast ptr %0 to ptr
+  %vtable6.i = load ptr, ptr %4, align 8
+  %vfn7.i = getelementptr inbounds ptr, ptr %vtable6.i, i64 3
+  %5 = load ptr, ptr %vfn7.i, align 8
+  call void %5(ptr nonnull %0)
   br label %while.cond.preheader
 
 while.cond.preheader:                             ; preds = %if.then.i, %if.else.i, %if.then4.i
@@ -54,7 +54,7 @@ while.cond:                                       ; preds = %while.cond.preheade
   br label %while.cond
 }
 
-declare void @_Z3foo3bar(%class.bar*) local_unnamed_addr
+declare void @_Z3foo3bar(ptr) local_unnamed_addr
 
 declare i32 @sleep(i32) local_unnamed_addr
 
@@ -69,28 +69,28 @@ declare i32 @sleep(i32) local_unnamed_addr
 define void @bazv() {
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x= getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  %0 = load %class.base*, %class.base** %x, align 8
-  %1 = bitcast %class.bar* %agg.tmp to %class.base*
-  %cmp.i = icmp eq %class.base* %0, %1
+  %x= getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  %0 = load ptr, ptr %x, align 8
+  %1 = bitcast ptr %agg.tmp to ptr
+  %cmp.i = icmp eq ptr %0, %1
   br i1 %cmp.i, label %bb1, label %bb4
 
 bb1:
-  %b1 = bitcast %class.base* %0 to void (%class.base*)***
-  %i = load void (%class.base*)**, void (%class.base*)*** %b1, align 8
-  %vfn.i = getelementptr inbounds void (%class.base*)*, void (%class.base*)** %i, i64 2
-  %cmp.j = icmp eq %class.base* %0, %1
+  %b1 = bitcast ptr %0 to ptr
+  %i = load ptr, ptr %b1, align 8
+  %vfn.i = getelementptr inbounds ptr, ptr %i, i64 2
+  %cmp.j = icmp eq ptr %0, %1
   br i1 %cmp.j, label %bb2, label %bb3
 
 bb2:
-  %l1 = load void (%class.base*)*, void (%class.base*)** %vfn.i, align 8
+  %l1 = load ptr, ptr %vfn.i, align 8
   br label %bb3
 
 bb3:
-  %l2 = load void (%class.base*)*, void (%class.base*)** %vfn.i, align 8
+  %l2 = load ptr, ptr %vfn.i, align 8
   br label %bb2
 
 bb4:
-  %b2 = bitcast %class.base* %0 to void (%class.base*)***
+  %b2 = bitcast ptr %0 to ptr
   ret void
 }

diff --git a/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll b/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
index 2621b71661c1d..aef55af81dcac 100644
--- a/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
+++ b/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
@@ -3,14 +3,14 @@
 
 ; Checking gvn-hoist in case of indirect branches.
 
-%class.bar = type { i8*, %class.base* }
-%class.base = type { i32 (...)** }
+%class.bar = type { ptr, ptr }
+%class.base = type { ptr }
 
-@bar = local_unnamed_addr global i32 ()* null, align 8
-@bar1 = local_unnamed_addr global i32 ()* null, align 8
+@bar = local_unnamed_addr global ptr null, align 8
+@bar1 = local_unnamed_addr global ptr null, align 8
 
 ; Check that the bitcast is not hoisted because it is after an indirect call
-define i32 @foo(i32* nocapture readonly %i) {
+define i32 @foo(ptr nocapture readonly %i) {
 ; CHECK-LABEL: define i32 @foo
 ; CHECK-SAME: (ptr nocapture readonly [[I:%.*]]) {
 ; CHECK-NEXT:  entry:
@@ -36,25 +36,25 @@ define i32 @foo(i32* nocapture readonly %i) {
 ;
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x= getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  %y = load %class.base*, %class.base** %x, align 8
-  %0 = load i32, i32* %i, align 4
+  %x= getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  %y = load ptr, ptr %x, align 8
+  %0 = load i32, ptr %i, align 4
   %.off = add i32 %0, -1
   %switch = icmp ult i32 %.off, 2
   br i1 %switch, label %l1.preheader, label %sw.default
 
 l1.preheader:                                     ; preds = %sw.default, %entry
-  %b1 = bitcast %class.base* %y to void (%class.base*)***
+  %b1 = bitcast ptr %y to ptr
   br label %l1
 
 l1:                                               ; preds = %l1.preheader, %l1
-  %1 = load i32 ()*, i32 ()** @bar, align 8
+  %1 = load ptr, ptr @bar, align 8
   %call = tail call i32 %1()
-  %b2 = bitcast %class.base* %y to void (%class.base*)***
+  %b2 = bitcast ptr %y to ptr
   br label %l1
 
 sw.default:                                       ; preds = %entry
-  %2 = load i32 ()*, i32 ()** @bar1, align 8
+  %2 = load ptr, ptr @bar1, align 8
   %call2 = tail call i32 %2()
   br label %l1.preheader
 }
@@ -62,7 +62,7 @@ sw.default:                                       ; preds = %entry
 
 ; Any instruction inside an infinite loop will not be hoisted because
 ; there is no path to exit of the function.
-define i32 @foo1(i32* nocapture readonly %i) {
+define i32 @foo1(ptr nocapture readonly %i) {
 ; CHECK-LABEL: define i32 @foo1
 ; CHECK-SAME: (ptr nocapture readonly [[I:%.*]]) {
 ; CHECK-NEXT:  entry:
@@ -90,33 +90,33 @@ define i32 @foo1(i32* nocapture readonly %i) {
 ;
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x= getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  %y = load %class.base*, %class.base** %x, align 8
-  %0 = load i32, i32* %i, align 4
+  %x= getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  %y = load ptr, ptr %x, align 8
+  %0 = load i32, ptr %i, align 4
   %.off = add i32 %0, -1
   %switch = icmp ult i32 %.off, 2
   br i1 %switch, label %l1.preheader, label %sw.default
 
 l1.preheader:                                     ; preds = %sw.default, %entry
-  %b1 = bitcast %class.base* %y to void (%class.base*)***
-  %y1 = load %class.base*, %class.base** %x, align 8
+  %b1 = bitcast ptr %y to ptr
+  %y1 = load ptr, ptr %x, align 8
   br label %l1
 
 l1:                                               ; preds = %l1.preheader, %l1
-  %b2 = bitcast %class.base* %y to void (%class.base*)***
-  %1 = load i32 ()*, i32 ()** @bar, align 8
-  %y2 = load %class.base*, %class.base** %x, align 8
+  %b2 = bitcast ptr %y to ptr
+  %1 = load ptr, ptr @bar, align 8
+  %y2 = load ptr, ptr %x, align 8
   %call = tail call i32 %1()
   br label %l1
 
 sw.default:                                       ; preds = %entry
-  %2 = load i32 ()*, i32 ()** @bar1, align 8
+  %2 = load ptr, ptr @bar1, align 8
   %call2 = tail call i32 %2()
   br label %l1.preheader
 }
 
 ; Check that bitcast is hoisted even when one of them is partially redundant.
-define i32 @test13(i32* %P, i8* %Ptr, i32* nocapture readonly %i) {
+define i32 @test13(ptr %P, ptr %Ptr, ptr nocapture readonly %i) {
 ; CHECK-LABEL: define i32 @test13
 ; CHECK-SAME: (ptr [[P:%.*]], ptr [[PTR:%.*]], ptr nocapture readonly [[I:%.*]]) {
 ; CHECK-NEXT:  entry:
@@ -139,18 +139,18 @@ define i32 @test13(i32* %P, i8* %Ptr, i32* nocapture readonly %i) {
 ;
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x= getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  %y = load %class.base*, %class.base** %x, align 8
-  indirectbr i8* %Ptr, [label %BrBlock, label %B2]
+  %x= getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  %y = load ptr, ptr %x, align 8
+  indirectbr ptr %Ptr, [label %BrBlock, label %B2]
 
 B2:
-  %b1 = bitcast %class.base* %y to void (%class.base*)***
-  store i32 4, i32 *%P
+  %b1 = bitcast ptr %y to ptr
+  store i32 4, ptr %P
   br label %BrBlock
 
 BrBlock:
-  %b2 = bitcast %class.base* %y to void (%class.base*)***
-  %L = load i32, i32* %P
+  %b2 = bitcast ptr %y to ptr
+  %L = load i32, ptr %P
   %C = icmp eq i32 %L, 42
   br i1 %C, label %T, label %F
 
@@ -163,7 +163,7 @@ F:
 ; Check that the bitcast is not hoisted because anticipability
 ; cannot be guaranteed here as one of the indirect branch targets
 ; do not have the bitcast instruction.
-define i32 @test14(i32* %P, i8* %Ptr, i32* nocapture readonly %i) {
+define i32 @test14(ptr %P, ptr %Ptr, ptr nocapture readonly %i) {
 ; CHECK-LABEL: define i32 @test14
 ; CHECK-SAME: (ptr [[P:%.*]], ptr [[PTR:%.*]], ptr nocapture readonly [[I:%.*]]) {
 ; CHECK-NEXT:  entry:
@@ -189,33 +189,33 @@ define i32 @test14(i32* %P, i8* %Ptr, i32* nocapture readonly %i) {
 ;
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x= getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  %y = load %class.base*, %class.base** %x, align 8
-  indirectbr i8* %Ptr, [label %BrBlock, label %B2, label %T]
+  %x= getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  %y = load ptr, ptr %x, align 8
+  indirectbr ptr %Ptr, [label %BrBlock, label %B2, label %T]
 
 B2:
-  %b1 = bitcast %class.base* %y to void (%class.base*)***
-  store i32 4, i32 *%P
+  %b1 = bitcast ptr %y to ptr
+  store i32 4, ptr %P
   br label %BrBlock
 
 BrBlock:
-  %b2 = bitcast %class.base* %y to void (%class.base*)***
-  %L = load i32, i32* %P
+  %b2 = bitcast ptr %y to ptr
+  %L = load i32, ptr %P
   %C = icmp eq i32 %L, 42
   br i1 %C, label %T, label %F
 
 T:
-  %pi = load i32, i32* %i, align 4
+  %pi = load i32, ptr %i, align 4
   ret i32 %pi
 F:
-  %pl = load i32, i32* %P
+  %pl = load i32, ptr %P
   ret i32 %pl
 }
 
 
 ; Check that the bitcast is not hoisted because of a cycle
 ; due to indirect branches
-define i32 @test16(i32* %P, i8* %Ptr, i32* nocapture readonly %i) {
+define i32 @test16(ptr %P, ptr %Ptr, ptr nocapture readonly %i) {
 ; CHECK-LABEL: define i32 @test16
 ; CHECK-SAME: (ptr [[P:%.*]], ptr [[PTR:%.*]], ptr nocapture readonly [[I:%.*]]) {
 ; CHECK-NEXT:  entry:
@@ -240,31 +240,31 @@ define i32 @test16(i32* %P, i8* %Ptr, i32* nocapture readonly %i) {
 ;
 entry:
   %agg.tmp = alloca %class.bar, align 8
-  %x= getelementptr inbounds %class.bar, %class.bar* %agg.tmp, i64 0, i32 1
-  %y = load %class.base*, %class.base** %x, align 8
-  indirectbr i8* %Ptr, [label %BrBlock, label %B2]
+  %x= getelementptr inbounds %class.bar, ptr %agg.tmp, i64 0, i32 1
+  %y = load ptr, ptr %x, align 8
+  indirectbr ptr %Ptr, [label %BrBlock, label %B2]
 
 B2:
-  %b1 = bitcast %class.base* %y to void (%class.base*)***
-  %0 = load i32, i32* %i, align 4
-  store i32 %0, i32 *%P
+  %b1 = bitcast ptr %y to ptr
+  %0 = load i32, ptr %i, align 4
+  store i32 %0, ptr %P
   br label %BrBlock
 
 BrBlock:
-  %b2 = bitcast %class.base* %y to void (%class.base*)***
-  %L = load i32, i32* %P
+  %b2 = bitcast ptr %y to ptr
+  %L = load i32, ptr %P
   %C = icmp eq i32 %L, 42
   br i1 %C, label %T, label %F
 
 T:
-  indirectbr i32* %P, [label %BrBlock, label %B2]
+  indirectbr ptr %P, [label %BrBlock, label %B2]
 
 F:
-  indirectbr i8* %Ptr, [label %BrBlock, label %B2]
+  indirectbr ptr %Ptr, [label %BrBlock, label %B2]
 }
 
 
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 
 ; Check that an instruction is not hoisted out of landing pad (%lpad4)
 ; Also within a landing pad no redundancies are removed by gvn-hoist,
@@ -272,7 +272,7 @@ F:
 ; landing pad has direct branches (e.g., %lpad to %catch1, %catch)
 ; This CFG has a cycle (%lpad -> %catch1 -> %lpad4 -> %lpad)
 
-define i32 @foo2(i32* nocapture readonly %i) local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @foo2(ptr nocapture readonly %i) local_unnamed_addr personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: define i32 @foo2
 ; CHECK-SAME: (ptr nocapture readonly [[I:%.*]]) local_unnamed_addr personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:  entry:
@@ -322,28 +322,28 @@ define i32 @foo2(i32* nocapture readonly %i) local_unnamed_addr personality i8*
 ; CHECK-NEXT:    ret i32 [[BC2]]
 ;
 entry:
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %try.cont, label %if.then
 
 if.then:
-  %exception = tail call i8* @__cxa_allocate_exception(i64 4) #2
-  %1 = bitcast i8* %exception to i32*
-  store i32 %0, i32* %1, align 16
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #3
+  %exception = tail call ptr @__cxa_allocate_exception(i64 4) #2
+  %1 = bitcast ptr %exception to ptr
+  store i32 %0, ptr %1, align 16
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #3
   to label %unreachable unwind label %lpad
 
 lpad:
-  %2 = landingpad { i8*, i32 }
-  catch i8* bitcast (i8** @_ZTIi to i8*)
-  catch i8* null
+  %2 = landingpad { ptr, i32 }
+  catch ptr @_ZTIi
+  catch ptr null
   %bc1 = add i32 %0, 10
-  %3 = extractvalue { i8*, i32 } %2, 0
-  %4 = extractvalue { i8*, i32 } %2, 1
-  %5 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #2
+  %3 = extractvalue { ptr, i32 } %2, 0
+  %4 = extractvalue { ptr, i32 } %2, 1
+  %5 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #2
   %matches = icmp eq i32 %4, %5
   %bc7 = add i32 %0, 10
-  %6 = tail call i8* @__cxa_begin_catch(i8* %3) #2
+  %6 = tail call ptr @__cxa_begin_catch(ptr %3) #2
   br i1 %matches, label %catch1, label %catch
 
 catch1:
@@ -353,17 +353,17 @@ catch1:
 
 catch:
   %bc4 = add i32 %0, 10
-  %7 = load i32, i32* %i, align 4
+  %7 = load i32, ptr %i, align 4
   %add = add nsw i32 %7, 1
   tail call void @__cxa_end_catch()
   br label %try.cont
 
 lpad4:
-  %8 = landingpad { i8*, i32 }
+  %8 = landingpad { ptr, i32 }
   cleanup
   %bc5 = add i32 %0, 10
   tail call void @__cxa_end_catch() #2
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #3
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #3
   to label %unreachable unwind label %lpad
 
 try.cont:
@@ -376,16 +376,16 @@ unreachable:
   ret i32 %bc2
 }
 
-declare i8* @__cxa_allocate_exception(i64) local_unnamed_addr
+declare ptr @__cxa_allocate_exception(i64) local_unnamed_addr
 
-declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
 
 declare i32 @__gxx_personality_v0(...)
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #1
+declare i32 @llvm.eh.typeid.for(ptr) #1
 
-declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
 
 declare void @__cxa_end_catch() local_unnamed_addr
 

diff --git a/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll b/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll
index e0d88d24bef88..b4d0e3ebab05a 100644
--- a/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll
+++ b/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll
@@ -28,7 +28,7 @@ entry:
 	br i1 %tmp7, label %cond_true, label %cond_next
 
 cond_true:		; preds = %entry
-	store i8 0, ptr getelementptr (%"struct.std::map<int,int,std::less<int>,std::allocator<std::pair<const int, int> > >", ptr @someMap, i32 0, i32 0, i32 0, i32 0, i32 0)
+	store i8 0, ptr @someMap
 	ret void
 
 cond_next:		; preds = %entry

diff --git a/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll b/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
index 270489d1a32e3..13de91f52a789 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
@@ -12,7 +12,7 @@
 @array = internal addrspace(1) global [ 2 x i32 ] zeroinitializer 
 
 define i32 @foo() {
-  %A = load i32, ptr addrspace(1) getelementptr ({ i32, i32 }, ptr addrspace(1) @struct, i32 0, i32 0)
+  %A = load i32, ptr addrspace(1) @struct
   %B = load i32, ptr addrspace(1) @array
   ; Use the loaded values, so they won't get removed completely
   %R = add i32 %A, %B
@@ -23,6 +23,6 @@ define i32 @foo() {
 ; optimized away completely.
 define void @bar(i32 %R) {
   store i32 %R, ptr addrspace(1) @array
-  store i32 %R, ptr addrspace(1) getelementptr ({ i32, i32 }, ptr addrspace(1) @struct, i32 0, i32 0)
+  store i32 %R, ptr addrspace(1) @struct
   ret void
 }

diff --git a/llvm/test/Transforms/GlobalOpt/GSROA-section.ll b/llvm/test/Transforms/GlobalOpt/GSROA-section.ll
index 97a9cc08c649e..e0dc5f42347bb 100644
--- a/llvm/test/Transforms/GlobalOpt/GSROA-section.ll
+++ b/llvm/test/Transforms/GlobalOpt/GSROA-section.ll
@@ -12,7 +12,7 @@
 @array = internal global [ 2 x i32 ] zeroinitializer
 
 define i32 @foo() {
-  %A = load i32, ptr getelementptr ({ i32, i32 }, ptr @struct, i32 0, i32 0)
+  %A = load i32, ptr @struct
   %B = load i32, ptr @array
   ; Use the loaded values, so they won't get removed completely
   %R = add i32 %A, %B
@@ -23,7 +23,7 @@ define i32 @foo() {
 ; optimized away completely.
 define void @bar(i32 %R) {
   store i32 %R, ptr @array
-  store i32 %R, ptr getelementptr ({ i32, i32 }, ptr @struct, i32 0, i32 0)
+  store i32 %R, ptr @struct
   ret void
 }
 

diff --git a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
index f9d988f96e318..648d4a56a4fb0 100644
--- a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
+++ b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
@@ -19,7 +19,7 @@ define void @stores_single_use_gep_constexpr() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr @fn0, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  store ptr @fn0, ptr @global.20ptr, align 8
   store ptr @fn1, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   store ptr @fn2, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 2), align 8
   store ptr @fn3, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 3), align 8

diff --git a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-other-constexpr.ll b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-other-constexpr.ll
index fdf888365bbda..726e9c5af95b0 100644
--- a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-other-constexpr.ll
+++ b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-other-constexpr.ll
@@ -28,7 +28,7 @@ define void @stores_single_use_gep_constexpr() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr @fn0, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  store ptr @fn0, ptr @global.20ptr, align 8
   store ptr @fn1, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   store ptr @fn2, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 2), align 8
   store ptr @fn3, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 3), align 8

diff --git a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-ptrtoint-add-constexpr.ll b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-ptrtoint-add-constexpr.ll
index 477b617c003ef..1e87d9266cb38 100644
--- a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-ptrtoint-add-constexpr.ll
+++ b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-ptrtoint-add-constexpr.ll
@@ -17,7 +17,7 @@ define void @stores_single_use_gep_constexpr() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr @fn0, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  store ptr @fn0, ptr @global.20ptr, align 8
   store ptr @fn1, ptr getelementptr inbounds (%struct.global.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   ret void
 }

diff --git a/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll b/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
index b53b9d447aea4..e669246eb1b28 100644
--- a/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
+++ b/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
@@ -26,7 +26,7 @@ define i32 @bar() {
 entry:
 ; This load uses the split global, but cannot be constant-propagated away.
 ; CHECK: %0 = load i32, ptr @b.0
-  %0 = load i32, ptr getelementptr inbounds ({i32, i32}, ptr @b, i32 0, i32 0), align 4
+  %0 = load i32, ptr @b, align 4
   ret i32 %0
 }
 
@@ -42,7 +42,7 @@ entry:
 
 ; This store uses the split global, but cannot be constant-propagated away.
 ; CHECK: store i32 3, ptr @b.0
-  store i32 3, ptr getelementptr inbounds ({i32, i32}, ptr @b, i32 0, i32 0), align 4
+  store i32 3, ptr @b, align 4
 ; This store can be removed, because the second element of @b is never read.
 ; CHECK-NOT: store i32 4, ptr @b.1
   store i32 4, ptr getelementptr inbounds ({i32, i32}, ptr @b, i32 0, i32 1), align 4

diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll b/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
index 17cc3210b92d6..53165462bcbb4 100644
--- a/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
+++ b/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
@@ -8,7 +8,7 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
 ; CHECK: @G = internal unnamed_addr global { i32, [4 x float] }
 ; CHECK: 12345
 define void @onlystore() {
-        store i32 12345, ptr getelementptr ({ i32, [4 x float] }, ptr @G, i32 0, i32 0)
+        store i32 12345, ptr @G
         ret void
 }
 

diff --git a/llvm/test/Transforms/GlobalOpt/globalsra.ll b/llvm/test/Transforms/GlobalOpt/globalsra.ll
index 3cc9259b3c686..53673f7d8a999 100644
--- a/llvm/test/Transforms/GlobalOpt/globalsra.ll
+++ b/llvm/test/Transforms/GlobalOpt/globalsra.ll
@@ -8,7 +8,7 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
     { double } { double 1.727000e+01 } }                ; <ptr> [#uses=3]
 
 define void @onlystore() {
-        store i32 123, ptr getelementptr ({ i32, float, { double } }, ptr @G, i32 0, i32 0)
+        store i32 123, ptr @G
         ret void
 }
 
@@ -29,7 +29,7 @@ define double @constantize() {
     { double } { double 1.727000e+01 } }                ; <ptr> [#uses=3]
 
 define void @onlystore2() {
-        store i32 123, ptr getelementptr ({ i32, float, { double } }, ptr @G2, i32 0, i32 0)
+        store i32 123, ptr @G2
         ret void
 }
 

diff --git a/llvm/test/Transforms/GlobalOpt/invariant.ll b/llvm/test/Transforms/GlobalOpt/invariant.ll
index 1604eaf1a5ac7..60e521f78fa65 100644
--- a/llvm/test/Transforms/GlobalOpt/invariant.ll
+++ b/llvm/test/Transforms/GlobalOpt/invariant.ll
@@ -3,19 +3,19 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare {}* @llvm.invariant.start.p0i8(i64 %size, i8* nocapture %ptr)
+declare ptr @llvm.invariant.start.p0(i64 %size, ptr nocapture %ptr)
 
-define void @test1(i8* %ptr) {
-  call {}* @llvm.invariant.start.p0i8(i64 4, i8* %ptr)
+define void @test1(ptr %ptr) {
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %ptr)
   ret void
 }
 
 @object1 = global i32 0
 ; CHECK: @object1 = constant i32 -1
 define void @ctor1() {
-  store i32 -1, i32* @object1
-  %A = bitcast i32* @object1 to i8*
-  call void @test1(i8* %A)
+  store i32 -1, ptr @object1
+  %A = bitcast ptr @object1 to ptr
+  call void @test1(ptr %A)
   ret void
 }
 
@@ -23,10 +23,10 @@ define void @ctor1() {
 @object2 = global i32 0
 ; CHECK: @object2 = global i32 0
 define void @ctor2() {
-  store i32 -1, i32* @object2
-  %A = bitcast i32* @object2 to i8*
-  %B = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %A)
-  %C = bitcast {}* %B to i8*
+  store i32 -1, ptr @object2
+  %A = bitcast ptr @object2 to ptr
+  %B = call ptr @llvm.invariant.start.p0(i64 4, ptr %A)
+  %C = bitcast ptr %B to ptr
   ret void
 }
 
@@ -34,9 +34,9 @@ define void @ctor2() {
 @object3 = global i32 0
 ; CHECK: @object3 = global i32 -1
 define void @ctor3() {
-  store i32 -1, i32* @object3
-  %A = bitcast i32* @object3 to i8*
-  call {}* @llvm.invariant.start.p0i8(i64 3, i8* %A)
+  store i32 -1, ptr @object3
+  %A = bitcast ptr @object3 to ptr
+  call ptr @llvm.invariant.start.p0(i64 3, ptr %A)
   ret void
 }
 
@@ -44,16 +44,16 @@ define void @ctor3() {
 @object4 = global i32 0
 ; CHECK: @object4 = global i32 -1
 define void @ctor4() {
-  store i32 -1, i32* @object4
-  %A = bitcast i32* @object4 to i8*
-  call {}* @llvm.invariant.start.p0i8(i64 -1, i8* %A)
+  store i32 -1, ptr @object4
+  %A = bitcast ptr @object4 to ptr
+  call ptr @llvm.invariant.start.p0(i64 -1, ptr %A)
   ret void
 }
 
 
 @llvm.global_ctors = appending constant
-  [4 x { i32, void ()*, i8* }]
-  [ { i32, void ()*, i8* } { i32 65535, void ()* @ctor1, i8* null },
-    { i32, void ()*, i8* } { i32 65535, void ()* @ctor2, i8* null },
-    { i32, void ()*, i8* } { i32 65535, void ()* @ctor3, i8* null },
-    { i32, void ()*, i8* } { i32 65535, void ()* @ctor4, i8* null } ]
+  [4 x { i32, ptr, ptr }]
+  [ { i32, ptr, ptr } { i32 65535, ptr @ctor1, ptr null },
+    { i32, ptr, ptr } { i32 65535, ptr @ctor2, ptr null },
+    { i32, ptr, ptr } { i32 65535, ptr @ctor3, ptr null },
+    { i32, ptr, ptr } { i32 65535, ptr @ctor4, ptr null } ]

diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-opaque-ptr.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-opaque-ptr.ll
index 0988bd7d3f079..4916354709878 100644
--- a/llvm/test/Transforms/GlobalOpt/malloc-promote-opaque-ptr.ll
+++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-opaque-ptr.ll
@@ -5,7 +5,7 @@
 @g2 = internal global ptr null
 @g3 = internal global ptr null
 
-declare noalias i8* @malloc(i64) allockind("alloc,uninitialized") allocsize(0)
+declare noalias ptr @malloc(i64) allockind("alloc,uninitialized") allocsize(0)
 
 ;.
 ; CHECK: @[[G1_BODY_0:[a-zA-Z0-9_$"\\.-]+]] = internal unnamed_addr global i64 undef

diff --git a/llvm/test/Transforms/GlobalOpt/sra-many-stores-initializers.ll b/llvm/test/Transforms/GlobalOpt/sra-many-stores-initializers.ll
index 9c85f3b342470..e1fce4ed129b0 100644
--- a/llvm/test/Transforms/GlobalOpt/sra-many-stores-initializers.ll
+++ b/llvm/test/Transforms/GlobalOpt/sra-many-stores-initializers.ll
@@ -35,7 +35,7 @@ define void @store_initializer() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  store ptr null, ptr @global.20ptr, align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 2), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 3), align 8
@@ -53,7 +53,7 @@ entry:
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 15), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 16), align 8
 
-  %l0 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  %l0 = load ptr, ptr @global.20ptr, align 8
   store volatile ptr %l0, ptr @c
   %l1 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   store volatile ptr %l1, ptr @c
@@ -119,7 +119,7 @@ define void @store_null_initializer_2() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 0), align 8
+  store ptr null, ptr @global.20ptr.2, align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 1), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 2), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 3), align 8
@@ -138,7 +138,7 @@ entry:
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 16), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 19), align 8
 
-  %l0 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 0), align 8
+  %l0 = load ptr, ptr @global.20ptr.2, align 8
   store volatile ptr %l0, ptr @c
   %l1 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.2, i64 0, i32 1), align 8
   store volatile ptr %l1, ptr @c
@@ -242,7 +242,7 @@ define void @store_mixed_initializer_negative() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 0), align 8
+  store ptr null, ptr @global.20ptr.3, align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 1), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 2), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 3), align 8
@@ -261,7 +261,7 @@ entry:
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 16), align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 19), align 8
 
-  %l0 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 0), align 8
+  %l0 = load ptr, ptr @global.20ptr.3, align 8
   store volatile ptr %l0, ptr @c
   %l1 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.3, i64 0, i32 1), align 8
   store volatile ptr %l1, ptr @c
@@ -330,7 +330,7 @@ define void @store_mixed_initializer() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 0), align 8
+  store ptr null, ptr @global.20ptr.4, align 8
   store ptr null, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 1), align 8
   store ptr @c, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 2), align 8
   store ptr @c, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 3), align 8
@@ -348,7 +348,7 @@ entry:
   store ptr @c, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 15), align 8
   store ptr @c, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 16), align 8
 
-  %l0 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 0), align 8
+  %l0 = load ptr, ptr @global.20ptr.4, align 8
   store volatile ptr %l0, ptr @c
   %l1 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 1), align 8
   store volatile ptr %l1, ptr @c
@@ -413,7 +413,7 @@ define void @store_mixed_initializer_geps_without_inbounds() {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store ptr null, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 0), align 8
+  store ptr null, ptr @global.20ptr.4, align 8
   store ptr null, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 1), align 8
   store ptr @c, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 2), align 8
   store ptr @c, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 3), align 8
@@ -431,7 +431,7 @@ entry:
   store ptr @c, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 15), align 8
   store ptr @c, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 16), align 8
 
-  %l0 = load ptr, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 0), align 8
+  %l0 = load ptr, ptr @global.20ptr.4, align 8
   store volatile ptr %l0, ptr @c
   %l1 = load ptr, ptr getelementptr (%struct.20ptr, ptr @global.20ptr.4, i64 0, i32 1), align 8
   store volatile ptr %l1, ptr @c

diff --git a/llvm/test/Transforms/GlobalOpt/sra-many-stores-once.ll b/llvm/test/Transforms/GlobalOpt/sra-many-stores-once.ll
index 7015bfced340f..f9ab37c470a34 100644
--- a/llvm/test/Transforms/GlobalOpt/sra-many-stores-once.ll
+++ b/llvm/test/Transforms/GlobalOpt/sra-many-stores-once.ll
@@ -78,7 +78,7 @@ define void @test_stored_once_call_with_nullptr_ub() {
 ;
 entry:
   store ptr @fn0, ptr @global.20ptr, align 8
-  store ptr @fn1, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  store ptr @fn1, ptr @global.20ptr, align 8
   store ptr @fn2, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   store ptr @fn3, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 2), align 8
   store ptr @fn0, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 3), align 8
@@ -96,7 +96,7 @@ entry:
   store ptr @fn0, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 15), align 8
   store ptr @fn1, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 16), align 8
 
-  %l0 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 0), align 8
+  %l0 = load ptr, ptr @global.20ptr, align 8
   call void %l0()
   %l1 = load ptr, ptr getelementptr inbounds (%struct.20ptr, ptr @global.20ptr, i64 0, i32 1), align 8
   call void %l1()

diff --git a/llvm/test/Transforms/GlobalOpt/sra-many-stores.ll b/llvm/test/Transforms/GlobalOpt/sra-many-stores.ll
index 900bcc2582f66..ce457eca7db4a 100644
--- a/llvm/test/Transforms/GlobalOpt/sra-many-stores.ll
+++ b/llvm/test/Transforms/GlobalOpt/sra-many-stores.ll
@@ -21,7 +21,7 @@ define void @write_struct() {
 ; CHECK-NEXT:    tail call fastcc void @read_struct()
 ; CHECK-NEXT:    ret void
 ;
-  store ptr null, ptr getelementptr inbounds (%struct.widget, ptr @global, i64 0, i32 0), align 8
+  store ptr null, ptr @global, align 8
   store ptr null, ptr getelementptr inbounds (%struct.widget, ptr @global, i64 0, i32 1), align 8
   store ptr null, ptr getelementptr inbounds (%struct.widget, ptr @global, i64 0, i32 2), align 8
   store ptr null, ptr getelementptr inbounds (%struct.widget, ptr @global, i64 0, i32 3), align 8
@@ -59,7 +59,7 @@ define void @store_to_struct_array() {
 ; CHECK-NEXT:    tail call fastcc void @read_non_array_field()
 ; CHECK-NEXT:    ret void
 ;
-  store i64 0, ptr getelementptr inbounds (%struct.with.array, ptr @global.array_in_struct, i64 0, i32 0, i32 0), align 8
+  store i64 0, ptr @global.array_in_struct, align 8
   store i64 0, ptr getelementptr inbounds (%struct.with.array, ptr @global.array_in_struct, i64 0, i32 0, i32 1), align 8
   store i64 0, ptr getelementptr inbounds (%struct.with.array, ptr @global.array_in_struct, i64 0, i32 0, i32 2), align 8
   store i64 0, ptr getelementptr inbounds (%struct.with.array, ptr @global.array_in_struct, i64 0, i32 0, i32 3), align 8
@@ -94,7 +94,7 @@ define void @store_to_array() {
 ; CHECK-NEXT:    tail call fastcc void @read_array()
 ; CHECK-NEXT:    ret void
 ;
-  store i64 0, ptr getelementptr inbounds ([100 x i64], ptr @global.array, i64 0, i32 0), align 8
+  store i64 0, ptr @global.array, align 8
   store i64 0, ptr getelementptr inbounds ([100 x i64], ptr @global.array, i64 0, i32 1), align 8
   store i64 0, ptr getelementptr inbounds ([100 x i64], ptr @global.array, i64 0, i32 2), align 8
   store i64 0, ptr getelementptr inbounds ([100 x i64], ptr @global.array, i64 0, i32 3), align 8

diff --git a/llvm/test/Transforms/IROutliner/nooutline-attribute.ll b/llvm/test/Transforms/IROutliner/nooutline-attribute.ll
index 233dfc341d7ad..eaf3afa3b15a1 100644
--- a/llvm/test/Transforms/IROutliner/nooutline-attribute.ll
+++ b/llvm/test/Transforms/IROutliner/nooutline-attribute.ll
@@ -11,7 +11,7 @@ define void @outlinable() { ret void }
 define i8 @nooutline1(ptr noalias %s, ptr noalias %d, i64 %len) "nooutline" {
   %a = load i8, ptr %s
   %b = load i8, ptr %d
-  call void @llvm.memcpy.p0i8.p0i8.i64(ptr %d, ptr %s, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
   %c = add i8 %a, %b
   %ret = load i8, ptr %s
   ret i8 %ret
@@ -20,11 +20,11 @@ define i8 @nooutline1(ptr noalias %s, ptr noalias %d, i64 %len) "nooutline" {
 define i8 @nooutline2(ptr noalias %s, ptr noalias %d, i64 %len) "nooutline" {
   %a = load i8, ptr %s
   %b = load i8, ptr %d
-  call void @llvm.memcpy.p0i8.p0i8.i64(ptr %d, ptr %s, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
   %c = add i8 %a, %b
   %ret = load i8, ptr %s
   ret i8 %ret
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
 

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
index 5dffe87d29305..4a7300b9e9302 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
@@ -80,6 +80,6 @@ define void @bitcast_insert_pos_assert_2() {
   %itp0 = inttoptr i64 %pti0 to ptr
   %itp1 = ptrtoint ptr %asc0 to i64
   %itp2 = inttoptr i64 %itp1 to ptr
-  %gep0 = getelementptr i64, i64* %itp2, i64 1
+  %gep0 = getelementptr i64, ptr %itp2, i64 1
   ret void
 }

diff --git a/llvm/test/Transforms/Inline/call-intrinsic-objectsize.ll b/llvm/test/Transforms/Inline/call-intrinsic-objectsize.ll
index bdbbb101268e5..18f37aba82957 100644
--- a/llvm/test/Transforms/Inline/call-intrinsic-objectsize.ll
+++ b/llvm/test/Transforms/Inline/call-intrinsic-objectsize.ll
@@ -27,7 +27,7 @@ define i64 @caller() {
   ret i64 %1
 }
 
-; Testing the InlineCost of the call to @llvm.objectsize.i64.p0i8.
+; Testing the InlineCost of the call to @llvm.objectsize.i64.p0.
 ; Do not change the linkage of @callee; that will give it a severe discount in
 ; cost (LastCallToStaticBonus).
 define i64 @callee() {

diff --git a/llvm/test/Transforms/Inline/inline-byval-bonus.ll b/llvm/test/Transforms/Inline/inline-byval-bonus.ll
index c27c964bf42e2..1418fff9395e2 100644
--- a/llvm/test/Transforms/Inline/inline-byval-bonus.ll
+++ b/llvm/test/Transforms/Inline/inline-byval-bonus.ll
@@ -9,17 +9,17 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
-%struct.sphere = type { %struct.vec3, double, %struct.material, %struct.sphere* }
+%struct.sphere = type { %struct.vec3, double, %struct.material, ptr }
 %struct.vec3 = type { double, double, double }
 %struct.material = type { %struct.vec3, double, double }
 %struct.ray = type { %struct.vec3, %struct.vec3 }
 %struct.spoint = type { %struct.vec3, %struct.vec3, %struct.vec3, double }
 
-define i32 @caller(%struct.sphere* %i) ssp {
+define i32 @caller(ptr %i) ssp {
   %shadow_ray = alloca %struct.ray, align 8
-  call void @fix(%struct.ray* %shadow_ray)
+  call void @fix(ptr %shadow_ray)
 
-  %call = call i32 @ray_sphere(%struct.sphere* %i, %struct.ray* byval(%struct.ray) align 8 %shadow_ray, %struct.spoint* null)
+  %call = call i32 @ray_sphere(ptr %i, ptr byval(%struct.ray) align 8 %shadow_ray, ptr null)
   ret i32 %call
 
 ; CHECK-LABEL: @caller(
@@ -27,40 +27,40 @@ define i32 @caller(%struct.sphere* %i) ssp {
 ; CHECK: ret i32
 }
 
-declare void @fix(%struct.ray*)
+declare void @fix(ptr)
 
-define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval(%struct.ray) align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp {
-  %1 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 0
-  %2 = load double, double* %1, align 8
+define i32 @ray_sphere(ptr nocapture %sph, ptr nocapture byval(%struct.ray) align 8 %ray, ptr %sp) nounwind uwtable ssp {
+  %1 = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 1, i32 0
+  %2 = load double, ptr %1, align 8
   %3 = fmul double %2, %2
-  %4 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 1
-  %5 = load double, double* %4, align 8
+  %4 = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 1, i32 1
+  %5 = load double, ptr %4, align 8
   %6 = fmul double %5, %5
   %7 = fadd double %3, %6
-  %8 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 2
-  %9 = load double, double* %8, align 8
+  %8 = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 1, i32 2
+  %9 = load double, ptr %8, align 8
   %10 = fmul double %9, %9
   %11 = fadd double %7, %10
   %12 = fmul double %2, 2.000000e+00
-  %13 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 0
-  %14 = load double, double* %13, align 8
-  %15 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 0
-  %16 = load double, double* %15, align 8
+  %13 = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 0, i32 0
+  %14 = load double, ptr %13, align 8
+  %15 = getelementptr inbounds %struct.sphere, ptr %sph, i64 0, i32 0, i32 0
+  %16 = load double, ptr %15, align 8
   %17 = fsub double %14, %16
   %18 = fmul double %12, %17
   %19 = fmul double %5, 2.000000e+00
-  %20 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 1
-  %21 = load double, double* %20, align 8
-  %22 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 1
-  %23 = load double, double* %22, align 8
+  %20 = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 0, i32 1
+  %21 = load double, ptr %20, align 8
+  %22 = getelementptr inbounds %struct.sphere, ptr %sph, i64 0, i32 0, i32 1
+  %23 = load double, ptr %22, align 8
   %24 = fsub double %21, %23
   %25 = fmul double %19, %24
   %26 = fadd double %18, %25
   %27 = fmul double %9, 2.000000e+00
-  %28 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 2
-  %29 = load double, double* %28, align 8
-  %30 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 2
-  %31 = load double, double* %30, align 8
+  %28 = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 0, i32 2
+  %29 = load double, ptr %28, align 8
+  %30 = getelementptr inbounds %struct.sphere, ptr %sph, i64 0, i32 0, i32 2
+  %31 = load double, ptr %30, align 8
   %32 = fsub double %29, %31
   %33 = fmul double %27, %32
   %34 = fadd double %26, %33
@@ -83,8 +83,8 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
   %51 = fsub double %49, %50
   %52 = fmul double %51, 2.000000e+00
   %53 = fadd double %52, %45
-  %54 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 1
-  %55 = load double, double* %54, align 8
+  %54 = getelementptr inbounds %struct.sphere, ptr %sph, i64 0, i32 1
+  %55 = load double, ptr %54, align 8
   %56 = fmul double %55, %55
   %57 = fsub double %53, %56
   %58 = fmul double %34, %34
@@ -114,7 +114,7 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
   br i1 %or.cond1, label %130, label %76
 
 ; <label>:76                                      ; preds = %73
-  %77 = icmp eq %struct.spoint* %sp, null
+  %77 = icmp eq ptr %sp, null
   br i1 %77, label %130, label %78
 
 ; <label>:78                                      ; preds = %76
@@ -122,38 +122,38 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
   %t2.0 = select i1 %72, double %t1.0, double %70
   %79 = fcmp olt double %t1.0, %t2.0
   %80 = select i1 %79, double %t1.0, double %t2.0
-  %81 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 3
-  store double %80, double* %81, align 8
+  %81 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 3
+  store double %80, ptr %81, align 8
   %82 = fmul double %80, %2
   %83 = fadd double %14, %82
-  %84 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 0
-  store double %83, double* %84, align 8
+  %84 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 0, i32 0
+  store double %83, ptr %84, align 8
   %85 = fmul double %5, %80
   %86 = fadd double %21, %85
-  %87 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 1
-  store double %86, double* %87, align 8
+  %87 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 0, i32 1
+  store double %86, ptr %87, align 8
   %88 = fmul double %9, %80
   %89 = fadd double %29, %88
-  %90 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 2
-  store double %89, double* %90, align 8
-  %91 = load double, double* %15, align 8
+  %90 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 0, i32 2
+  store double %89, ptr %90, align 8
+  %91 = load double, ptr %15, align 8
   %92 = fsub double %83, %91
-  %93 = load double, double* %54, align 8
+  %93 = load double, ptr %54, align 8
   %94 = fdiv double %92, %93
-  %95 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 0
-  store double %94, double* %95, align 8
-  %96 = load double, double* %22, align 8
+  %95 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 1, i32 0
+  store double %94, ptr %95, align 8
+  %96 = load double, ptr %22, align 8
   %97 = fsub double %86, %96
-  %98 = load double, double* %54, align 8
+  %98 = load double, ptr %54, align 8
   %99 = fdiv double %97, %98
-  %100 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 1
-  store double %99, double* %100, align 8
-  %101 = load double, double* %30, align 8
+  %100 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 1, i32 1
+  store double %99, ptr %100, align 8
+  %101 = load double, ptr %30, align 8
   %102 = fsub double %89, %101
-  %103 = load double, double* %54, align 8
+  %103 = load double, ptr %54, align 8
   %104 = fdiv double %102, %103
-  %105 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 2
-  store double %104, double* %105, align 8
+  %105 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 1, i32 2
+  store double %104, ptr %105, align 8
   %106 = fmul double %2, %94
   %107 = fmul double %5, %99
   %108 = fadd double %106, %107
@@ -169,9 +169,9 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
   %118 = fmul double %104, %111
   %119 = fsub double %118, %9
   %120 = fsub double -0.000000e+00, %119
-  %.06 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 2, i32 0
-  %.18 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 2, i32 1
-  %.210 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 2, i32 2
+  %.06 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 2, i32 0
+  %.18 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 2, i32 1
+  %.210 = getelementptr inbounds %struct.spoint, ptr %sp, i64 0, i32 2, i32 2
   %121 = fmul double %113, %113
   %122 = fmul double %116, %116
   %123 = fadd double %121, %122
@@ -179,11 +179,11 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
   %125 = fadd double %123, %124
   %126 = tail call double @sqrt(double %125) nounwind readnone
   %127 = fdiv double %114, %126
-  store double %127, double* %.06, align 8
+  store double %127, ptr %.06, align 8
   %128 = fdiv double %117, %126
-  store double %128, double* %.18, align 8
+  store double %128, ptr %.18, align 8
   %129 = fdiv double %120, %126
-  store double %129, double* %.210, align 8
+  store double %129, ptr %.210, align 8
   br label %130
 
 ; <label>:130                                     ; preds = %78, %76, %73, %63, %0

diff --git a/llvm/test/Transforms/Inline/inlined-loop-metadata-inseltpoison.ll b/llvm/test/Transforms/Inline/inlined-loop-metadata-inseltpoison.ll
index b054852a23faf..0f4de0082d7fa 100644
--- a/llvm/test/Transforms/Inline/inlined-loop-metadata-inseltpoison.ll
+++ b/llvm/test/Transforms/Inline/inlined-loop-metadata-inseltpoison.ll
@@ -31,7 +31,7 @@
 ;   return Bar;
 ; }
 
-@"?Array@@3PEAHEA" = external dso_local local_unnamed_addr global i32*, align 8
+@"?Array@@3PEAHEA" = external dso_local local_unnamed_addr global ptr, align 8
 
 define dso_local i32 @"?foo@@YAHI at Z"(i32 %x) local_unnamed_addr !dbg !8 {
 entry:
@@ -45,7 +45,7 @@ entry:
   br i1 %cmp7, label %for.cond.cleanup, label %for.body.lr.ph, !dbg !13
 
 for.body.lr.ph:                                   ; preds = %entry
-  %0 = load i32*, i32** @"?Array@@3PEAHEA", align 8, !dbg !14, !tbaa !15
+  %0 = load ptr, ptr @"?Array@@3PEAHEA", align 8, !dbg !14, !tbaa !15
   %wide.trip.count = zext i32 %x to i64, !dbg !14
   %min.iters.check = icmp ult i64 %wide.trip.count, 8, !dbg !13
   br i1 %min.iters.check, label %scalar.ph, label %vector.ph, !dbg !13
@@ -70,14 +70,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %6 = add i64 %index, 5, !dbg !13
   %7 = add i64 %index, 6, !dbg !13
   %8 = add i64 %index, 7, !dbg !13
-  %9 = getelementptr inbounds i32, i32* %0, i64 %1, !dbg !19
-  %10 = getelementptr inbounds i32, i32* %0, i64 %5, !dbg !19
-  %11 = getelementptr inbounds i32, i32* %9, i32 0, !dbg !19
-  %12 = bitcast i32* %11 to <4 x i32>*, !dbg !19
-  %wide.load = load <4 x i32>, <4 x i32>* %12, align 4, !dbg !19, !tbaa !20
-  %13 = getelementptr inbounds i32, i32* %9, i32 4, !dbg !19
-  %14 = bitcast i32* %13 to <4 x i32>*, !dbg !19
-  %wide.load3 = load <4 x i32>, <4 x i32>* %14, align 4, !dbg !19, !tbaa !20
+  %9 = getelementptr inbounds i32, ptr %0, i64 %1, !dbg !19
+  %10 = getelementptr inbounds i32, ptr %0, i64 %5, !dbg !19
+  %11 = getelementptr inbounds i32, ptr %9, i32 0, !dbg !19
+  %12 = bitcast ptr %11 to ptr, !dbg !19
+  %wide.load = load <4 x i32>, ptr %12, align 4, !dbg !19, !tbaa !20
+  %13 = getelementptr inbounds i32, ptr %9, i32 4, !dbg !19
+  %14 = bitcast ptr %13 to ptr, !dbg !19
+  %wide.load3 = load <4 x i32>, ptr %14, align 4, !dbg !19, !tbaa !20
   %step.add5 = add <4 x i32> %vec.ind4, <i32 4, i32 4, i32 4, i32 4>, !dbg !19
   %15 = mul <4 x i32> %wide.load, %vec.ind4, !dbg !19
   %16 = mul <4 x i32> %wide.load3, %step.add5, !dbg !19
@@ -115,8 +115,8 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 for.body:                                         ; preds = %for.body, %scalar.ph
   %indvars.iv = phi i64 [ %bc.resume.val, %scalar.ph ], [ %indvars.iv.next, %for.body ]
   %Ret.08 = phi i32 [ %bc.merge.rdx, %scalar.ph ], [ %add, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %0, i64 %indvars.iv, !dbg !19
-  %21 = load i32, i32* %arrayidx, align 4, !dbg !19, !tbaa !20
+  %arrayidx = getelementptr inbounds i32, ptr %0, i64 %indvars.iv, !dbg !19
+  %21 = load i32, ptr %arrayidx, align 4, !dbg !19, !tbaa !20
   %22 = trunc i64 %indvars.iv to i32, !dbg !19
   %mul = mul i32 %21, %22, !dbg !19
   %add = add i32 %mul, %Ret.08, !dbg !19

diff --git a/llvm/test/Transforms/Inline/inlined-loop-metadata.ll b/llvm/test/Transforms/Inline/inlined-loop-metadata.ll
index 1ca94459beddf..92d0282a23521 100644
--- a/llvm/test/Transforms/Inline/inlined-loop-metadata.ll
+++ b/llvm/test/Transforms/Inline/inlined-loop-metadata.ll
@@ -31,7 +31,7 @@
 ;   return Bar;
 ; }
 
-@"?Array@@3PEAHEA" = external dso_local local_unnamed_addr global i32*, align 8
+@"?Array@@3PEAHEA" = external dso_local local_unnamed_addr global ptr, align 8
 
 define dso_local i32 @"?foo@@YAHI at Z"(i32 %x) local_unnamed_addr !dbg !8 {
 entry:
@@ -45,7 +45,7 @@ entry:
   br i1 %cmp7, label %for.cond.cleanup, label %for.body.lr.ph, !dbg !13
 
 for.body.lr.ph:                                   ; preds = %entry
-  %0 = load i32*, i32** @"?Array@@3PEAHEA", align 8, !dbg !14, !tbaa !15
+  %0 = load ptr, ptr @"?Array@@3PEAHEA", align 8, !dbg !14, !tbaa !15
   %wide.trip.count = zext i32 %x to i64, !dbg !14
   %min.iters.check = icmp ult i64 %wide.trip.count, 8, !dbg !13
   br i1 %min.iters.check, label %scalar.ph, label %vector.ph, !dbg !13
@@ -70,14 +70,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %6 = add i64 %index, 5, !dbg !13
   %7 = add i64 %index, 6, !dbg !13
   %8 = add i64 %index, 7, !dbg !13
-  %9 = getelementptr inbounds i32, i32* %0, i64 %1, !dbg !19
-  %10 = getelementptr inbounds i32, i32* %0, i64 %5, !dbg !19
-  %11 = getelementptr inbounds i32, i32* %9, i32 0, !dbg !19
-  %12 = bitcast i32* %11 to <4 x i32>*, !dbg !19
-  %wide.load = load <4 x i32>, <4 x i32>* %12, align 4, !dbg !19, !tbaa !20
-  %13 = getelementptr inbounds i32, i32* %9, i32 4, !dbg !19
-  %14 = bitcast i32* %13 to <4 x i32>*, !dbg !19
-  %wide.load3 = load <4 x i32>, <4 x i32>* %14, align 4, !dbg !19, !tbaa !20
+  %9 = getelementptr inbounds i32, ptr %0, i64 %1, !dbg !19
+  %10 = getelementptr inbounds i32, ptr %0, i64 %5, !dbg !19
+  %11 = getelementptr inbounds i32, ptr %9, i32 0, !dbg !19
+  %12 = bitcast ptr %11 to ptr, !dbg !19
+  %wide.load = load <4 x i32>, ptr %12, align 4, !dbg !19, !tbaa !20
+  %13 = getelementptr inbounds i32, ptr %9, i32 4, !dbg !19
+  %14 = bitcast ptr %13 to ptr, !dbg !19
+  %wide.load3 = load <4 x i32>, ptr %14, align 4, !dbg !19, !tbaa !20
   %step.add5 = add <4 x i32> %vec.ind4, <i32 4, i32 4, i32 4, i32 4>, !dbg !19
   %15 = mul <4 x i32> %wide.load, %vec.ind4, !dbg !19
   %16 = mul <4 x i32> %wide.load3, %step.add5, !dbg !19
@@ -115,8 +115,8 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 for.body:                                         ; preds = %for.body, %scalar.ph
   %indvars.iv = phi i64 [ %bc.resume.val, %scalar.ph ], [ %indvars.iv.next, %for.body ]
   %Ret.08 = phi i32 [ %bc.merge.rdx, %scalar.ph ], [ %add, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %0, i64 %indvars.iv, !dbg !19
-  %21 = load i32, i32* %arrayidx, align 4, !dbg !19, !tbaa !20
+  %arrayidx = getelementptr inbounds i32, ptr %0, i64 %indvars.iv, !dbg !19
+  %21 = load i32, ptr %arrayidx, align 4, !dbg !19, !tbaa !20
   %22 = trunc i64 %indvars.iv to i32, !dbg !19
   %mul = mul i32 %21, %22, !dbg !19
   %add = add i32 %mul, %Ret.08, !dbg !19

diff --git a/llvm/test/Transforms/InstCombine/alloca.ll b/llvm/test/Transforms/InstCombine/alloca.ll
index 4247d6a301931..15b43e780f149 100644
--- a/llvm/test/Transforms/InstCombine/alloca.ll
+++ b/llvm/test/Transforms/InstCombine/alloca.ll
@@ -41,7 +41,7 @@ define void @test() {
   call void (...) @use( ptr %Z )
   %size = load i32, ptr @int
   %A = alloca {{}}, i32 %size
-  call void (...) @use( {{}}* %A )
+  call void (...) @use( ptr %A )
   ret void
 }
 

diff --git a/llvm/test/Transforms/InstCombine/call.ll b/llvm/test/Transforms/InstCombine/call.ll
index f1fc0682b4af3..52de13ba06b35 100644
--- a/llvm/test/Transforms/InstCombine/call.ll
+++ b/llvm/test/Transforms/InstCombine/call.ll
@@ -240,7 +240,7 @@ define ptr @test11_mixed_as() {
 ; CHECK-NEXT:    [[X:%.*]] = call ptr @test11a_mixed_as()
 ; CHECK-NEXT:    ret ptr [[X]]
 ;
-  %X = call ptr ()* @test11a_mixed_as()
+  %X = call ptr @test11a_mixed_as()
   ret ptr %X
 }
 

diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 8ecbb85018428..fe99d7284487b 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1051,7 +1051,7 @@ define float @fmul_fdiv_factor_extra_use(float %x, float %y) {
   ret float %mul
 }
 
-define void @fmul_loop_invariant_fdiv(float* %a, float %x) {
+define void @fmul_loop_invariant_fdiv(ptr %a, float %x) {
 ; CHECK-LABEL: @fmul_loop_invariant_fdiv(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
@@ -1078,10 +1078,10 @@ for.cond.cleanup:
 for.body:
   %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %idxprom = zext i32 %i.08 to i64
-  %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
-  %f = load float, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 %idxprom
+  %f = load float, ptr %arrayidx, align 4
   %m = fmul fast float %f, %d
-  store float %m, float* %arrayidx, align 4
+  store float %m, ptr %arrayidx, align 4
   %inc = add nuw nsw i32 %i.08, 1
   %cmp.not = icmp eq i32 %inc, 1024
   br i1 %cmp.not, label %for.cond.cleanup, label %for.body

diff --git a/llvm/test/Transforms/InstCombine/memchr-8.ll b/llvm/test/Transforms/InstCombine/memchr-8.ll
index aefd518fd635d..0e878b77e40d7 100644
--- a/llvm/test/Transforms/InstCombine/memchr-8.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-8.ll
@@ -18,7 +18,7 @@ define ptr @call_a_pi32max_p1() {
 ; CHECK-NEXT:    [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 2147483647), i32 0, i64 2147483647)
 ; CHECK-NEXT:    ret ptr [[CHR]]
 ;
-  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i32 2147483647
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 1, i32 2147483647
   %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 2147483647)
   ret ptr %chr
 }
@@ -31,7 +31,7 @@ define ptr @call_a_pi32max() {
 ; CHECK-NEXT:    [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 2147483648), i32 0, i64 2147483647)
 ; CHECK-NEXT:    ret ptr [[CHR]]
 ;
-  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i64 2147483648
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 1, i64 2147483648
   %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 2147483647)
   ret ptr %chr
 }
@@ -45,7 +45,7 @@ define ptr @call_a_pui32max() {
 ; CHECK-NEXT:    [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967295) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 0), i32 0, i64 4294967295)
 ; CHECK-NEXT:    ret ptr [[CHR]]
 ;
-  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i32 0
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 1, i32 0
   %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 4294967295)
   ret ptr %chr
 }
@@ -57,7 +57,7 @@ define ptr @call_a_puimax_p1() {
 ; CHECK-NEXT:    [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967296) @a, i32 0, i64 4294967296)
 ; CHECK-NEXT:    ret ptr [[CHR]]
 ;
-  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 0
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 0
   %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 4294967296)
   ret ptr %chr
 }

diff --git a/llvm/test/Transforms/InstCombine/scalable-vector-struct.ll b/llvm/test/Transforms/InstCombine/scalable-vector-struct.ll
index b56099343f921..c9966be72fb51 100644
--- a/llvm/test/Transforms/InstCombine/scalable-vector-struct.ll
+++ b/llvm/test/Transforms/InstCombine/scalable-vector-struct.ll
@@ -3,19 +3,19 @@
 
 %struct.test = type { <vscale x 1 x i32>, <vscale x 1 x i32> }
 
-define <vscale x 1 x i32> @load(%struct.test* %x) {
+define <vscale x 1 x i32> @load(ptr %x) {
 ; CHECK-LABEL: define <vscale x 1 x i32> @load
 ; CHECK-SAME: (ptr [[X:%.*]]) {
 ; CHECK-NEXT:    [[A:%.*]] = load [[STRUCT_TEST:%.*]], ptr [[X]], align 4
 ; CHECK-NEXT:    [[B:%.*]] = extractvalue [[STRUCT_TEST]] [[A]], 1
 ; CHECK-NEXT:    ret <vscale x 1 x i32> [[B]]
 ;
-  %a = load %struct.test, %struct.test* %x
+  %a = load %struct.test, ptr %x
   %b = extractvalue %struct.test %a, 1
   ret <vscale x 1 x i32> %b
 }
 
-define void @store(%struct.test* %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %z) {
+define void @store(ptr %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %z) {
 ; CHECK-LABEL: define void @store
 ; CHECK-SAME: (ptr [[X:%.*]], <vscale x 1 x i32> [[Y:%.*]], <vscale x 1 x i32> [[Z:%.*]]) {
 ; CHECK-NEXT:    [[A:%.*]] = insertvalue [[STRUCT_TEST:%.*]] undef, <vscale x 1 x i32> [[Y]], 0
@@ -25,6 +25,6 @@ define void @store(%struct.test* %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %
 ;
   %a = insertvalue %struct.test undef, <vscale x 1 x i32> %y, 0
   %b = insertvalue %struct.test %a, <vscale x 1 x i32> %z, 1
-  store %struct.test %b, %struct.test* %x
+  store %struct.test %b, ptr %x
   ret void
 }

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/opaque_ptr.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/opaque_ptr.ll
index 1c9ab6f6624ab..c62744082f8ec 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/opaque_ptr.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/opaque_ptr.ll
@@ -16,9 +16,9 @@ define i64 @order() {
 ; CHECK-NEXT:    ret i64 [[ADD]]
 ;
   %idx1 = getelementptr inbounds %S, ptr @S, i32 0, i32 1
-  %l1 = load i64, i64* %idx1, align 8
+  %l1 = load i64, ptr %idx1, align 8
   %idx0 = getelementptr inbounds %S, ptr @S, i32 0, i32 0
-  %l0 = load i64, i64* %idx0, align 8
+  %l0 = load i64, ptr %idx0, align 8
   %add = add i64 %l0, %l1
   ret i64 %add
 }

diff --git a/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll b/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
index ac071b671c57c..07c50b5b61162 100644
--- a/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
+++ b/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
@@ -110,7 +110,7 @@ define void @f(ptr noalias %a,
 ; NO-VERSION:       for.end:
 ; NO-VERSION-NEXT:    ret void
 ;
-  i32* noalias %b,
+  ptr noalias %b,
   ptr noalias %c,
   ptr noalias %d,
   i64 %stride) {

diff --git a/llvm/test/Transforms/LoopFlatten/loop-flatten-negative.ll b/llvm/test/Transforms/LoopFlatten/loop-flatten-negative.ll
index 479b5c3388f89..50da17c9280a0 100644
--- a/llvm/test/Transforms/LoopFlatten/loop-flatten-negative.ll
+++ b/llvm/test/Transforms/LoopFlatten/loop-flatten-negative.ll
@@ -507,7 +507,7 @@ for.end19:                                        ; preds = %for.end16
 ;      for (int k = 0; k < N; ++k)
 ;        f(&A[i + N * (j + N * k)]);
 ;
-define void @d3_1(i32* %A, i32 %N) {
+define void @d3_1(ptr %A, i32 %N) {
 entry:
   %cmp35 = icmp sgt i32 %N, 0
   br i1 %cmp35, label %for.cond1.preheader.lr.ph, label %for.cond.cleanup
@@ -577,7 +577,7 @@ for.cond.cleanup:
 ;         g();
 ;     }
 ;
-define void @d3_3(i32* nocapture %A, i32 %N, i32 %M) {
+define void @d3_3(ptr nocapture %A, i32 %N, i32 %M) {
 entry:
   %cmp29 = icmp sgt i32 %N, 0
   br i1 %cmp29, label %for.cond1.preheader.lr.ph, label %for.cond.cleanup

diff --git a/llvm/test/Transforms/LoopFlatten/loop-flatten-version.ll b/llvm/test/Transforms/LoopFlatten/loop-flatten-version.ll
index dec323d135f35..85072bf3a43f4 100644
--- a/llvm/test/Transforms/LoopFlatten/loop-flatten-version.ll
+++ b/llvm/test/Transforms/LoopFlatten/loop-flatten-version.ll
@@ -164,7 +164,7 @@ for.end:
 ;      for (int j = 0; j < M; ++j)
 ;        f(&A[i*M+j]);
 ;
-define void @d3_2(i32* %A, i32 %N, i32 %M) {
+define void @d3_2(ptr %A, i32 %N, i32 %M) {
 ; CHECK-LABEL: define void @d3_2(
 ; CHECK-SAME: ptr [[A:%.*]], i32 [[N:%.*]], i32 [[M:%.*]]) {
 ; CHECK-NEXT:  entry:

diff --git a/llvm/test/Transforms/LoopFlatten/widen-iv.ll b/llvm/test/Transforms/LoopFlatten/widen-iv.ll
index dc32b8193e34a..2feca405eea7a 100644
--- a/llvm/test/Transforms/LoopFlatten/widen-iv.ll
+++ b/llvm/test/Transforms/LoopFlatten/widen-iv.ll
@@ -119,7 +119,7 @@ for.cond.cleanup:
 ; It is very similar to test case @foo above, but the CFG is slightly
 ; different, making the analysis slightly different.
 ;
-define void @foo2_sext(i32* nocapture readonly %A, i32 %N, i32 %M) {
+define void @foo2_sext(ptr nocapture readonly %A, i32 %N, i32 %M) {
 ; CHECK-LABEL: @foo2_sext(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[N:%.*]], 0
@@ -257,7 +257,7 @@ for.cond.cleanup:
 ;       for (unsigned j = 0; j < M; ++j)
 ;         f(A[i*M+j]);
 ;
-define void @foo2_zext(i32* nocapture readonly %A, i32 %N, i32 %M) {
+define void @foo2_zext(ptr nocapture readonly %A, i32 %N, i32 %M) {
 ; CHECK-LABEL: @foo2_zext(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP17_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0

diff --git a/llvm/test/Transforms/LoopIdiom/lir-heurs-multi-block-loop.ll b/llvm/test/Transforms/LoopIdiom/lir-heurs-multi-block-loop.ll
index d4cbf1a1e1154..90aa09287fd64 100644
--- a/llvm/test/Transforms/LoopIdiom/lir-heurs-multi-block-loop.ll
+++ b/llvm/test/Transforms/LoopIdiom/lir-heurs-multi-block-loop.ll
@@ -16,7 +16,7 @@ declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 ; CHECK: call void @llvm.memset
 ; CHECK: for.body:
 ;
-define i32 @LoopMemset([2048 x i8]* noalias nocapture %DST, i32 %SIZE) local_unnamed_addr optsize {
+define i32 @LoopMemset(ptr noalias nocapture %DST, i32 %SIZE) local_unnamed_addr optsize {
 entry:
   %cmp12 = icmp sgt i32 %SIZE, 0
   br i1 %cmp12, label %for.body.preheader, label %for.end
@@ -71,7 +71,7 @@ for.end:                                          ; preds = %for.end.loopexit, %
 ; CHECK: call void @llvm.memset
 ; CHECK: for.cond1.preheader:
 ;
-define i32 @NestedMemset_LoopMemset([2046 x i8]* noalias nocapture %DST, i32 %SIZE) local_unnamed_addr optsize {
+define i32 @NestedMemset_LoopMemset(ptr noalias nocapture %DST, i32 %SIZE) local_unnamed_addr optsize {
 entry:
   %cmp25 = icmp sgt i32 %SIZE, 0
   br i1 %cmp25, label %for.cond1.preheader.preheader, label %for.end11
@@ -133,7 +133,7 @@ for.end11:                                        ; preds = %for.end11.loopexit,
 ; CHECK-LABEL: @Non_NestedMemset
 ; CHECK-NOT: call void @llvm.memset
 ;
-define i32 @Non_NestedMemset(i8* noalias nocapture %DST, i32 %SIZE) local_unnamed_addr optsize {
+define i32 @Non_NestedMemset(ptr noalias nocapture %DST, i32 %SIZE) local_unnamed_addr optsize {
 entry:
   %cmp12 = icmp sgt i32 %SIZE, 0
   br i1 %cmp12, label %for.body.preheader, label %for.end

diff --git a/llvm/test/Transforms/LoopInterchange/profitability.ll b/llvm/test/Transforms/LoopInterchange/profitability.ll
index 3ee106c2e3c4d..505c6c422beb6 100644
--- a/llvm/test/Transforms/LoopInterchange/profitability.ll
+++ b/llvm/test/Transforms/LoopInterchange/profitability.ll
@@ -203,13 +203,13 @@ for2.preheader:
 for2:
   %j = phi i64 [ %i.next, %for2 ], [ 1, %for2.preheader ]
   %j.prev = add nsw i64 %j,  -1
-  %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %j, i64 0
-  %lv1 = load i32, i32* %arrayidx5
-  %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @B, i64 0, i64 %j,  i64 %i30
-  %lv2 = load i32, i32* %arrayidx9
+  %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %j, i64 0
+  %lv1 = load i32, ptr %arrayidx5
+  %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @B, i64 0, i64 %j,  i64 %i30
+  %lv2 = load i32, ptr %arrayidx9
   %add = add nsw i32 %lv1, %lv2
-  %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %j,  i64 0
-  store i32 %add, i32* %arrayidx13
+  %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %j,  i64 0
+  store i32 %add, ptr %arrayidx13
   %i.next = add nuw nsw i64 %j,  1
   %exitcond = icmp eq i64 %j,  99
   br i1 %exitcond, label %for1.inc14, label %for2

diff --git a/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll b/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
index d198e28458826..f38f948c2a2e5 100644
--- a/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
+++ b/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
@@ -180,7 +180,7 @@ for.body:                                         ; preds = %for.body, %entry
 
   %a = load ptr, ptr %Aidx , align 8
   %c = getelementptr i8, ptr %a, i64 57
-  %c.i64p = ptrtoint i8* %c to i64
+  %c.i64p = ptrtoint ptr %c to i64
   store i64 %c.i64p, ptr %Cidx, align 8
 
   %exitcond = icmp eq i64 %indvars.iv.next, %N
@@ -241,7 +241,7 @@ for.body:                                         ; preds = %for.body, %entry
   %a_p1 = add i32 %b, 2
   store i32 %a_p1, ptr %Aidx_next, align 4
 
-  %a = load <2 x half>, <2 x half>* %Aidx, align 4
+  %a = load <2 x half>, ptr %Aidx, align 4
   %c = fmul <2 x half> %a, <half 2.0, half 2.0>
   %c.int = bitcast <2 x half> %c to i32
   store i32 %c.int, ptr %Cidx, align 4
@@ -297,7 +297,7 @@ for.body:                                         ; preds = %for.body, %entry
 
   %a = load ptr, ptr %Aidx , align 8
   %c = getelementptr i8, ptr %a, i32 57
-  %c.i64p = ptrtoint i8* %c to i32
+  %c.i64p = ptrtoint ptr %c to i32
   store i32 %c.i64p, ptr %Cidx, align 8
 
   %exitcond = icmp eq i64 %indvars.iv.next, %N

diff --git a/llvm/test/Transforms/LoopSimplify/do-preheader-dbg-inseltpoison.ll b/llvm/test/Transforms/LoopSimplify/do-preheader-dbg-inseltpoison.ll
index a31ca6203fcfa..efeab891d2941 100644
--- a/llvm/test/Transforms/LoopSimplify/do-preheader-dbg-inseltpoison.ll
+++ b/llvm/test/Transforms/LoopSimplify/do-preheader-dbg-inseltpoison.ll
@@ -22,7 +22,7 @@
 ;     return Total;
 ; }
 
-define dso_local i32 @"foo"(i8* nocapture readonly %Bytes, i32 %Count) local_unnamed_addr !dbg !8 {
+define dso_local i32 @"foo"(ptr nocapture readonly %Bytes, i32 %Count) local_unnamed_addr !dbg !8 {
 entry:
   %0 = sext i32 %Count to i64, !dbg !10
   %min.iters.check = icmp ult i32 %Count, 8, !dbg !10
@@ -39,15 +39,15 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %vec.phi5 = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
   %1 = xor i64 %index, -1, !dbg !11
   %2 = add i64 %1, %0, !dbg !11
-  %3 = getelementptr inbounds i8, i8* %Bytes, i64 %2, !dbg !11
-  %4 = getelementptr inbounds i8, i8* %3, i64 -3, !dbg !11
-  %5 = bitcast i8* %4 to <4 x i8>*, !dbg !11
-  %wide.load = load <4 x i8>, <4 x i8>* %5, align 1, !dbg !11, !tbaa !12
+  %3 = getelementptr inbounds i8, ptr %Bytes, i64 %2, !dbg !11
+  %4 = getelementptr inbounds i8, ptr %3, i64 -3, !dbg !11
+  %5 = bitcast ptr %4 to ptr, !dbg !11
+  %wide.load = load <4 x i8>, ptr %5, align 1, !dbg !11, !tbaa !12
   %reverse = shufflevector <4 x i8> %wide.load, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, !dbg !11
-  %6 = getelementptr inbounds i8, i8* %3, i64 -4, !dbg !11
-  %7 = getelementptr inbounds i8, i8* %6, i64 -3, !dbg !11
-  %8 = bitcast i8* %7 to <4 x i8>*, !dbg !11
-  %wide.load6 = load <4 x i8>, <4 x i8>* %8, align 1, !dbg !11, !tbaa !12
+  %6 = getelementptr inbounds i8, ptr %3, i64 -4, !dbg !11
+  %7 = getelementptr inbounds i8, ptr %6, i64 -3, !dbg !11
+  %8 = bitcast ptr %7 to ptr, !dbg !11
+  %wide.load6 = load <4 x i8>, ptr %8, align 1, !dbg !11, !tbaa !12
   %reverse7 = shufflevector <4 x i8> %wide.load6, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, !dbg !11
   %9 = sext <4 x i8> %reverse to <4 x i32>, !dbg !11
   %10 = sext <4 x i8> %reverse7 to <4 x i32>, !dbg !11
@@ -78,8 +78,8 @@ do.body:                                          ; preds = %do.body.preheader,
   %indvars.iv = phi i64 [ %indvars.iv.next, %do.body ], [ %indvars.iv.ph, %do.body.preheader ]
   %Total.0 = phi i32 [ %add, %do.body ], [ %Total.0.ph, %do.body.preheader ], !dbg !18
   %indvars.iv.next = add nsw i64 %indvars.iv, -1, !dbg !11
-  %arrayidx = getelementptr inbounds i8, i8* %Bytes, i64 %indvars.iv.next, !dbg !11
-  %15 = load i8, i8* %arrayidx, align 1, !dbg !11, !tbaa !12
+  %arrayidx = getelementptr inbounds i8, ptr %Bytes, i64 %indvars.iv.next, !dbg !11
+  %15 = load i8, ptr %arrayidx, align 1, !dbg !11, !tbaa !12
   %conv = sext i8 %15 to i32, !dbg !11
   %add = add nsw i32 %Total.0, %conv, !dbg !11
   %16 = icmp eq i64 %indvars.iv.next, 0

diff --git a/llvm/test/Transforms/LoopSimplify/do-preheader-dbg.ll b/llvm/test/Transforms/LoopSimplify/do-preheader-dbg.ll
index 0d956d929cf7c..c06a305afc43d 100644
--- a/llvm/test/Transforms/LoopSimplify/do-preheader-dbg.ll
+++ b/llvm/test/Transforms/LoopSimplify/do-preheader-dbg.ll
@@ -22,7 +22,7 @@
 ;     return Total;
 ; }
 
-define dso_local i32 @"foo"(i8* nocapture readonly %Bytes, i32 %Count) local_unnamed_addr !dbg !8 {
+define dso_local i32 @"foo"(ptr nocapture readonly %Bytes, i32 %Count) local_unnamed_addr !dbg !8 {
 entry:
   %0 = sext i32 %Count to i64, !dbg !10
   %min.iters.check = icmp ult i32 %Count, 8, !dbg !10
@@ -39,15 +39,15 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %vec.phi5 = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
   %1 = xor i64 %index, -1, !dbg !11
   %2 = add i64 %1, %0, !dbg !11
-  %3 = getelementptr inbounds i8, i8* %Bytes, i64 %2, !dbg !11
-  %4 = getelementptr inbounds i8, i8* %3, i64 -3, !dbg !11
-  %5 = bitcast i8* %4 to <4 x i8>*, !dbg !11
-  %wide.load = load <4 x i8>, <4 x i8>* %5, align 1, !dbg !11, !tbaa !12
+  %3 = getelementptr inbounds i8, ptr %Bytes, i64 %2, !dbg !11
+  %4 = getelementptr inbounds i8, ptr %3, i64 -3, !dbg !11
+  %5 = bitcast ptr %4 to ptr, !dbg !11
+  %wide.load = load <4 x i8>, ptr %5, align 1, !dbg !11, !tbaa !12
   %reverse = shufflevector <4 x i8> %wide.load, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, !dbg !11
-  %6 = getelementptr inbounds i8, i8* %3, i64 -4, !dbg !11
-  %7 = getelementptr inbounds i8, i8* %6, i64 -3, !dbg !11
-  %8 = bitcast i8* %7 to <4 x i8>*, !dbg !11
-  %wide.load6 = load <4 x i8>, <4 x i8>* %8, align 1, !dbg !11, !tbaa !12
+  %6 = getelementptr inbounds i8, ptr %3, i64 -4, !dbg !11
+  %7 = getelementptr inbounds i8, ptr %6, i64 -3, !dbg !11
+  %8 = bitcast ptr %7 to ptr, !dbg !11
+  %wide.load6 = load <4 x i8>, ptr %8, align 1, !dbg !11, !tbaa !12
   %reverse7 = shufflevector <4 x i8> %wide.load6, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, !dbg !11
   %9 = sext <4 x i8> %reverse to <4 x i32>, !dbg !11
   %10 = sext <4 x i8> %reverse7 to <4 x i32>, !dbg !11
@@ -78,8 +78,8 @@ do.body:                                          ; preds = %do.body.preheader,
   %indvars.iv = phi i64 [ %indvars.iv.next, %do.body ], [ %indvars.iv.ph, %do.body.preheader ]
   %Total.0 = phi i32 [ %add, %do.body ], [ %Total.0.ph, %do.body.preheader ], !dbg !18
   %indvars.iv.next = add nsw i64 %indvars.iv, -1, !dbg !11
-  %arrayidx = getelementptr inbounds i8, i8* %Bytes, i64 %indvars.iv.next, !dbg !11
-  %15 = load i8, i8* %arrayidx, align 1, !dbg !11, !tbaa !12
+  %arrayidx = getelementptr inbounds i8, ptr %Bytes, i64 %indvars.iv.next, !dbg !11
+  %15 = load i8, ptr %arrayidx, align 1, !dbg !11, !tbaa !12
   %conv = sext i8 %15 to i32, !dbg !11
   %add = add nsw i32 %Total.0, %conv, !dbg !11
   %16 = icmp eq i64 %indvars.iv.next, 0

diff --git a/llvm/test/Transforms/LoopStrengthReduce/Power/memory-intrinsic.ll b/llvm/test/Transforms/LoopStrengthReduce/Power/memory-intrinsic.ll
index 82e3ab761fb3d..92f3d4d0abeb3 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/Power/memory-intrinsic.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/Power/memory-intrinsic.ll
@@ -9,11 +9,11 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; CHECK-NOT: LSR Use: Kind=Basic
 ; CHECK-NOT: LSR Use: Kind=Basic
 
-declare <4 x i32> @llvm.ppc.altivec.lvx(i8*)
-declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*)
+declare <4 x i32> @llvm.ppc.altivec.lvx(ptr)
+declare void @llvm.ppc.altivec.stvx(<4 x i32>, ptr)
 
 ; Function Attrs: nofree norecurse nounwind
-define void @foo(<4 x i32>* %0, <4 x i32>* %1, i32 signext %2) {
+define void @foo(ptr %0, ptr %1, i32 signext %2) {
   %4 = icmp sgt i32 %2, 0
   br i1 %4, label %5, label %7
 
@@ -26,12 +26,12 @@ define void @foo(<4 x i32>* %0, <4 x i32>* %1, i32 signext %2) {
 
 8:                                                ; preds = %5, %8
   %9 = phi i64 [ 0, %5 ], [ %15, %8 ]
-  %10 = getelementptr inbounds <4 x i32>, <4 x i32>* %1, i64 %9
-  %11 = bitcast <4 x i32>* %10 to i8*
-  %12 = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %11)
-  %13 = getelementptr inbounds <4 x i32>, <4 x i32>* %0, i64 %9
-  %14 = bitcast <4 x i32>* %13 to i8*
-  call void @llvm.ppc.altivec.stvx(<4 x i32> %12, i8* %14)
+  %10 = getelementptr inbounds <4 x i32>, ptr %1, i64 %9
+  %11 = bitcast ptr %10 to ptr
+  %12 = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %11)
+  %13 = getelementptr inbounds <4 x i32>, ptr %0, i64 %9
+  %14 = bitcast ptr %13 to ptr
+  call void @llvm.ppc.altivec.stvx(<4 x i32> %12, ptr %14)
   %15 = add nuw nsw i64 %9, 10
   %16 = icmp ult i64 %15, %6
   br i1 %16, label %8, label %7

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
index 32646b4c2b42d..7fef404eaf147 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
@@ -147,7 +147,7 @@ while.end:                                        ; preds = %entry
 ; Test redundant phi elimination when the deleted phi's increment is
 ; itself a phi.
 ;
-define fastcc void @test3(double* nocapture %u) nounwind uwtable ssp {
+define fastcc void @test3(ptr nocapture %u) nounwind uwtable ssp {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 undef, label [[MESHBB1_PREHEADER:%.*]], label [[MESHBB5:%.*]]

diff --git a/llvm/test/Transforms/LoopStrengthReduce/lsr-term-fold-negative-testcase.ll b/llvm/test/Transforms/LoopStrengthReduce/lsr-term-fold-negative-testcase.ll
index e7a8acb82c20f..2d3d3a4b72a1a 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/lsr-term-fold-negative-testcase.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/lsr-term-fold-negative-testcase.ll
@@ -136,7 +136,7 @@ for.end:                                          ; preds = %for.body
 
 @fp_inc = common global float 0.000000e+00, align 4
 
-define void @NonSCEVableIV(float %init, float* %A, i32 %N) {
+define void @NonSCEVableIV(float %init, ptr %A, i32 %N) {
 ; CHECK-LABEL: define void @NonSCEVableIV
 ; CHECK-SAME: (float [[INIT:%.*]], ptr [[A:%.*]], i32 [[N:%.*]]) {
 ; CHECK-NEXT:  entry:
@@ -157,14 +157,14 @@ define void @NonSCEVableIV(float %init, float* %A, i32 %N) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load float, float* @fp_inc, align 4
+  %0 = load float, ptr @fp_inc, align 4
   br label %for.body
 
 for.body:                                         ; preds = %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %x.05 = phi float [ %init, %entry ], [ %add, %for.body ]
-  %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  store float %x.05, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr %A, i64 %indvars.iv
+  store float %x.05, ptr %arrayidx, align 4
   %add = fsub float %x.05, %0
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32

diff --git a/llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll b/llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
index 12cfa39cafd45..af075a053e61a 100644
--- a/llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
+++ b/llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
@@ -8,7 +8,7 @@
 ; CHECK: for.body:
 ; CHECK:   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !2
 
-define void @loopfn(float* %s1, float* %s2, float* %d, i32 %n) {
+define void @loopfn(ptr %s1, ptr %s2, ptr %d, i32 %n) {
 entry:
   %cmp10 = icmp sgt i32 %n, 0
   br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
@@ -27,16 +27,16 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %s1, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = getelementptr inbounds float, float* %s2, i32 %index
-  %3 = bitcast float* %2 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
+  %0 = getelementptr inbounds float, ptr %s1, i32 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x float>, ptr %1, align 4
+  %2 = getelementptr inbounds float, ptr %s2, i32 %index
+  %3 = bitcast ptr %2 to ptr
+  %wide.load12 = load <4 x float>, ptr %3, align 4
   %4 = fadd fast <4 x float> %wide.load12, %wide.load
-  %5 = getelementptr inbounds float, float* %d, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  store <4 x float> %4, <4 x float>* %6, align 4
+  %5 = getelementptr inbounds float, ptr %d, i32 %index
+  %6 = bitcast ptr %5 to ptr
+  store <4 x float> %4, ptr %6, align 4
   %index.next = add i32 %index, 4
   %7 = icmp eq i32 %index.next, %n.vec
   br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0
@@ -53,13 +53,13 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 
 for.body:                                         ; preds = %for.body.preheader13, %for.body
   %i.011 = phi i32 [ %add3, %for.body ], [ %i.011.ph, %for.body.preheader13 ]
-  %arrayidx = getelementptr inbounds float, float* %s1, i32 %i.011
-  %8 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %s2, i32 %i.011
-  %9 = load float, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds float, ptr %s1, i32 %i.011
+  %8 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %s2, i32 %i.011
+  %9 = load float, ptr %arrayidx1, align 4
   %add = fadd fast float %9, %8
-  %arrayidx2 = getelementptr inbounds float, float* %d, i32 %i.011
-  store float %add, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %d, i32 %i.011
+  store float %add, ptr %arrayidx2, align 4
   %add3 = add nuw nsw i32 %i.011, 1
   %exitcond = icmp eq i32 %add3, %n
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !2
@@ -76,7 +76,7 @@ for.body:                                         ; preds = %for.body.preheader1
 ; CHECK: for.body:
 ; CHECK:   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !0
 
-define void @remainder(float* %s1, float* %s2, float* %d, i32 %n) {
+define void @remainder(ptr %s1, ptr %s2, ptr %d, i32 %n) {
 entry:
   %cmp10 = icmp sgt i32 %n, 0
   br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
@@ -95,16 +95,16 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %s1, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = getelementptr inbounds float, float* %s2, i32 %index
-  %3 = bitcast float* %2 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
+  %0 = getelementptr inbounds float, ptr %s1, i32 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x float>, ptr %1, align 4
+  %2 = getelementptr inbounds float, ptr %s2, i32 %index
+  %3 = bitcast ptr %2 to ptr
+  %wide.load12 = load <4 x float>, ptr %3, align 4
   %4 = fadd fast <4 x float> %wide.load12, %wide.load
-  %5 = getelementptr inbounds float, float* %d, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  store <4 x float> %4, <4 x float>* %6, align 4
+  %5 = getelementptr inbounds float, ptr %d, i32 %index
+  %6 = bitcast ptr %5 to ptr
+  store <4 x float> %4, ptr %6, align 4
   %index.next = add i32 %index, 4
   %7 = icmp eq i32 %index.next, %n.vec
   br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0
@@ -121,13 +121,13 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 
 for.body:                                         ; preds = %for.body.preheader13, %for.body
   %i.011 = phi i32 [ %add3, %for.body ], [ %i.011.ph, %for.body.preheader13 ]
-  %arrayidx = getelementptr inbounds float, float* %s1, i32 %i.011
-  %8 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %s2, i32 %i.011
-  %9 = load float, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds float, ptr %s1, i32 %i.011
+  %8 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %s2, i32 %i.011
+  %9 = load float, ptr %arrayidx1, align 4
   %add = fadd fast float %9, %8
-  %arrayidx2 = getelementptr inbounds float, float* %d, i32 %i.011
-  store float %add, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %d, i32 %i.011
+  store float %add, ptr %arrayidx2, align 4
   %add3 = add nuw nsw i32 %i.011, 1
   %exitcond = icmp eq i32 %add3, %n
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !0
@@ -143,7 +143,7 @@ for.body:                                         ; preds = %for.body.preheader1
 ; CHECK: for.latch:
 ; CHECK:   br i1 %exitcond34, label %for.cond.cleanup.loopexit, label %for.outer
 
-define void @nested(float* %s1, float* %s2, float* %d, i32 %n) {
+define void @nested(ptr %s1, ptr %s2, ptr %d, i32 %n) {
 entry:
   %cmp31 = icmp eq i32 %n, 0
   br i1 %cmp31, label %for.cond.cleanup, label %for.outer.preheader
@@ -162,16 +162,16 @@ for.outer:                           ; preds = %for.outer.preheader, %for.cond1.
 vector.body:                                      ; preds = %for.outer, %vector.body
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %for.outer ]
   %0 = add i32 %index, %mul.us
-  %1 = getelementptr inbounds float, float* %s1, i32 %0
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = getelementptr inbounds float, float* %s2, i32 %0
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.load35 = load <4 x float>, <4 x float>* %4, align 4
+  %1 = getelementptr inbounds float, ptr %s1, i32 %0
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <4 x float>, ptr %2, align 4
+  %3 = getelementptr inbounds float, ptr %s2, i32 %0
+  %4 = bitcast ptr %3 to ptr
+  %wide.load35 = load <4 x float>, ptr %4, align 4
   %5 = fadd fast <4 x float> %wide.load35, %wide.load
-  %6 = getelementptr inbounds float, float* %d, i32 %0
-  %7 = bitcast float* %6 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %7, align 4
+  %6 = getelementptr inbounds float, ptr %d, i32 %0
+  %7 = bitcast ptr %6 to ptr
+  store <4 x float> %5, ptr %7, align 4
   %index.next = add i32 %index, 4
   %8 = icmp eq i32 %index.next, %n.vec
   br i1 %8, label %for.latch, label %vector.body, !llvm.loop !0
@@ -193,7 +193,7 @@ for.cond.cleanup:                                 ; preds = %for.cond1.for.cond.
 ; CHECK-LABEL: test_intrinsics
 ; CHECK: call <16 x i8> @llvm.arm.mve.sub
 ; CHECK-NOT: call <16 x i8> @llvm.arm.mve.sub
-define dso_local arm_aapcs_vfpcc void @test_intrinsics(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @test_intrinsics(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 15
@@ -211,18 +211,18 @@ vector.body:
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %tmp13, %vector.ph ], [ %tmp15, %vector.body ]
   %0 = phi i32 [ %N, %vector.ph ], [ %2, %vector.body ]
-  %tmp = getelementptr inbounds i8, i8* %a, i32 %index
+  %tmp = getelementptr inbounds i8, ptr %a, i32 %index
   %1 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %0)
   %2 = sub i32 %0, 16
-  %tmp2 = bitcast i8* %tmp to <16 x i8>*
-  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2, i32 4, <16 x i1> %1, <16 x i8> undef)
-  %tmp3 = getelementptr inbounds i8, i8* %b, i32 %index
-  %tmp4 = bitcast i8* %tmp3 to <16 x i8>*
-  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4, i32 4, <16 x i1> %1, <16 x i8> undef)
+  %tmp2 = bitcast ptr %tmp to ptr
+  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp2, i32 4, <16 x i1> %1, <16 x i8> undef)
+  %tmp3 = getelementptr inbounds i8, ptr %b, i32 %index
+  %tmp4 = bitcast ptr %tmp3 to ptr
+  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp4, i32 4, <16 x i1> %1, <16 x i8> undef)
   %sub = call <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8> %wide.masked.load2, <16 x i8> %wide.masked.load, <16 x i1> %1, <16 x i8> undef)
-  %tmp6 = getelementptr inbounds i8, i8* %c, i32 %index
-  %tmp7 = bitcast i8* %tmp6 to <16 x i8>*
-  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %sub, <16 x i8>* %tmp7, i32 4, <16 x i1> %1)
+  %tmp6 = getelementptr inbounds i8, ptr %c, i32 %index
+  %tmp7 = bitcast ptr %tmp6 to ptr
+  tail call void @llvm.masked.store.v16i8.p0(<16 x i8> %sub, ptr %tmp7, i32 4, <16 x i1> %1)
   %index.next = add i32 %index, 16
   %tmp15 = sub i32 %tmp14, 1
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -233,9 +233,9 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 declare <16 x i1> @llvm.arm.mve.vctp8(i32)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
 declare <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
 
 
 !0 = distinct !{!0, !1}

diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-conditions.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-conditions.ll
index c339de54ccaac..4d08645a1e536 100644
--- a/llvm/test/Transforms/LoopUnroll/peel-loop-conditions.ll
+++ b/llvm/test/Transforms/LoopUnroll/peel-loop-conditions.ll
@@ -1637,7 +1637,7 @@ for.body:                                         ; preds = %entry, %if.end
   br i1 %or.cond, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.body
-  tail call void (i32, ...) bitcast (void (...)* @f1 to void (i32, ...)*)(i32 %a)
+  tail call void (i32, ...) @f1(i32 %a)
   br label %if.end
 
 if.end:                                           ; preds = %for.body, %if.then
@@ -1745,7 +1745,7 @@ for.body:                                         ; preds = %entry, %if.end
   br i1 %or.cond, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.body
-  tail call void (i32, ...) bitcast (void (...)* @f1 to void (i32, ...)*)(i32 %a)
+  tail call void (i32, ...) @f1(i32 %a)
   br label %if.end
 
 if.end:                                           ; preds = %for.body, %if.then
@@ -1971,7 +1971,7 @@ for.body:                                         ; preds = %entry, %if.end
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.body
-  tail call void (i32, ...) bitcast (void (...)* @f1 to void (i32, ...)*)(i32 %a)
+  tail call void (i32, ...) @f1(i32 %a)
   br label %if.end
 
 if.end:                                           ; preds = %for.body, %if.then
@@ -2049,7 +2049,7 @@ for.body:                                         ; preds = %entry, %if.end
   br i1 %or.cond5, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.body
-  tail call void (i32, ...) bitcast (void (...)* @f1 to void (i32, ...)*)(i32 %a)
+  tail call void (i32, ...) @f1(i32 %a)
   br label %if.end
 
 if.end:                                           ; preds = %for.body, %if.then

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 16718bb41751b..c07b3c8d49227 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -65,17 +65,17 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx0 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %indvars.iv
-  %load1 = load i32, i32* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %indvars.iv
+  %load1 = load i32, ptr %arrayidx0, align 4
   %or = or disjoint i64 %indvars.iv, 1
-  %arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %or
-  %load2 = load i32, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %or
+  %load2 = load i32, ptr %arrayidx1, align 4
   %add = add nsw i32 %load1, %C
   %mul = mul nsw i32 %load2, %D
-  %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %or
-  store i32 %mul, i32* %arrayidx3, align 4
+  %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %indvars.iv
+  store i32 %add, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %or
+  store i32 %mul, ptr %arrayidx3, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %cmp = icmp slt i64 %indvars.iv.next, 1024
   br i1 %cmp, label %for.body, label %for.end
@@ -154,19 +154,19 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds [1024 x i16], [1024 x i16]* @AB_i16, i64 0, i64 %indvars.iv
-  %0 = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds [1024 x i16], ptr @AB_i16, i64 0, i64 %indvars.iv
+  %0 = load i16, ptr %arrayidx, align 2
   %1 = or disjoint i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds [1024 x i16], [1024 x i16]* @AB_i16, i64 0, i64 %1
-  %2 = load i16, i16* %arrayidx2, align 2
+  %arrayidx2 = getelementptr inbounds [1024 x i16], ptr @AB_i16, i64 0, i64 %1
+  %2 = load i16, ptr %arrayidx2, align 2
   %conv = sext i16 %0 to i32
   %add3 = add nsw i32 %conv, %C
-  %arrayidx5 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %indvars.iv
-  store i32 %add3, i32* %arrayidx5, align 4
+  %arrayidx5 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %indvars.iv
+  store i32 %add3, ptr %arrayidx5, align 4
   %conv6 = sext i16 %2 to i32
   %mul = mul nsw i32 %conv6, %D
-  %arrayidx9 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %1
-  store i32 %mul, i32* %arrayidx9, align 4
+  %arrayidx9 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %1
+  store i32 %mul, ptr %arrayidx9, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %cmp = icmp ult i64 %indvars.iv, 1022
   br i1 %cmp, label %for.body, label %for.end
@@ -246,19 +246,19 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %indvars.iv
+  %0 = load i32, ptr %arrayidx, align 4
   %1 = or disjoint i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %1
-  %2 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %1
+  %2 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %0, %C
   %conv = trunc i32 %add3 to i16
-  %arrayidx5 = getelementptr inbounds [1024 x i16], [1024 x i16]* @CD_i16, i64 0, i64 %indvars.iv
-  store i16 %conv, i16* %arrayidx5, align 2
+  %arrayidx5 = getelementptr inbounds [1024 x i16], ptr @CD_i16, i64 0, i64 %indvars.iv
+  store i16 %conv, ptr %arrayidx5, align 2
   %mul = mul nsw i32 %2, %D
   %conv6 = trunc i32 %mul to i16
-  %arrayidx9 = getelementptr inbounds [1024 x i16], [1024 x i16]* @CD_i16, i64 0, i64 %1
-  store i16 %conv6, i16* %arrayidx9, align 2
+  %arrayidx9 = getelementptr inbounds [1024 x i16], ptr @CD_i16, i64 0, i64 %1
+  store i16 %conv6, ptr %arrayidx9, align 2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %cmp = icmp ult i64 %indvars.iv, 1022
   br i1 %cmp, label %for.body, label %for.end
@@ -272,7 +272,7 @@ for.end:                                 ; preds = %for.body
 
 %struct.ST6 = type { i32, i32, i32, i32, i32, i32 }
 
-define i32 @test_struct_load6(%struct.ST6* %S) #1 {
+define i32 @test_struct_load6(ptr %S) #1 {
 ; CHECK-LABEL: @test_struct_load6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -328,18 +328,18 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %r.041 = phi i32 [ 0, %entry ], [ %sub14, %for.body ]
-  %x = getelementptr inbounds %struct.ST6, %struct.ST6* %S, i64 %indvars.iv, i32 0
-  %0 = load i32, i32* %x, align 4
-  %y = getelementptr inbounds %struct.ST6, %struct.ST6* %S, i64 %indvars.iv, i32 1
-  %1 = load i32, i32* %y, align 4
-  %z = getelementptr inbounds %struct.ST6, %struct.ST6* %S, i64 %indvars.iv, i32 2
-  %2 = load i32, i32* %z, align 4
-  %w = getelementptr inbounds %struct.ST6, %struct.ST6* %S, i64 %indvars.iv, i32 3
-  %3 = load i32, i32* %w, align 4
-  %a = getelementptr inbounds %struct.ST6, %struct.ST6* %S, i64 %indvars.iv, i32 4
-  %4 = load i32, i32* %a, align 4
-  %b = getelementptr inbounds %struct.ST6, %struct.ST6* %S, i64 %indvars.iv, i32 5
-  %5 = load i32, i32* %b, align 4
+  %x = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 0
+  %0 = load i32, ptr %x, align 4
+  %y = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 1
+  %1 = load i32, ptr %y, align 4
+  %z = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 2
+  %2 = load i32, ptr %z, align 4
+  %w = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 3
+  %3 = load i32, ptr %w, align 4
+  %a = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 4
+  %4 = load i32, ptr %a, align 4
+  %b = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 5
+  %5 = load i32, ptr %b, align 4
   %.neg36 = add i32 %0, %r.041
   %6 = add i32 %.neg36, %2
   %7 = add i32 %1, %3
@@ -376,7 +376,7 @@ for.cond.cleanup:                                 ; preds = %for.body
 
 %struct.ST2 = type { i32, i32 }
 
-define void @test_reversed_load2_store2(%struct.ST2* noalias nocapture readonly %A, %struct.ST2* noalias nocapture %B) #1 {
+define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias nocapture %B) #1 {
 ; CHECK-LABEL: @test_reversed_load2_store2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -439,17 +439,17 @@ for.cond.cleanup:                                 ; preds = %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 1023, %entry ], [ %indvars.iv.next, %for.body ]
-  %x = getelementptr inbounds %struct.ST2, %struct.ST2* %A, i64 %indvars.iv, i32 0
-  %load1 = load i32, i32* %x, align 4
+  %x = getelementptr inbounds %struct.ST2, ptr %A, i64 %indvars.iv, i32 0
+  %load1 = load i32, ptr %x, align 4
   %trunc = trunc i64 %indvars.iv to i32
   %add = add nsw i32 %load1, %trunc
-  %y = getelementptr inbounds %struct.ST2, %struct.ST2* %A, i64 %indvars.iv, i32 1
-  %load2 = load i32, i32* %y, align 4
+  %y = getelementptr inbounds %struct.ST2, ptr %A, i64 %indvars.iv, i32 1
+  %load2 = load i32, ptr %y, align 4
   %sub = sub nsw i32 %load2, %trunc
-  %x5 = getelementptr inbounds %struct.ST2, %struct.ST2* %B, i64 %indvars.iv, i32 0
-  store i32 %add, i32* %x5, align 4
-  %y8 = getelementptr inbounds %struct.ST2, %struct.ST2* %B, i64 %indvars.iv, i32 1
-  store i32 %sub, i32* %y8, align 4
+  %x5 = getelementptr inbounds %struct.ST2, ptr %B, i64 %indvars.iv, i32 0
+  store i32 %add, ptr %x5, align 4
+  %y8 = getelementptr inbounds %struct.ST2, ptr %B, i64 %indvars.iv, i32 1
+  store i32 %sub, ptr %y8, align 4
   %indvars.iv.next = add nsw i64 %indvars.iv, -1
   %cmp = icmp sgt i64 %indvars.iv, 0
   br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -466,7 +466,7 @@ for.body:                                         ; preds = %for.body, %entry
 ; }
 
 
-define void @even_load_static_tc(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) #1 {
+define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias nocapture %B) #1 {
 ; CHECK-LABEL: @even_load_static_tc(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -519,12 +519,12 @@ for.cond.cleanup:                                 ; preds = %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
-  %load = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+  %load = load i32, ptr %arrayidx, align 4
   %mul = shl nsw i32 %load, 1
   %lshr = lshr exact i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %lshr
-  store i32 %mul, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %lshr
+  store i32 %mul, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %cmp = icmp ult i64 %indvars.iv.next, 1024
   br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -541,7 +541,7 @@ for.body:                                         ; preds = %for.body, %entry
 ; }
 
 
-define void @even_load_dynamic_tc(i32* noalias nocapture readonly %A, i32* noalias nocapture %B, i64 %N) #1 {
+define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i64 %N) #1 {
 ; CHECK-LABEL: @even_load_dynamic_tc(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 2)
@@ -605,12 +605,12 @@ for.cond.cleanup:                                 ; preds = %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
-  %load = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+  %load = load i32, ptr %arrayidx, align 4
   %mul = shl nsw i32 %load, 1
   %lshr = lshr exact i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %lshr
-  store i32 %mul, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %lshr
+  store i32 %mul, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %cmp = icmp ult i64 %indvars.iv.next, %N
   br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -635,7 +635,7 @@ for.body:                                         ; preds = %for.body, %entry
 ; }
 
 %pair = type { i64, i64 }
-define void @load_gap_reverse(%pair* noalias nocapture readonly %P1, %pair* noalias nocapture readonly %P2, i64 %X) #1 {
+define void @load_gap_reverse(ptr noalias nocapture readonly %P1, ptr noalias nocapture readonly %P2, i64 %X) #1 {
 ; CHECK-LABEL: @load_gap_reverse(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -680,12 +680,12 @@ entry:
 for.body:
   %i = phi i64 [ 1023, %entry ], [ %i.next, %for.body ]
   %0 = add nsw i64 %X, %i
-  %1 = getelementptr inbounds %pair, %pair* %P1, i64 %i, i32 0
-  %2 = getelementptr inbounds %pair, %pair* %P2, i64 %i, i32 1
-  %3 = load i64, i64* %2, align 8
+  %1 = getelementptr inbounds %pair, ptr %P1, i64 %i, i32 0
+  %2 = getelementptr inbounds %pair, ptr %P2, i64 %i, i32 1
+  %3 = load i64, ptr %2, align 8
   %4 = sub nsw i64 %3, %i
-  store i64 %0, i64* %1, align 8
-  store i64 %4, i64* %2, align 8
+  store i64 %0, ptr %1, align 8
+  store i64 %4, ptr %2, align 8
   %i.next = add nsw i64 %i, -1
   %cond = icmp sgt i64 %i, 0
   br i1 %cond, label %for.body, label %for.exit
@@ -704,7 +704,7 @@ for.exit:
 ; }
 
 
-define void @mixed_load2_store2(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) #1 {
+define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias nocapture %B) #1 {
 ; CHECK-LABEL: @mixed_load2_store2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -750,19 +750,19 @@ for.cond.cleanup:                                 ; preds = %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
-  %load1 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+  %load1 = load i32, ptr %arrayidx, align 4
   %or = or disjoint i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %or
-  %load2 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %or
+  %load2 = load i32, ptr %arrayidx2, align 4
   %mul = mul nsw i32 %load2, %load1
-  %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
-  store i32 %mul, i32* %arrayidx4, align 4
-  %load3 = load i32, i32* %arrayidx, align 4
-  %load4 = load i32, i32* %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+  store i32 %mul, ptr %arrayidx4, align 4
+  %load3 = load i32, ptr %arrayidx, align 4
+  %load4 = load i32, ptr %arrayidx2, align 4
   %add10 = add nsw i32 %load4, %load3
-  %arrayidx13 = getelementptr inbounds i32, i32* %B, i64 %or
-  store i32 %add10, i32* %arrayidx13, align 4
+  %arrayidx13 = getelementptr inbounds i32, ptr %B, i64 %or
+  store i32 %add10, ptr %arrayidx13, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %cmp = icmp ult i64 %indvars.iv.next, 1024
   br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -797,7 +797,7 @@ for.body:                                         ; preds = %for.body, %entry
 @SA = common global i32 0, align 4
 @SB = common global float 0.000000e+00, align 4
 
-define void @int_float_struct(%struct.IntFloat* nocapture readonly %p) #0 {
+define void @int_float_struct(ptr nocapture readonly %p) #0 {
 ; CHECK-LABEL: @int_float_struct(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -839,19 +839,19 @@ entry:
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body
-  store i32 %add, i32* @SA, align 4
-  store float %add3, float* @SB, align 4
+  store i32 %add, ptr @SA, align 4
+  store float %add3, ptr @SB, align 4
   ret void
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %SumB.014 = phi float [ undef, %entry ], [ %add3, %for.body ]
   %SumA.013 = phi i32 [ undef, %entry ], [ %add, %for.body ]
-  %a = getelementptr inbounds %struct.IntFloat, %struct.IntFloat* %p, i64 %indvars.iv, i32 0
-  %load1 = load i32, i32* %a, align 4
+  %a = getelementptr inbounds %struct.IntFloat, ptr %p, i64 %indvars.iv, i32 0
+  %load1 = load i32, ptr %a, align 4
   %add = add nsw i32 %load1, %SumA.013
-  %b = getelementptr inbounds %struct.IntFloat, %struct.IntFloat* %p, i64 %indvars.iv, i32 1
-  %load2 = load float, float* %b, align 4
+  %b = getelementptr inbounds %struct.IntFloat, ptr %p, i64 %indvars.iv, i32 1
+  %load2 = load float, ptr %b, align 4
   %add3 = fadd fast float %SumB.014, %load2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 1024
@@ -876,7 +876,7 @@ for.body:                                         ; preds = %for.body, %entry
 ; }
 
 %pair.i32 = type { i32, i32 }
-define void @PR27626_0(%pair.i32 *%p, i32 %z, i64 %n) #1 {
+define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
 ; CHECK-LABEL: @PR27626_0(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
@@ -939,11 +939,11 @@ entry:
 
 for.body:
   %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
-  %p_i.x = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 0
-  %p_i.y = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 1
-  store i32 %z, i32* %p_i.x, align 4
-  %0 = load i32, i32* %p_i.x, align 4
-  store i32 %0, i32 *%p_i.y, align 4
+  %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
+  %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
+  store i32 %z, ptr %p_i.x, align 4
+  %0 = load i32, ptr %p_i.x, align 4
+  store i32 %0, ptr %p_i.y, align 4
   %i.next = add nuw nsw i64 %i, 1
   %cond = icmp slt i64 %i.next, %n
   br i1 %cond, label %for.body, label %for.end
@@ -963,7 +963,7 @@ for.end:
 ;   }
 ; }
 
-define i32 @PR27626_1(%pair.i32 *%p, i64 %n) #1 {
+define i32 @PR27626_1(ptr %p, i64 %n) #1 {
 ; CHECK-LABEL: @PR27626_1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
@@ -1033,11 +1033,11 @@ entry:
 for.body:
   %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
   %s = phi i32 [ %2, %for.body ], [ 0, %entry ]
-  %p_i.x = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 0
-  %p_i.y = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 1
-  %0 = load i32, i32* %p_i.x, align 4
-  store i32 %0, i32* %p_i.y, align 4
-  %1 = load i32, i32* %p_i.y, align 4
+  %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
+  %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
+  %0 = load i32, ptr %p_i.x, align 4
+  store i32 %0, ptr %p_i.y, align 4
+  %1 = load i32, ptr %p_i.y, align 4
   %2 = add nsw i32 %1, %s
   %i.next = add nuw nsw i64 %i, 1
   %cond = icmp slt i64 %i.next, %n
@@ -1058,7 +1058,7 @@ for.end:
 ;   }
 ; }
 
-define void @PR27626_2(%pair.i32 *%p, i64 %n, i32 %z) #1 {
+define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
 ; CHECK-LABEL: @PR27626_2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
@@ -1124,12 +1124,12 @@ entry:
 for.body:
   %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
   %i_minus_1 = add nuw nsw i64 %i, -1
-  %p_i.x = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 0
-  %p_i_minus_1.x = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i_minus_1, i32 0
-  %p_i.y = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 1
-  store i32 %z, i32* %p_i.x, align 4
-  %0 = load i32, i32* %p_i_minus_1.x, align 4
-  store i32 %0, i32 *%p_i.y, align 4
+  %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
+  %p_i_minus_1.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i_minus_1, i32 0
+  %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
+  store i32 %z, ptr %p_i.x, align 4
+  %0 = load i32, ptr %p_i_minus_1.x, align 4
+  store i32 %0, ptr %p_i.y, align 4
   %i.next = add nuw nsw i64 %i, 1
   %cond = icmp slt i64 %i.next, %n
   br i1 %cond, label %for.body, label %for.end
@@ -1148,7 +1148,7 @@ for.end:
 ;   }
 ; }
 
-define i32 @PR27626_3(%pair.i32 *%p, i64 %n, i32 %z) #1 {
+define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
 ; CHECK-LABEL: @PR27626_3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
@@ -1223,12 +1223,12 @@ for.body:
   %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
   %s = phi i32 [ %2, %for.body ], [ 0, %entry ]
   %i_plus_1 = add nuw nsw i64 %i, 1
-  %p_i.x = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 0
-  %p_i.y = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i, i32 1
-  %p_i_plus_1.y = getelementptr inbounds %pair.i32, %pair.i32* %p, i64 %i_plus_1, i32 1
-  %0 = load i32, i32* %p_i.x, align 4
-  store i32 %0, i32* %p_i_plus_1.y, align 4
-  %1 = load i32, i32* %p_i.y, align 4
+  %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
+  %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
+  %p_i_plus_1.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i_plus_1, i32 1
+  %0 = load i32, ptr %p_i.x, align 4
+  store i32 %0, ptr %p_i_plus_1.y, align 4
+  %1 = load i32, ptr %p_i.y, align 4
   %2 = add nsw i32 %1, %s
   %i.next = add nuw nsw i64 %i, 1
   %cond = icmp slt i64 %i.next, %n
@@ -1251,7 +1251,7 @@ for.end:
 ;   }
 ; }
 
-define void @PR27626_4(i32 *%a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
+define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
 ; CHECK-LABEL: @PR27626_4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 2)
@@ -1322,11 +1322,11 @@ entry:
 for.body:
   %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
   %i_plus_1 = add i64 %i, 1
-  %a_i = getelementptr inbounds i32, i32* %a, i64 %i
-  %a_i_plus_1 = getelementptr inbounds i32, i32* %a, i64 %i_plus_1
-  store i32 %x, i32* %a_i, align 4
-  store i32 %y, i32* %a_i, align 4
-  store i32 %z, i32* %a_i_plus_1, align 4
+  %a_i = getelementptr inbounds i32, ptr %a, i64 %i
+  %a_i_plus_1 = getelementptr inbounds i32, ptr %a, i64 %i_plus_1
+  store i32 %x, ptr %a_i, align 4
+  store i32 %y, ptr %a_i, align 4
+  store i32 %z, ptr %a_i_plus_1, align 4
   %i.next = add nuw nsw i64 %i, 2
   %cond = icmp slt i64 %i.next, %n
   br i1 %cond, label %for.body, label %for.end
@@ -1346,7 +1346,7 @@ for.end:
 ;   }
 ; }
 
-define void @PR27626_5(i32 *%a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
+define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
 ; CHECK-LABEL: @PR27626_5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 5)
@@ -1423,12 +1423,12 @@ for.body:
   %i = phi i64 [ %i.next, %for.body ], [ 3, %entry ]
   %i_minus_1 = sub i64 %i, 1
   %i_minus_3 = sub i64 %i_minus_1, 2
-  %a_i = getelementptr inbounds i32, i32* %a, i64 %i
-  %a_i_minus_1 = getelementptr inbounds i32, i32* %a, i64 %i_minus_1
-  %a_i_minus_3 = getelementptr inbounds i32, i32* %a, i64 %i_minus_3
-  store i32 %x, i32* %a_i_minus_1, align 4
-  store i32 %y, i32* %a_i_minus_3, align 4
-  store i32 %z, i32* %a_i, align 4
+  %a_i = getelementptr inbounds i32, ptr %a, i64 %i
+  %a_i_minus_1 = getelementptr inbounds i32, ptr %a, i64 %i_minus_1
+  %a_i_minus_3 = getelementptr inbounds i32, ptr %a, i64 %i_minus_3
+  store i32 %x, ptr %a_i_minus_1, align 4
+  store i32 %y, ptr %a_i_minus_3, align 4
+  store i32 %z, ptr %a_i, align 4
   %i.next = add nuw nsw i64 %i, 2
   %cond = icmp slt i64 %i.next, %n
   br i1 %cond, label %for.body, label %for.end
@@ -1446,7 +1446,7 @@ for.end:
 ;   }
 ; }
 
-define void @PR34743(i16* %a, i32* %b, i64 %n) #1 {
+define void @PR34743(ptr %a, ptr %b, i64 %n) #1 {
 ; CHECK-LABEL: @PR34743(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[DOTPRE:%.*]] = load i16, ptr [[A:%.*]], align 2
@@ -1544,7 +1544,7 @@ define void @PR34743(i16* %a, i32* %b, i64 %n) #1 {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %.pre = load i16, i16* %a
+  %.pre = load i16, ptr %a
   br label %loop
 
 loop:
@@ -1555,16 +1555,16 @@ loop:
   %i1 = add nuw nsw i64 %i, 1
   %iv1 = add nuw nsw i64 %iv, 1
   %iv2 = add nuw nsw i64 %iv, 2
-  %gep1 = getelementptr inbounds i16, i16* %a, i64 %iv1
-  %load1 = load i16, i16* %gep1, align 4
+  %gep1 = getelementptr inbounds i16, ptr %a, i64 %iv1
+  %load1 = load i16, ptr %gep1, align 4
   %conv1 = sext i16 %load1 to i32
-  %gep2 = getelementptr inbounds i16, i16* %a, i64 %iv2
-  %load2 = load i16, i16* %gep2, align 4
+  %gep2 = getelementptr inbounds i16, ptr %a, i64 %iv2
+  %load2 = load i16, ptr %gep2, align 4
   %conv2 = sext i16 %load2 to i32
   %mul01 = mul nsw i32 %conv, %conv1
   %mul012 = mul nsw i32 %mul01, %conv2
-  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %i
-  store i32 %mul012, i32* %arrayidx5
+  %arrayidx5 = getelementptr inbounds i32, ptr %b, i64 %i
+  store i32 %mul012, ptr %arrayidx5
   %exitcond = icmp eq i64 %iv, %n
   br i1 %exitcond, label %end, label %loop
 

diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
index c7eaac315fd86..cc7c1d8a61887 100644
--- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -210,7 +210,7 @@ for.end:
 ; INTER:       getelementptr inbounds %pair, ptr %p, i64 %[[I3]], i32 0
 ; INTER:       br i1 {{.*}}, label %middle.block, label %vector.body
 ;
-define void @predicated_store(%pair *%p, i32 %x, i64 %n) {
+define void @predicated_store(ptr %p, i32 %x, i64 %n) {
 entry:
   br label %for.body
 
@@ -459,7 +459,7 @@ for.end:
 ; INTER-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; INTER:         br i1 {{.*}}, label %middle.block, label %vector.body
 ;
-define void @pointer_operand_geps_with_different_indexed_types(i64* %A, i8* %B, i64 %n) {
+define void @pointer_operand_geps_with_different_indexed_types(ptr %A, ptr %B, i64 %n) {
 entry:
   br label %for.body
 

diff --git a/llvm/test/Transforms/LoopVectorize/lcssa-crashes.ll b/llvm/test/Transforms/LoopVectorize/lcssa-crashes.ll
index 295d2e73d6f0a..8d88c625c4c4e 100644
--- a/llvm/test/Transforms/LoopVectorize/lcssa-crashes.ll
+++ b/llvm/test/Transforms/LoopVectorize/lcssa-crashes.ll
@@ -104,7 +104,7 @@ while.body:
   %pos.337 = phi i32 [ %inc46, %while.body ], [ %add41, %entry ]
   %inc46 = add i32 %pos.337, 1
   %arrayidx48 = getelementptr inbounds [1024 x i8], ptr undef, i64 0, i64 %idxprom4738
-  store i8 0, i8* %arrayidx48, align 1
+  store i8 0, ptr %arrayidx48, align 1
   %and43 = and i32 %inc46, 3
   %cmp44 = icmp eq i32 %and43, 0
   %idxprom47 = zext i32 %inc46 to i64

diff --git a/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll b/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
index 5fe1686fa7b5d..55bbf54d1f39d 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
@@ -3,7 +3,7 @@
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 
-define void @same_step_and_size(ptr %a, i32* %b, i64 %n) {
+define void @same_step_and_size(ptr %a, ptr %b, i64 %n) {
 ; CHECK-LABEL: @same_step_and_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A2:%.*]] = ptrtoint ptr [[A:%.*]] to i64

diff --git a/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll b/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
index 42ad92ee03f4d..adab2238ab5b2 100644
--- a/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
+++ b/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
@@ -79,7 +79,7 @@ define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
   %alloc = alloca <vscale x 4 x float>, align 16
   %idx = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
   %stride = getelementptr inbounds float, ptr %alloc, <vscale x 4 x i32> %idx
-  call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+  call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
   %li = load <vscale x 4 x float>, ptr %alloc, align 4
   store <vscale x 4 x float> %li, ptr %out, align 4
   ret void
@@ -124,4 +124,4 @@ define void @memmove_agg2(ptr %a, ptr %b) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-declare void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> , <vscale x 4 x ptr> , i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> , <vscale x 4 x ptr> , i32, <vscale x 4 x i1>)

diff --git a/llvm/test/Transforms/MoveAutoInit/clobber.ll b/llvm/test/Transforms/MoveAutoInit/clobber.ll
index 0d70d85119f50..09084b6ddc51b 100644
--- a/llvm/test/Transforms/MoveAutoInit/clobber.ll
+++ b/llvm/test/Transforms/MoveAutoInit/clobber.ll
@@ -45,25 +45,25 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
 
   %4 = alloca [100 x i8], align 16
   %5 = alloca [2 x i8], align 1
-  %6 = getelementptr inbounds [100 x i8], [100 x i8]* %4, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 100, i8* nonnull %6) #3
+  %6 = getelementptr inbounds [100 x i8], ptr %4, i64 0, i64 0
+  call void @llvm.lifetime.start.p0(i64 100, ptr nonnull %6) #3
   ; This memset must move.
-  call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(100) %6, i8 -86, i64 100, i1 false), !annotation !0
-  %7 = getelementptr inbounds [2 x i8], [2 x i8]* %5, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %7) #3
+  call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(100) %6, i8 -86, i64 100, i1 false), !annotation !0
+  %7 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 0
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %7) #3
   ; This store must move.
-  store i8 -86, i8* %7, align 1, !annotation !0
-  %8 = getelementptr inbounds [2 x i8], [2 x i8]* %5, i64 0, i64 1
+  store i8 -86, ptr %7, align 1, !annotation !0
+  %8 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 1
   ; This store must move.
-  store i8 -86, i8* %8, align 1, !annotation !0
+  store i8 -86, ptr %8, align 1, !annotation !0
   %9 = icmp eq i32 %1, 0
   br i1 %9, label %15, label %10
 
 10:
   %11 = sext i32 %0 to i64
-  %12 = getelementptr inbounds [100 x i8], [100 x i8]* %4, i64 0, i64 %11
-  store i8 12, i8* %12, align 1
-  %13 = load i8, i8* %6, align 16
+  %12 = getelementptr inbounds [100 x i8], ptr %4, i64 0, i64 %11
+  store i8 12, ptr %12, align 1
+  %13 = load i8, ptr %6, align 16
   %14 = sext i8 %13 to i32
   br label %22
 
@@ -73,24 +73,24 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
 
 17:
   %18 = sext i32 %0 to i64
-  %19 = getelementptr inbounds [2 x i8], [2 x i8]* %5, i64 0, i64 %18
-  store i8 12, i8* %19, align 1
-  %20 = load i8, i8* %7, align 1
+  %19 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 %18
+  store i8 12, ptr %19, align 1
+  %20 = load i8, ptr %7, align 1
   %21 = sext i8 %20 to i32
   br label %22
 
 22:
   %23 = phi i32 [ %14, %10 ], [ %21, %17 ], [ 0, %15 ]
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %7) #3
-  call void @llvm.lifetime.end.p0i8(i64 100, i8* nonnull %6) #3
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %7) #3
+  call void @llvm.lifetime.end.p0(i64 100, ptr nonnull %6) #3
   ret i32 %23
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #2
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 
 attributes #0 = { mustprogress nofree nosync nounwind readnone uwtable willreturn }
 attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }

diff --git a/llvm/test/Transforms/NewGVN/flags-simplify.ll b/llvm/test/Transforms/NewGVN/flags-simplify.ll
index ac0815d48d17a..1c5d3f08c7c26 100644
--- a/llvm/test/Transforms/NewGVN/flags-simplify.ll
+++ b/llvm/test/Transforms/NewGVN/flags-simplify.ll
@@ -52,7 +52,7 @@ define i64 @lshr_lsh_nuw(i64 %tmp) {
 ;
 entry:
   %conv3 = shl nuw i64 %tmp, 32
-  store i64 %conv3, i64* @f, align 8
+  store i64 %conv3, ptr @f, align 8
   %sext = shl i64 %tmp, 32
   %lshr = lshr i64 %sext, 32
   ret i64 %lshr

diff --git a/llvm/test/Transforms/NewGVN/no_speculative_loads_with_asan.ll b/llvm/test/Transforms/NewGVN/no_speculative_loads_with_asan.ll
index b1d71e8757284..126522a81f174 100644
--- a/llvm/test/Transforms/NewGVN/no_speculative_loads_with_asan.ll
+++ b/llvm/test/Transforms/NewGVN/no_speculative_loads_with_asan.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=newgvn -S %s | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-declare noalias i8* @_Znam(i64) #1
+declare noalias ptr @_Znam(i64) #1
 
 define i32 @TestNoAsan() {
 ; CHECK-LABEL: @TestNoAsan(

diff --git a/llvm/test/Transforms/NewGVN/pr17732.ll b/llvm/test/Transforms/NewGVN/pr17732.ll
index 6d26035ce45ed..427543d3aae7a 100644
--- a/llvm/test/Transforms/NewGVN/pr17732.ll
+++ b/llvm/test/Transforms/NewGVN/pr17732.ll
@@ -13,10 +13,10 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i32 @main() {
 entry:
-  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 getelementptr inbounds ({ [2 x i8], i32, i8, [3 x i8] }, ptr @main.obj_with_array, i64 0, i32 0, i64 0), i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 @main.obj_with_array, i64 12, i1 false)
   %0 = load i8, ptr getelementptr inbounds (%struct.with_array, ptr @array_with_zeroinit, i64 0, i32 2), align 4
 
-  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @vector_with_zeroinit, ptr align 4 getelementptr inbounds ({ <2 x i8>, i32, i8, [3 x i8] }, ptr @main.obj_with_vector, i64 0, i32 0, i64 0), i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @vector_with_zeroinit, ptr align 4 @main.obj_with_vector, i64 12, i1 false)
   %1 = load i8, ptr getelementptr inbounds (%struct.with_vector, ptr @vector_with_zeroinit, i64 0, i32 2), align 4
   %conv0 = sext i8 %0 to i32
   %conv1 = sext i8 %1 to i32

diff --git a/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll b/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
index 66a77c01da5d8..7fbf50680a32e 100644
--- a/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
+++ b/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
@@ -17,13 +17,13 @@ entry:
   br label %bb0
 
 bb1:
-  %ptr1 = ptrtoint i32* %ptr2 to i64
-  %ptr2 = inttoptr i64 %ptr1 to i32*
+  %ptr1 = ptrtoint ptr %ptr2 to i64
+  %ptr2 = inttoptr i64 %ptr1 to ptr
   br i1 undef, label %bb0, label %bb1
 
 bb0:
-  %phi = phi i32* [ undef, %entry ], [ %ptr2, %bb1 ]
-  %load = load i32, i32* %phi
+  %phi = phi ptr [ undef, %entry ], [ %ptr2, %bb1 ]
+  %load = load i32, ptr %phi
   ret i32 %load
 }
 
@@ -32,12 +32,12 @@ entry:
   br label %bb0
 
 bb1:
-  %ptr1 = getelementptr i32, i32* %ptr2, i32 0
-  %ptr2 = getelementptr i32, i32* %ptr1, i32 0
+  %ptr1 = getelementptr i32, ptr %ptr2, i32 0
+  %ptr2 = getelementptr i32, ptr %ptr1, i32 0
   br i1 undef, label %bb0, label %bb1
 
 bb0:
-  %phi = phi i32* [ undef, %entry ], [ %ptr2, %bb1 ]
-  %load = load i32, i32* %phi
+  %phi = phi ptr [ undef, %entry ], [ %ptr2, %bb1 ]
+  %load = load i32, ptr %phi
   ret i32 %load
 }

diff --git a/llvm/test/Transforms/PGOProfile/coverage.ll b/llvm/test/Transforms/PGOProfile/coverage.ll
index 1b7658a2ea9ed..f9877cad8916f 100644
--- a/llvm/test/Transforms/PGOProfile/coverage.ll
+++ b/llvm/test/Transforms/PGOProfile/coverage.ll
@@ -81,8 +81,8 @@ entry:
   ; ENTRY: call void @llvm.instrprof.cover({{.*}})
   %a.addr = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %0 = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
   %rem = srem i32 %0, 2
   %cmp = icmp eq i32 %rem, 0
   br i1 %cmp, label %if.then, label %if.else
@@ -100,20 +100,20 @@ if.else:                                          ; preds = %entry
 
 ; CHECK-LABEL: if.end:
 if.end:                                           ; preds = %if.else, %if.then
-  store i32 1, i32* %i, align 4
+  store i32 1, ptr %i, align 4
   br label %for.cond
 
 ; CHECK-LABEL: for.cond:
 for.cond:                                         ; preds = %for.inc, %if.end
-  %1 = load i32, i32* %i, align 4
-  %2 = load i32, i32* %a.addr, align 4
+  %1 = load i32, ptr %i, align 4
+  %2 = load i32, ptr %a.addr, align 4
   %cmp1 = icmp slt i32 %1, %2
   br i1 %cmp1, label %for.body, label %for.end
   ; USE: br i1 %cmp1, label %for.body, label %for.end, !prof ![[WEIGHTS1]]
 
 ; CHECK-LABEL: for.body:
 for.body:                                         ; preds = %for.cond
-  %3 = load i32, i32* %a.addr, align 4
+  %3 = load i32, ptr %a.addr, align 4
   %rem2 = srem i32 %3, 3
   %cmp3 = icmp eq i32 %rem2, 0
   br i1 %cmp3, label %if.then4, label %if.else5
@@ -126,7 +126,7 @@ if.then4:                                         ; preds = %for.body
 
 ; CHECK-LABEL: if.else5:
 if.else5:                                         ; preds = %for.body
-  %4 = load i32, i32* %a.addr, align 4
+  %4 = load i32, ptr %a.addr, align 4
   %rem6 = srem i32 %4, 1001
   %cmp7 = icmp eq i32 %rem6, 0
   br i1 %cmp7, label %if.then8, label %if.end9
@@ -148,9 +148,9 @@ if.end10:                                         ; preds = %if.end9, %if.then4
 
 ; CHECK-LABEL: for.inc:
 for.inc:                                          ; preds = %if.end10
-  %5 = load i32, i32* %i, align 4
+  %5 = load i32, ptr %i, align 4
   %inc = add nsw i32 %5, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 ; CHECK-LABEL: for.end:

diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll
index 9a13ca7713d5d..2613bf657553d 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll
@@ -18,12 +18,12 @@ define void @abs_v2i64() {
 ;
 
 entry:
-  %a0 = load i64, i64* getelementptr inbounds (i64, ptr @a, i64 0), align 8
-  %a1 = load i64, i64* getelementptr inbounds (i64, ptr @a, i64 1), align 8
+  %a0 = load i64, ptr @a, align 8
+  %a1 = load i64, ptr getelementptr inbounds (i64, ptr @a, i64 1), align 8
   %r0 = call i64 @llvm.abs.i64(i64 %a0, i1 false)
   %r1 = call i64 @llvm.abs.i64(i64 %a1, i1 false)
-  store i64 %r0, i64* getelementptr inbounds (i64, ptr @a, i64 0), align 8
-  store i64 %r1, i64* getelementptr inbounds (i64, ptr @a, i64 1), align 8
+  store i64 %r0, ptr @a, align 8
+  store i64 %r1, ptr getelementptr inbounds (i64, ptr @a, i64 1), align 8
   ret void
 }
 
@@ -37,18 +37,18 @@ define void @abs_v4i32() {
 ;
 
 entry:
-  %a0 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 0), align 8
-  %a1 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 1), align 8
-  %a2 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 2), align 8
-  %a3 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 3), align 8
+  %a0 = load i32, ptr @a, align 8
+  %a1 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 1), align 8
+  %a2 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 2), align 8
+  %a3 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 3), align 8
   %r0 = call i32 @llvm.abs.i32(i32 %a0, i1 false)
   %r1 = call i32 @llvm.abs.i32(i32 %a1, i1 false)
   %r2 = call i32 @llvm.abs.i32(i32 %a2, i1 false)
   %r3 = call i32 @llvm.abs.i32(i32 %a3, i1 false)
-  store i32 %r0, i32* getelementptr inbounds (i32, ptr @a, i64 0), align 8
-  store i32 %r1, i32* getelementptr inbounds (i32, ptr @a, i64 1), align 8
-  store i32 %r2, i32* getelementptr inbounds (i32, ptr @a, i64 2), align 8
-  store i32 %r3, i32* getelementptr inbounds (i32, ptr @a, i64 3), align 8
+  store i32 %r0, ptr @a, align 8
+  store i32 %r1, ptr getelementptr inbounds (i32, ptr @a, i64 1), align 8
+  store i32 %r2, ptr getelementptr inbounds (i32, ptr @a, i64 2), align 8
+  store i32 %r3, ptr getelementptr inbounds (i32, ptr @a, i64 3), align 8
   ret void
 }
 
@@ -62,14 +62,14 @@ define void @abs_v8i16() {
 ;
 
 entry:
-  %a0  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 0), align 8
-  %a1  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 1), align 8
-  %a2  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 2), align 8
-  %a3  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 3), align 8
-  %a4  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 4), align 8
-  %a5  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 5), align 8
-  %a6  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 6), align 8
-  %a7  = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 7), align 8
+  %a0  = load i16, ptr @a, align 8
+  %a1  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 1), align 8
+  %a2  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 2), align 8
+  %a3  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 3), align 8
+  %a4  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 4), align 8
+  %a5  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 5), align 8
+  %a6  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 6), align 8
+  %a7  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 7), align 8
   %r0  = call i16 @llvm.abs.i16(i16 %a0, i1 false)
   %r1  = call i16 @llvm.abs.i16(i16 %a1, i1 false)
   %r2  = call i16 @llvm.abs.i16(i16 %a2, i1 false)
@@ -78,14 +78,14 @@ entry:
   %r5  = call i16 @llvm.abs.i16(i16 %a5, i1 false)
   %r6  = call i16 @llvm.abs.i16(i16 %a6, i1 false)
   %r7  = call i16 @llvm.abs.i16(i16 %a7, i1 false)
-  store i16 %r0,  i16* getelementptr inbounds (i16, ptr @a, i64 0), align 8
-  store i16 %r1,  i16* getelementptr inbounds (i16, ptr @a, i64 1), align 8
-  store i16 %r2,  i16* getelementptr inbounds (i16, ptr @a, i64 2), align 8
-  store i16 %r3,  i16* getelementptr inbounds (i16, ptr @a, i64 3), align 8
-  store i16 %r4,  i16* getelementptr inbounds (i16, ptr @a, i64 4), align 8
-  store i16 %r5,  i16* getelementptr inbounds (i16, ptr @a, i64 5), align 8
-  store i16 %r6,  i16* getelementptr inbounds (i16, ptr @a, i64 6), align 8
-  store i16 %r7,  i16* getelementptr inbounds (i16, ptr @a, i64 7), align 8
+  store i16 %r0,  ptr @a, align 8
+  store i16 %r1,  ptr getelementptr inbounds (i16, ptr @a, i64 1), align 8
+  store i16 %r2,  ptr getelementptr inbounds (i16, ptr @a, i64 2), align 8
+  store i16 %r3,  ptr getelementptr inbounds (i16, ptr @a, i64 3), align 8
+  store i16 %r4,  ptr getelementptr inbounds (i16, ptr @a, i64 4), align 8
+  store i16 %r5,  ptr getelementptr inbounds (i16, ptr @a, i64 5), align 8
+  store i16 %r6,  ptr getelementptr inbounds (i16, ptr @a, i64 6), align 8
+  store i16 %r7,  ptr getelementptr inbounds (i16, ptr @a, i64 7), align 8
   ret void
 }
 
@@ -99,22 +99,22 @@ define void @abs_v16i8() {
 ;
 
 entry:
-  %a0  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  0), align 8
-  %a1  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  1), align 8
-  %a2  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  2), align 8
-  %a3  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  3), align 8
-  %a4  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  4), align 8
-  %a5  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  5), align 8
-  %a6  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  6), align 8
-  %a7  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  7), align 8
-  %a8  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  8), align 8
-  %a9  = load i8, i8* getelementptr inbounds (i8, ptr @a, i64  9), align 8
-  %a10 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 10), align 8
-  %a11 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 11), align 8
-  %a12 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 12), align 8
-  %a13 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 13), align 8
-  %a14 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 14), align 8
-  %a15 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 15), align 8
+  %a0  = load i8, ptr @a, align 8
+  %a1  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  1), align 8
+  %a2  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  2), align 8
+  %a3  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  3), align 8
+  %a4  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  4), align 8
+  %a5  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  5), align 8
+  %a6  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  6), align 8
+  %a7  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  7), align 8
+  %a8  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  8), align 8
+  %a9  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  9), align 8
+  %a10 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 10), align 8
+  %a11 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 11), align 8
+  %a12 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 12), align 8
+  %a13 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 13), align 8
+  %a14 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 14), align 8
+  %a15 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 15), align 8
   %r0  = call i8 @llvm.abs.i8(i8 %a0,  i1 false)
   %r1  = call i8 @llvm.abs.i8(i8 %a1,  i1 false)
   %r2  = call i8 @llvm.abs.i8(i8 %a2,  i1 false)
@@ -131,21 +131,21 @@ entry:
   %r13 = call i8 @llvm.abs.i8(i8 %a13, i1 false)
   %r14 = call i8 @llvm.abs.i8(i8 %a14, i1 false)
   %r15 = call i8 @llvm.abs.i8(i8 %a15, i1 false)
-  store i8 %r0,  i8* getelementptr inbounds (i8, ptr @a, i64  0), align 8
-  store i8 %r1,  i8* getelementptr inbounds (i8, ptr @a, i64  1), align 8
-  store i8 %r2,  i8* getelementptr inbounds (i8, ptr @a, i64  2), align 8
-  store i8 %r3,  i8* getelementptr inbounds (i8, ptr @a, i64  3), align 8
-  store i8 %r4,  i8* getelementptr inbounds (i8, ptr @a, i64  4), align 8
-  store i8 %r5,  i8* getelementptr inbounds (i8, ptr @a, i64  5), align 8
-  store i8 %r6,  i8* getelementptr inbounds (i8, ptr @a, i64  6), align 8
-  store i8 %r7,  i8* getelementptr inbounds (i8, ptr @a, i64  7), align 8
-  store i8 %r8,  i8* getelementptr inbounds (i8, ptr @a, i64  8), align 8
-  store i8 %r9,  i8* getelementptr inbounds (i8, ptr @a, i64  9), align 8
-  store i8 %r10, i8* getelementptr inbounds (i8, ptr @a, i64 10), align 8
-  store i8 %r11, i8* getelementptr inbounds (i8, ptr @a, i64 11), align 8
-  store i8 %r12, i8* getelementptr inbounds (i8, ptr @a, i64 12), align 8
-  store i8 %r13, i8* getelementptr inbounds (i8, ptr @a, i64 13), align 8
-  store i8 %r14, i8* getelementptr inbounds (i8, ptr @a, i64 14), align 8
-  store i8 %r15, i8* getelementptr inbounds (i8, ptr @a, i64 15), align 8
+  store i8 %r0,  ptr @a, align 8
+  store i8 %r1,  ptr getelementptr inbounds (i8, ptr @a, i64  1), align 8
+  store i8 %r2,  ptr getelementptr inbounds (i8, ptr @a, i64  2), align 8
+  store i8 %r3,  ptr getelementptr inbounds (i8, ptr @a, i64  3), align 8
+  store i8 %r4,  ptr getelementptr inbounds (i8, ptr @a, i64  4), align 8
+  store i8 %r5,  ptr getelementptr inbounds (i8, ptr @a, i64  5), align 8
+  store i8 %r6,  ptr getelementptr inbounds (i8, ptr @a, i64  6), align 8
+  store i8 %r7,  ptr getelementptr inbounds (i8, ptr @a, i64  7), align 8
+  store i8 %r8,  ptr getelementptr inbounds (i8, ptr @a, i64  8), align 8
+  store i8 %r9,  ptr getelementptr inbounds (i8, ptr @a, i64  9), align 8
+  store i8 %r10, ptr getelementptr inbounds (i8, ptr @a, i64 10), align 8
+  store i8 %r11, ptr getelementptr inbounds (i8, ptr @a, i64 11), align 8
+  store i8 %r12, ptr getelementptr inbounds (i8, ptr @a, i64 12), align 8
+  store i8 %r13, ptr getelementptr inbounds (i8, ptr @a, i64 13), align 8
+  store i8 %r14, ptr getelementptr inbounds (i8, ptr @a, i64 14), align 8
+  store i8 %r15, ptr getelementptr inbounds (i8, ptr @a, i64 15), align 8
   ret void
 }

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
index 35908ee72bc20..000e7a56df377 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
@@ -160,51 +160,51 @@ define i8 @reduce_and(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %xor12 = xor i8 %1, %0
   %and13 = and i8 %xor12, 1
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %xor12.1 = xor i8 %3, %2
   %and13.1 = and i8 %xor12.1, %and13
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %4 = load i8, i8* %arrayidx.2, align 1
+  %4 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx3.2, align 1
+  %5 = load i8, ptr %arrayidx3.2, align 1
   %xor12.2 = xor i8 %5, %4
   %and13.2 = and i8 %xor12.2, %and13.1
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %6 = load i8, i8* %arrayidx.3, align 1
+  %6 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %7 = load i8, i8* %arrayidx3.3, align 1
+  %7 = load i8, ptr %arrayidx3.3, align 1
   %xor12.3 = xor i8 %7, %6
   %and13.3 = and i8 %xor12.3, %and13.2
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %8 = load i8, i8* %arrayidx.4, align 1
+  %8 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %9 = load i8, i8* %arrayidx3.4, align 1
+  %9 = load i8, ptr %arrayidx3.4, align 1
   %xor12.4 = xor i8 %9, %8
   %and13.4 = and i8 %xor12.4, %and13.3
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %10 = load i8, i8* %arrayidx.5, align 1
+  %10 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %11 = load i8, i8* %arrayidx3.5, align 1
+  %11 = load i8, ptr %arrayidx3.5, align 1
   %xor12.5 = xor i8 %11, %10
   %and13.5 = and i8 %xor12.5, %and13.4
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %12 = load i8, i8* %arrayidx.6, align 1
+  %12 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %13 = load i8, i8* %arrayidx3.6, align 1
+  %13 = load i8, ptr %arrayidx3.6, align 1
   %xor12.6 = xor i8 %13, %12
   %and13.6 = and i8 %xor12.6, %and13.5
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %14 = load i8, i8* %arrayidx.7, align 1
+  %14 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %15 = load i8, i8* %arrayidx3.7, align 1
+  %15 = load i8, ptr %arrayidx3.7, align 1
   %xor12.7 = xor i8 %15, %14
   %and13.7 = and i8 %xor12.7, %and13.6
   ret i8 %and13.7
@@ -224,50 +224,50 @@ define i8 @reduce_or_1(ptr %a, ptr %b) {
 
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %xor12 = xor i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %xor12.1 = xor i8 %3, %2
   %or13.1 = or i8 %xor12.1, %xor12
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %4 = load i8, i8* %arrayidx.2, align 1
+  %4 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx3.2, align 1
+  %5 = load i8, ptr %arrayidx3.2, align 1
   %xor12.2 = xor i8 %5, %4
   %or13.2 = or i8 %xor12.2, %or13.1
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %6 = load i8, i8* %arrayidx.3, align 1
+  %6 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %7 = load i8, i8* %arrayidx3.3, align 1
+  %7 = load i8, ptr %arrayidx3.3, align 1
   %xor12.3 = xor i8 %7, %6
   %or13.3 = or i8 %xor12.3, %or13.2
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %8 = load i8, i8* %arrayidx.4, align 1
+  %8 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %9 = load i8, i8* %arrayidx3.4, align 1
+  %9 = load i8, ptr %arrayidx3.4, align 1
   %xor12.4 = xor i8 %9, %8
   %or13.4 = or i8 %xor12.4, %or13.3
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %10 = load i8, i8* %arrayidx.5, align 1
+  %10 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %11 = load i8, i8* %arrayidx3.5, align 1
+  %11 = load i8, ptr %arrayidx3.5, align 1
   %xor12.5 = xor i8 %11, %10
   %or13.5 = or i8 %xor12.5, %or13.4
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %12 = load i8, i8* %arrayidx.6, align 1
+  %12 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %13 = load i8, i8* %arrayidx3.6, align 1
+  %13 = load i8, ptr %arrayidx3.6, align 1
   %xor12.6 = xor i8 %13, %12
   %or13.6 = or i8 %xor12.6, %or13.5
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %14 = load i8, i8* %arrayidx.7, align 1
+  %14 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %15 = load i8, i8* %arrayidx3.7, align 1
+  %15 = load i8, ptr %arrayidx3.7, align 1
   %xor12.7 = xor i8 %15, %14
   %or13.7 = or i8 %xor12.7, %or13.6
   ret i8 %or13.7
@@ -403,50 +403,50 @@ define i8 @reduce_xor(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %and12 = and i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %and12.1 = and i8 %3, %2
   %4 = xor i8 %and12, %and12.1
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx.2, align 1
+  %5 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %6 = load i8, i8* %arrayidx3.2, align 1
+  %6 = load i8, ptr %arrayidx3.2, align 1
   %and12.2 = and i8 %6, %5
   %7 = xor i8 %4, %and12.2
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %8 = load i8, i8* %arrayidx.3, align 1
+  %8 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %9 = load i8, i8* %arrayidx3.3, align 1
+  %9 = load i8, ptr %arrayidx3.3, align 1
   %and12.3 = and i8 %9, %8
   %10 = xor i8 %7, %and12.3
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %11 = load i8, i8* %arrayidx.4, align 1
+  %11 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %12 = load i8, i8* %arrayidx3.4, align 1
+  %12 = load i8, ptr %arrayidx3.4, align 1
   %and12.4 = and i8 %12, %11
   %13 = xor i8 %10, %and12.4
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %14 = load i8, i8* %arrayidx.5, align 1
+  %14 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %15 = load i8, i8* %arrayidx3.5, align 1
+  %15 = load i8, ptr %arrayidx3.5, align 1
   %and12.5 = and i8 %15, %14
   %16 = xor i8 %13, %and12.5
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %17 = load i8, i8* %arrayidx.6, align 1
+  %17 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %18 = load i8, i8* %arrayidx3.6, align 1
+  %18 = load i8, ptr %arrayidx3.6, align 1
   %and12.6 = and i8 %18, %17
   %19 = xor i8 %16, %and12.6
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %20 = load i8, i8* %arrayidx.7, align 1
+  %20 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %21 = load i8, i8* %arrayidx3.7, align 1
+  %21 = load i8, ptr %arrayidx3.7, align 1
   %and12.7 = and i8 %21, %20
   %22 = xor i8 %19, %and12.7
   %xor13.7 = xor i8 %22, 1
@@ -469,50 +469,50 @@ define i8 @reduce_add(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %and12 = and i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %and12.1 = and i8 %3, %2
   %4 = add i8 %and12, %and12.1
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx.2, align 1
+  %5 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %6 = load i8, i8* %arrayidx3.2, align 1
+  %6 = load i8, ptr %arrayidx3.2, align 1
   %and12.2 = and i8 %6, %5
   %7 = add i8 %4, %and12.2
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %8 = load i8, i8* %arrayidx.3, align 1
+  %8 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %9 = load i8, i8* %arrayidx3.3, align 1
+  %9 = load i8, ptr %arrayidx3.3, align 1
   %and12.3 = and i8 %9, %8
   %10 = add i8 %7, %and12.3
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %11 = load i8, i8* %arrayidx.4, align 1
+  %11 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %12 = load i8, i8* %arrayidx3.4, align 1
+  %12 = load i8, ptr %arrayidx3.4, align 1
   %and12.4 = and i8 %12, %11
   %13 = add i8 %10, %and12.4
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %14 = load i8, i8* %arrayidx.5, align 1
+  %14 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %15 = load i8, i8* %arrayidx3.5, align 1
+  %15 = load i8, ptr %arrayidx3.5, align 1
   %and12.5 = and i8 %15, %14
   %16 = add i8 %13, %and12.5
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %17 = load i8, i8* %arrayidx.6, align 1
+  %17 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %18 = load i8, i8* %arrayidx3.6, align 1
+  %18 = load i8, ptr %arrayidx3.6, align 1
   %and12.6 = and i8 %18, %17
   %19 = add i8 %16, %and12.6
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %20 = load i8, i8* %arrayidx.7, align 1
+  %20 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %21 = load i8, i8* %arrayidx3.7, align 1
+  %21 = load i8, ptr %arrayidx3.7, align 1
   %and12.7 = and i8 %21, %20
   %22 = add i8 %19, %and12.7
   %add13.7 = add i8 %22, 1
@@ -534,50 +534,50 @@ define i8 @reduce_smin(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %and12 = and i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %and12.1 = and i8 %3, %2
   %4 = tail call i8 @llvm.smin.i8(i8 %and12, i8 %and12.1)
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx.2, align 1
+  %5 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %6 = load i8, i8* %arrayidx3.2, align 1
+  %6 = load i8, ptr %arrayidx3.2, align 1
   %and12.2 = and i8 %6, %5
   %7 = tail call i8 @llvm.smin.i8(i8 %4, i8 %and12.2)
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %8 = load i8, i8* %arrayidx.3, align 1
+  %8 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %9 = load i8, i8* %arrayidx3.3, align 1
+  %9 = load i8, ptr %arrayidx3.3, align 1
   %and12.3 = and i8 %9, %8
   %10 = tail call i8 @llvm.smin.i8(i8 %7, i8 %and12.3)
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %11 = load i8, i8* %arrayidx.4, align 1
+  %11 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %12 = load i8, i8* %arrayidx3.4, align 1
+  %12 = load i8, ptr %arrayidx3.4, align 1
   %and12.4 = and i8 %12, %11
   %13 = tail call i8 @llvm.smin.i8(i8 %10, i8 %and12.4)
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %14 = load i8, i8* %arrayidx.5, align 1
+  %14 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %15 = load i8, i8* %arrayidx3.5, align 1
+  %15 = load i8, ptr %arrayidx3.5, align 1
   %and12.5 = and i8 %15, %14
   %16 = tail call i8 @llvm.smin.i8(i8 %13, i8 %and12.5)
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %17 = load i8, i8* %arrayidx.6, align 1
+  %17 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %18 = load i8, i8* %arrayidx3.6, align 1
+  %18 = load i8, ptr %arrayidx3.6, align 1
   %and12.6 = and i8 %18, %17
   %19 = tail call i8 @llvm.smin.i8(i8 %16, i8 %and12.6)
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %20 = load i8, i8* %arrayidx.7, align 1
+  %20 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %21 = load i8, i8* %arrayidx3.7, align 1
+  %21 = load i8, ptr %arrayidx3.7, align 1
   %and12.7 = and i8 %21, %20
   %22 = tail call i8 @llvm.smin.i8(i8 %19, i8 %and12.7)
   ret i8 %22
@@ -598,50 +598,50 @@ define i8 @reduce_smax(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %and12 = and i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %and12.1 = and i8 %3, %2
   %4 = tail call i8 @llvm.smax.i8(i8 %and12, i8 %and12.1)
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx.2, align 1
+  %5 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %6 = load i8, i8* %arrayidx3.2, align 1
+  %6 = load i8, ptr %arrayidx3.2, align 1
   %and12.2 = and i8 %6, %5
   %7 = tail call i8 @llvm.smax.i8(i8 %4, i8 %and12.2)
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %8 = load i8, i8* %arrayidx.3, align 1
+  %8 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %9 = load i8, i8* %arrayidx3.3, align 1
+  %9 = load i8, ptr %arrayidx3.3, align 1
   %and12.3 = and i8 %9, %8
   %10 = tail call i8 @llvm.smax.i8(i8 %7, i8 %and12.3)
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %11 = load i8, i8* %arrayidx.4, align 1
+  %11 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %12 = load i8, i8* %arrayidx3.4, align 1
+  %12 = load i8, ptr %arrayidx3.4, align 1
   %and12.4 = and i8 %12, %11
   %13 = tail call i8 @llvm.smax.i8(i8 %10, i8 %and12.4)
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %14 = load i8, i8* %arrayidx.5, align 1
+  %14 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %15 = load i8, i8* %arrayidx3.5, align 1
+  %15 = load i8, ptr %arrayidx3.5, align 1
   %and12.5 = and i8 %15, %14
   %16 = tail call i8 @llvm.smax.i8(i8 %13, i8 %and12.5)
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %17 = load i8, i8* %arrayidx.6, align 1
+  %17 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %18 = load i8, i8* %arrayidx3.6, align 1
+  %18 = load i8, ptr %arrayidx3.6, align 1
   %and12.6 = and i8 %18, %17
   %19 = tail call i8 @llvm.smax.i8(i8 %16, i8 %and12.6)
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %20 = load i8, i8* %arrayidx.7, align 1
+  %20 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %21 = load i8, i8* %arrayidx3.7, align 1
+  %21 = load i8, ptr %arrayidx3.7, align 1
   %and12.7 = and i8 %21, %20
   %22 = tail call i8 @llvm.smax.i8(i8 %19, i8 %and12.7)
   ret i8 %22
@@ -662,50 +662,50 @@ define i8 @reduce_umax(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %and12 = and i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %and12.1 = and i8 %3, %2
   %4 = tail call i8 @llvm.umax.i8(i8 %and12, i8 %and12.1)
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx.2, align 1
+  %5 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %6 = load i8, i8* %arrayidx3.2, align 1
+  %6 = load i8, ptr %arrayidx3.2, align 1
   %and12.2 = and i8 %6, %5
   %7 = tail call i8 @llvm.umax.i8(i8 %4, i8 %and12.2)
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %8 = load i8, i8* %arrayidx.3, align 1
+  %8 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %9 = load i8, i8* %arrayidx3.3, align 1
+  %9 = load i8, ptr %arrayidx3.3, align 1
   %and12.3 = and i8 %9, %8
   %10 = tail call i8 @llvm.umax.i8(i8 %7, i8 %and12.3)
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %11 = load i8, i8* %arrayidx.4, align 1
+  %11 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %12 = load i8, i8* %arrayidx3.4, align 1
+  %12 = load i8, ptr %arrayidx3.4, align 1
   %and12.4 = and i8 %12, %11
   %13 = tail call i8 @llvm.umax.i8(i8 %10, i8 %and12.4)
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %14 = load i8, i8* %arrayidx.5, align 1
+  %14 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %15 = load i8, i8* %arrayidx3.5, align 1
+  %15 = load i8, ptr %arrayidx3.5, align 1
   %and12.5 = and i8 %15, %14
   %16 = tail call i8 @llvm.umax.i8(i8 %13, i8 %and12.5)
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %17 = load i8, i8* %arrayidx.6, align 1
+  %17 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %18 = load i8, i8* %arrayidx3.6, align 1
+  %18 = load i8, ptr %arrayidx3.6, align 1
   %and12.6 = and i8 %18, %17
   %19 = tail call i8 @llvm.umax.i8(i8 %16, i8 %and12.6)
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %20 = load i8, i8* %arrayidx.7, align 1
+  %20 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %21 = load i8, i8* %arrayidx3.7, align 1
+  %21 = load i8, ptr %arrayidx3.7, align 1
   %and12.7 = and i8 %21, %20
   %22 = tail call i8 @llvm.umax.i8(i8 %19, i8 %and12.7)
   ret i8 %22
@@ -726,50 +726,50 @@ define i8 @reduce_umin(ptr %a, ptr %b) {
 ;
 entry:
   %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
-  %0 = load i8, i8* %arrayidx, align 1
+  %0 = load i8, ptr %arrayidx, align 1
   %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
-  %1 = load i8, i8* %arrayidx3, align 1
+  %1 = load i8, ptr %arrayidx3, align 1
   %and12 = and i8 %1, %0
   %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
-  %3 = load i8, i8* %arrayidx3.1, align 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
   %and12.1 = and i8 %3, %2
   %4 = tail call i8 @llvm.umin.i8(i8 %and12, i8 %and12.1)
   %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
-  %5 = load i8, i8* %arrayidx.2, align 1
+  %5 = load i8, ptr %arrayidx.2, align 1
   %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
-  %6 = load i8, i8* %arrayidx3.2, align 1
+  %6 = load i8, ptr %arrayidx3.2, align 1
   %and12.2 = and i8 %6, %5
   %7 = tail call i8 @llvm.umin.i8(i8 %4, i8 %and12.2)
   %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
-  %8 = load i8, i8* %arrayidx.3, align 1
+  %8 = load i8, ptr %arrayidx.3, align 1
   %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
-  %9 = load i8, i8* %arrayidx3.3, align 1
+  %9 = load i8, ptr %arrayidx3.3, align 1
   %and12.3 = and i8 %9, %8
   %10 = tail call i8 @llvm.umin.i8(i8 %7, i8 %and12.3)
   %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
-  %11 = load i8, i8* %arrayidx.4, align 1
+  %11 = load i8, ptr %arrayidx.4, align 1
   %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
-  %12 = load i8, i8* %arrayidx3.4, align 1
+  %12 = load i8, ptr %arrayidx3.4, align 1
   %and12.4 = and i8 %12, %11
   %13 = tail call i8 @llvm.umin.i8(i8 %10, i8 %and12.4)
   %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
-  %14 = load i8, i8* %arrayidx.5, align 1
+  %14 = load i8, ptr %arrayidx.5, align 1
   %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
-  %15 = load i8, i8* %arrayidx3.5, align 1
+  %15 = load i8, ptr %arrayidx3.5, align 1
   %and12.5 = and i8 %15, %14
   %16 = tail call i8 @llvm.umin.i8(i8 %13, i8 %and12.5)
   %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
-  %17 = load i8, i8* %arrayidx.6, align 1
+  %17 = load i8, ptr %arrayidx.6, align 1
   %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
-  %18 = load i8, i8* %arrayidx3.6, align 1
+  %18 = load i8, ptr %arrayidx3.6, align 1
   %and12.6 = and i8 %18, %17
   %19 = tail call i8 @llvm.umin.i8(i8 %16, i8 %and12.6)
   %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
-  %20 = load i8, i8* %arrayidx.7, align 1
+  %20 = load i8, ptr %arrayidx.7, align 1
   %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
-  %21 = load i8, i8* %arrayidx3.7, align 1
+  %21 = load i8, ptr %arrayidx3.7, align 1
   %and12.7 = and i8 %21, %20
   %22 = tail call i8 @llvm.umin.i8(i8 %19, i8 %and12.7)
   ret i8 %22

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-vectorized.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-vectorized.ll
index 27e8f084e553d..a4cc311d12a21 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-vectorized.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-vectorized.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=slp-vectorizer -S -mtriple=riscv64-unknown-linux-gnu -mattr=+v < %s | FileCheck %s
 
-define void @test([48 x float]* %p, float* noalias %s) {
+define void @test(ptr %p, ptr noalias %s) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [48 x float], ptr [[P:%.*]], i64 0, i64 0
@@ -63,66 +63,66 @@ define void @test([48 x float]* %p, float* noalias %s) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 0
-  %i = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 30
-  %i1 = load float, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 0
+  %i = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 30
+  %i1 = load float, ptr %arrayidx1, align 4
   %add = fsub fast float %i1, %i
-  %arrayidx2 = getelementptr inbounds float, float* %s, i64 0
-  store float %add, float* %arrayidx2, align 4
-  %arrayidx4 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 4
-  %i2 = load float, float* %arrayidx4, align 4
-  %arrayidx6 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 26
-  %i3 = load float, float* %arrayidx6, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %s, i64 0
+  store float %add, ptr %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 4
+  %i2 = load float, ptr %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 26
+  %i3 = load float, ptr %arrayidx6, align 4
   %add7 = fsub fast float %i3, %i2
-  %arrayidx9 = getelementptr inbounds float, float* %s, i64 1
-  store float %add7, float* %arrayidx9, align 4
-  %arrayidx11 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 8
-  %i4 = load float, float* %arrayidx11, align 4
-  %arrayidx13 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 22
-  %i5 = load float, float* %arrayidx13, align 4
+  %arrayidx9 = getelementptr inbounds float, ptr %s, i64 1
+  store float %add7, ptr %arrayidx9, align 4
+  %arrayidx11 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 8
+  %i4 = load float, ptr %arrayidx11, align 4
+  %arrayidx13 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 22
+  %i5 = load float, ptr %arrayidx13, align 4
   %add14 = fsub fast float %i5, %i4
-  %arrayidx16 = getelementptr inbounds float, float* %s, i64 2
-  store float %add14, float* %arrayidx16, align 4
-  %arrayidx18 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 12
-  %i6 = load float, float* %arrayidx18, align 4
-  %arrayidx20 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 18
-  %i7 = load float, float* %arrayidx20, align 4
+  %arrayidx16 = getelementptr inbounds float, ptr %s, i64 2
+  store float %add14, ptr %arrayidx16, align 4
+  %arrayidx18 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 12
+  %i6 = load float, ptr %arrayidx18, align 4
+  %arrayidx20 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 18
+  %i7 = load float, ptr %arrayidx20, align 4
   %add21 = fsub fast float %i7, %i6
-  %arrayidx23 = getelementptr inbounds float, float* %s, i64 3
-  store float %add21, float* %arrayidx23, align 4
-  %arrayidx25 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 16
-  %i8 = load float, float* %arrayidx25, align 4
-  %arrayidx27 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 14
-  %i9 = load float, float* %arrayidx27, align 4
+  %arrayidx23 = getelementptr inbounds float, ptr %s, i64 3
+  store float %add21, ptr %arrayidx23, align 4
+  %arrayidx25 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 16
+  %i8 = load float, ptr %arrayidx25, align 4
+  %arrayidx27 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 14
+  %i9 = load float, ptr %arrayidx27, align 4
   %add28 = fsub fast float %i9, %i8
-  %arrayidx30 = getelementptr inbounds float, float* %s, i64 4
-  store float %add28, float* %arrayidx30, align 4
-  %arrayidx32 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 20
-  %i10 = load float, float* %arrayidx32, align 4
-  %arrayidx34 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 10
-  %i11 = load float, float* %arrayidx34, align 4
+  %arrayidx30 = getelementptr inbounds float, ptr %s, i64 4
+  store float %add28, ptr %arrayidx30, align 4
+  %arrayidx32 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 20
+  %i10 = load float, ptr %arrayidx32, align 4
+  %arrayidx34 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 10
+  %i11 = load float, ptr %arrayidx34, align 4
   %add35 = fsub fast float %i11, %i10
-  %arrayidx37 = getelementptr inbounds float, float* %s, i64 5
-  store float %add35, float* %arrayidx37, align 4
-  %arrayidx39 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 24
-  %i12 = load float, float* %arrayidx39, align 4
-  %arrayidx41 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 6
-  %i13 = load float, float* %arrayidx41, align 4
+  %arrayidx37 = getelementptr inbounds float, ptr %s, i64 5
+  store float %add35, ptr %arrayidx37, align 4
+  %arrayidx39 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 24
+  %i12 = load float, ptr %arrayidx39, align 4
+  %arrayidx41 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 6
+  %i13 = load float, ptr %arrayidx41, align 4
   %add42 = fsub fast float %i13, %i12
-  %arrayidx44 = getelementptr inbounds float, float* %s, i64 6
-  store float %add42, float* %arrayidx44, align 4
-  %arrayidx46 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 28
-  %i14 = load float, float* %arrayidx46, align 4
-  %arrayidx48 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 2
-  %i15 = load float, float* %arrayidx48, align 4
+  %arrayidx44 = getelementptr inbounds float, ptr %s, i64 6
+  store float %add42, ptr %arrayidx44, align 4
+  %arrayidx46 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 28
+  %i14 = load float, ptr %arrayidx46, align 4
+  %arrayidx48 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 2
+  %i15 = load float, ptr %arrayidx48, align 4
   %add49 = fsub fast float %i15, %i14
-  %arrayidx51 = getelementptr inbounds float, float* %s, i64 7
-  store float %add49, float* %arrayidx51, align 4
+  %arrayidx51 = getelementptr inbounds float, ptr %s, i64 7
+  store float %add49, ptr %arrayidx51, align 4
   ret void
 }
 
-define void @test1([48 x float]* %p, float* noalias %s, i32 %stride) {
+define void @test1(ptr %p, ptr noalias %s, i32 %stride) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STR:%.*]] = zext i32 [[STRIDE:%.*]] to i64
@@ -192,72 +192,72 @@ define void @test1([48 x float]* %p, float* noalias %s, i32 %stride) {
 ;
 entry:
   %str = zext i32 %stride to i64
-  %arrayidx = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 0
-  %i = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 30
-  %i1 = load float, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 0
+  %i = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 30
+  %i1 = load float, ptr %arrayidx1, align 4
   %add = fsub fast float %i1, %i
-  %arrayidx2 = getelementptr inbounds float, float* %s, i64 0
-  store float %add, float* %arrayidx2, align 4
-  %arrayidx4 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %str
-  %i2 = load float, float* %arrayidx4, align 4
-  %arrayidx6 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 26
-  %i3 = load float, float* %arrayidx6, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %s, i64 0
+  store float %add, ptr %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %str
+  %i2 = load float, ptr %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 26
+  %i3 = load float, ptr %arrayidx6, align 4
   %add7 = fsub fast float %i3, %i2
-  %arrayidx9 = getelementptr inbounds float, float* %s, i64 1
-  store float %add7, float* %arrayidx9, align 4
+  %arrayidx9 = getelementptr inbounds float, ptr %s, i64 1
+  store float %add7, ptr %arrayidx9, align 4
   %st1 = mul i64 %str, 2
-  %arrayidx11 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st1
-  %i4 = load float, float* %arrayidx11, align 4
-  %arrayidx13 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 22
-  %i5 = load float, float* %arrayidx13, align 4
+  %arrayidx11 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st1
+  %i4 = load float, ptr %arrayidx11, align 4
+  %arrayidx13 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 22
+  %i5 = load float, ptr %arrayidx13, align 4
   %add14 = fsub fast float %i5, %i4
-  %arrayidx16 = getelementptr inbounds float, float* %s, i64 2
-  store float %add14, float* %arrayidx16, align 4
+  %arrayidx16 = getelementptr inbounds float, ptr %s, i64 2
+  store float %add14, ptr %arrayidx16, align 4
   %st2 = mul i64 %str, 3
-  %arrayidx18 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st2
-  %i6 = load float, float* %arrayidx18, align 4
-  %arrayidx20 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 18
-  %i7 = load float, float* %arrayidx20, align 4
+  %arrayidx18 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st2
+  %i6 = load float, ptr %arrayidx18, align 4
+  %arrayidx20 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 18
+  %i7 = load float, ptr %arrayidx20, align 4
   %add21 = fsub fast float %i7, %i6
-  %arrayidx23 = getelementptr inbounds float, float* %s, i64 3
-  store float %add21, float* %arrayidx23, align 4
+  %arrayidx23 = getelementptr inbounds float, ptr %s, i64 3
+  store float %add21, ptr %arrayidx23, align 4
   %st3 = mul i64 %str, 4
-  %arrayidx25 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st3
-  %i8 = load float, float* %arrayidx25, align 4
-  %arrayidx27 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 14
-  %i9 = load float, float* %arrayidx27, align 4
+  %arrayidx25 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st3
+  %i8 = load float, ptr %arrayidx25, align 4
+  %arrayidx27 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 14
+  %i9 = load float, ptr %arrayidx27, align 4
   %add28 = fsub fast float %i9, %i8
-  %arrayidx30 = getelementptr inbounds float, float* %s, i64 4
-  store float %add28, float* %arrayidx30, align 4
+  %arrayidx30 = getelementptr inbounds float, ptr %s, i64 4
+  store float %add28, ptr %arrayidx30, align 4
   %st4 = mul i64 %str, 5
-  %arrayidx32 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st4
-  %i10 = load float, float* %arrayidx32, align 4
-  %arrayidx34 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 10
-  %i11 = load float, float* %arrayidx34, align 4
+  %arrayidx32 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st4
+  %i10 = load float, ptr %arrayidx32, align 4
+  %arrayidx34 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 10
+  %i11 = load float, ptr %arrayidx34, align 4
   %add35 = fsub fast float %i11, %i10
-  %arrayidx37 = getelementptr inbounds float, float* %s, i64 5
-  store float %add35, float* %arrayidx37, align 4
+  %arrayidx37 = getelementptr inbounds float, ptr %s, i64 5
+  store float %add35, ptr %arrayidx37, align 4
   %st5 = mul i64 %str, 6
-  %arrayidx39 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st5
-  %i12 = load float, float* %arrayidx39, align 4
-  %arrayidx41 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 6
-  %i13 = load float, float* %arrayidx41, align 4
+  %arrayidx39 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st5
+  %i12 = load float, ptr %arrayidx39, align 4
+  %arrayidx41 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 6
+  %i13 = load float, ptr %arrayidx41, align 4
   %add42 = fsub fast float %i13, %i12
-  %arrayidx44 = getelementptr inbounds float, float* %s, i64 6
-  store float %add42, float* %arrayidx44, align 4
+  %arrayidx44 = getelementptr inbounds float, ptr %s, i64 6
+  store float %add42, ptr %arrayidx44, align 4
   %st6 = mul i64 %str, 7
-  %arrayidx46 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st6
-  %i14 = load float, float* %arrayidx46, align 4
-  %arrayidx48 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 2
-  %i15 = load float, float* %arrayidx48, align 4
+  %arrayidx46 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st6
+  %i14 = load float, ptr %arrayidx46, align 4
+  %arrayidx48 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 2
+  %i15 = load float, ptr %arrayidx48, align 4
   %add49 = fsub fast float %i15, %i14
-  %arrayidx51 = getelementptr inbounds float, float* %s, i64 7
-  store float %add49, float* %arrayidx51, align 4
+  %arrayidx51 = getelementptr inbounds float, ptr %s, i64 7
+  store float %add49, ptr %arrayidx51, align 4
   ret void
 }
 
-define void @test2([48 x float]* %p, float* noalias %s, i32 %stride) {
+define void @test2(ptr %p, ptr noalias %s, i32 %stride) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STR:%.*]] = zext i32 [[STRIDE:%.*]] to i64
@@ -327,72 +327,72 @@ define void @test2([48 x float]* %p, float* noalias %s, i32 %stride) {
 ;
 entry:
   %str = zext i32 %stride to i64
-  %arrayidx = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 2
-  %i = load float, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 2
+  %i = load float, ptr %arrayidx, align 4
   %st6 = mul i64 %str, 7
-  %arrayidx1 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st6
-  %i1 = load float, float* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st6
+  %i1 = load float, ptr %arrayidx1, align 4
   %add = fsub fast float %i1, %i
-  %arrayidx2 = getelementptr inbounds float, float* %s, i64 0
-  store float %add, float* %arrayidx2, align 4
-  %arrayidx4 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 6
-  %i2 = load float, float* %arrayidx4, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %s, i64 0
+  store float %add, ptr %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 6
+  %i2 = load float, ptr %arrayidx4, align 4
   %st5 = mul i64 %str, 6
-  %arrayidx6 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st5
-  %i3 = load float, float* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st5
+  %i3 = load float, ptr %arrayidx6, align 4
   %add7 = fsub fast float %i3, %i2
-  %arrayidx9 = getelementptr inbounds float, float* %s, i64 1
-  store float %add7, float* %arrayidx9, align 4
-  %arrayidx11 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 10
-  %i4 = load float, float* %arrayidx11, align 4
+  %arrayidx9 = getelementptr inbounds float, ptr %s, i64 1
+  store float %add7, ptr %arrayidx9, align 4
+  %arrayidx11 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 10
+  %i4 = load float, ptr %arrayidx11, align 4
   %st4 = mul i64 %str, 5
-  %arrayidx13 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st4
-  %i5 = load float, float* %arrayidx13, align 4
+  %arrayidx13 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st4
+  %i5 = load float, ptr %arrayidx13, align 4
   %add14 = fsub fast float %i5, %i4
-  %arrayidx16 = getelementptr inbounds float, float* %s, i64 2
-  store float %add14, float* %arrayidx16, align 4
-  %arrayidx18 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 14
-  %i6 = load float, float* %arrayidx18, align 4
+  %arrayidx16 = getelementptr inbounds float, ptr %s, i64 2
+  store float %add14, ptr %arrayidx16, align 4
+  %arrayidx18 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 14
+  %i6 = load float, ptr %arrayidx18, align 4
   %st3 = mul i64 %str, 4
-  %arrayidx20 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st3
-  %i7 = load float, float* %arrayidx20, align 4
+  %arrayidx20 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st3
+  %i7 = load float, ptr %arrayidx20, align 4
   %add21 = fsub fast float %i7, %i6
-  %arrayidx23 = getelementptr inbounds float, float* %s, i64 3
-  store float %add21, float* %arrayidx23, align 4
-  %arrayidx25 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 18
+  %arrayidx23 = getelementptr inbounds float, ptr %s, i64 3
+  store float %add21, ptr %arrayidx23, align 4
+  %arrayidx25 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 18
   %st2 = mul i64 %str, 3
-  %i8 = load float, float* %arrayidx25, align 4
-  %arrayidx27 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st2
-  %i9 = load float, float* %arrayidx27, align 4
+  %i8 = load float, ptr %arrayidx25, align 4
+  %arrayidx27 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st2
+  %i9 = load float, ptr %arrayidx27, align 4
   %add28 = fsub fast float %i9, %i8
-  %arrayidx30 = getelementptr inbounds float, float* %s, i64 4
-  store float %add28, float* %arrayidx30, align 4
-  %arrayidx32 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 22
-  %i10 = load float, float* %arrayidx32, align 4
+  %arrayidx30 = getelementptr inbounds float, ptr %s, i64 4
+  store float %add28, ptr %arrayidx30, align 4
+  %arrayidx32 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 22
+  %i10 = load float, ptr %arrayidx32, align 4
   %st1 = mul i64 %str, 2
-  %arrayidx34 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %st1
-  %i11 = load float, float* %arrayidx34, align 4
+  %arrayidx34 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %st1
+  %i11 = load float, ptr %arrayidx34, align 4
   %add35 = fsub fast float %i11, %i10
-  %arrayidx37 = getelementptr inbounds float, float* %s, i64 5
-  store float %add35, float* %arrayidx37, align 4
-  %arrayidx39 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 26
-  %i12 = load float, float* %arrayidx39, align 4
-  %arrayidx41 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 %str
-  %i13 = load float, float* %arrayidx41, align 4
+  %arrayidx37 = getelementptr inbounds float, ptr %s, i64 5
+  store float %add35, ptr %arrayidx37, align 4
+  %arrayidx39 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 26
+  %i12 = load float, ptr %arrayidx39, align 4
+  %arrayidx41 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 %str
+  %i13 = load float, ptr %arrayidx41, align 4
   %add42 = fsub fast float %i13, %i12
-  %arrayidx44 = getelementptr inbounds float, float* %s, i64 6
-  store float %add42, float* %arrayidx44, align 4
-  %arrayidx46 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 30
-  %i14 = load float, float* %arrayidx46, align 4
-  %arrayidx48 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 0
-  %i15 = load float, float* %arrayidx48, align 4
+  %arrayidx44 = getelementptr inbounds float, ptr %s, i64 6
+  store float %add42, ptr %arrayidx44, align 4
+  %arrayidx46 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 30
+  %i14 = load float, ptr %arrayidx46, align 4
+  %arrayidx48 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 0
+  %i15 = load float, ptr %arrayidx48, align 4
   %add49 = fsub fast float %i15, %i14
-  %arrayidx51 = getelementptr inbounds float, float* %s, i64 7
-  store float %add49, float* %arrayidx51, align 4
+  %arrayidx51 = getelementptr inbounds float, ptr %s, i64 7
+  store float %add49, ptr %arrayidx51, align 4
   ret void
 }
 
-define void @test3([48 x float]* %p, float* noalias %s) {
+define void @test3(ptr %p, ptr noalias %s) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [48 x float], ptr [[P:%.*]], i64 0, i64 0
@@ -421,62 +421,62 @@ define void @test3([48 x float]* %p, float* noalias %s) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 0
-  %i = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 30
-  %i1 = load float, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 0
+  %i = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 30
+  %i1 = load float, ptr %arrayidx1, align 4
   %add = fsub fast float %i1, %i
-  %arrayidx2 = getelementptr inbounds float, float* %s, i64 0
-  store float %add, float* %arrayidx2, align 4
-  %arrayidx4 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 4
-  %i2 = load float, float* %arrayidx4, align 4
-  %arrayidx6 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 29
-  %i3 = load float, float* %arrayidx6, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %s, i64 0
+  store float %add, ptr %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 4
+  %i2 = load float, ptr %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 29
+  %i3 = load float, ptr %arrayidx6, align 4
   %add7 = fsub fast float %i3, %i2
-  %arrayidx9 = getelementptr inbounds float, float* %s, i64 1
-  store float %add7, float* %arrayidx9, align 4
-  %arrayidx11 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 8
-  %i4 = load float, float* %arrayidx11, align 4
-  %arrayidx13 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 28
-  %i5 = load float, float* %arrayidx13, align 4
+  %arrayidx9 = getelementptr inbounds float, ptr %s, i64 1
+  store float %add7, ptr %arrayidx9, align 4
+  %arrayidx11 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 8
+  %i4 = load float, ptr %arrayidx11, align 4
+  %arrayidx13 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 28
+  %i5 = load float, ptr %arrayidx13, align 4
   %add14 = fsub fast float %i5, %i4
-  %arrayidx16 = getelementptr inbounds float, float* %s, i64 2
-  store float %add14, float* %arrayidx16, align 4
-  %arrayidx18 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 12
-  %i6 = load float, float* %arrayidx18, align 4
-  %arrayidx20 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 27
-  %i7 = load float, float* %arrayidx20, align 4
+  %arrayidx16 = getelementptr inbounds float, ptr %s, i64 2
+  store float %add14, ptr %arrayidx16, align 4
+  %arrayidx18 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 12
+  %i6 = load float, ptr %arrayidx18, align 4
+  %arrayidx20 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 27
+  %i7 = load float, ptr %arrayidx20, align 4
   %add21 = fsub fast float %i7, %i6
-  %arrayidx23 = getelementptr inbounds float, float* %s, i64 3
-  store float %add21, float* %arrayidx23, align 4
-  %arrayidx25 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 16
-  %i8 = load float, float* %arrayidx25, align 4
-  %arrayidx27 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 26
-  %i9 = load float, float* %arrayidx27, align 4
+  %arrayidx23 = getelementptr inbounds float, ptr %s, i64 3
+  store float %add21, ptr %arrayidx23, align 4
+  %arrayidx25 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 16
+  %i8 = load float, ptr %arrayidx25, align 4
+  %arrayidx27 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 26
+  %i9 = load float, ptr %arrayidx27, align 4
   %add28 = fsub fast float %i9, %i8
-  %arrayidx30 = getelementptr inbounds float, float* %s, i64 4
-  store float %add28, float* %arrayidx30, align 4
-  %arrayidx32 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 20
-  %i10 = load float, float* %arrayidx32, align 4
-  %arrayidx34 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 25
-  %i11 = load float, float* %arrayidx34, align 4
+  %arrayidx30 = getelementptr inbounds float, ptr %s, i64 4
+  store float %add28, ptr %arrayidx30, align 4
+  %arrayidx32 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 20
+  %i10 = load float, ptr %arrayidx32, align 4
+  %arrayidx34 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 25
+  %i11 = load float, ptr %arrayidx34, align 4
   %add35 = fsub fast float %i11, %i10
-  %arrayidx37 = getelementptr inbounds float, float* %s, i64 5
-  store float %add35, float* %arrayidx37, align 4
-  %arrayidx39 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 24
-  %i12 = load float, float* %arrayidx39, align 4
-  %arrayidx41 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 24
-  %i13 = load float, float* %arrayidx41, align 4
+  %arrayidx37 = getelementptr inbounds float, ptr %s, i64 5
+  store float %add35, ptr %arrayidx37, align 4
+  %arrayidx39 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 24
+  %i12 = load float, ptr %arrayidx39, align 4
+  %arrayidx41 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 24
+  %i13 = load float, ptr %arrayidx41, align 4
   %add42 = fsub fast float %i13, %i12
-  %arrayidx44 = getelementptr inbounds float, float* %s, i64 6
-  store float %add42, float* %arrayidx44, align 4
-  %arrayidx46 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 28
-  %i14 = load float, float* %arrayidx46, align 4
-  %arrayidx48 = getelementptr inbounds [48 x float], [48 x float]* %p, i64 0, i64 23
-  %i15 = load float, float* %arrayidx48, align 4
+  %arrayidx44 = getelementptr inbounds float, ptr %s, i64 6
+  store float %add42, ptr %arrayidx44, align 4
+  %arrayidx46 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 28
+  %i14 = load float, ptr %arrayidx46, align 4
+  %arrayidx48 = getelementptr inbounds [48 x float], ptr %p, i64 0, i64 23
+  %i15 = load float, ptr %arrayidx48, align 4
   %add49 = fsub fast float %i15, %i14
-  %arrayidx51 = getelementptr inbounds float, float* %s, i64 7
-  store float %add49, float* %arrayidx51, align 4
+  %arrayidx51 = getelementptr inbounds float, ptr %s, i64 7
+  store float %add49, ptr %arrayidx51, align 4
   ret void
 }
 

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
index 7f986c74f207f..639aa0a1c6a2c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
@@ -153,7 +153,7 @@ define void @fshl_v8i64() {
   %r5 = call i64 @llvm.fshl.i64(i64 %a5, i64 %a5, i64 %b5)
   %r6 = call i64 @llvm.fshl.i64(i64 %a6, i64 %a6, i64 %b6)
   %r7 = call i64 @llvm.fshl.i64(i64 %a7, i64 %a7, i64 %b7)
-  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r0, ptr @d64, align 8
   store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
   store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
   store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
index 5153dc34e7a4f..daf28b9a0bb4d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
@@ -162,7 +162,7 @@ define void @fshl_v8i64() {
   %r5 = call i64 @llvm.fshl.i64(i64 %a5, i64 %b5, i64 %c5)
   %r6 = call i64 @llvm.fshl.i64(i64 %a6, i64 %b6, i64 %c6)
   %r7 = call i64 @llvm.fshl.i64(i64 %a7, i64 %b7, i64 %c7)
-  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r0, ptr @d64, align 8
   store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
   store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
   store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
index 52c6f14f28e18..c557c9647551a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
@@ -153,7 +153,7 @@ define void @fshr_v8i64() {
   %r5 = call i64 @llvm.fshr.i64(i64 %a5, i64 %a5, i64 %b5)
   %r6 = call i64 @llvm.fshr.i64(i64 %a6, i64 %a6, i64 %b6)
   %r7 = call i64 @llvm.fshr.i64(i64 %a7, i64 %a7, i64 %b7)
-  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r0, ptr @d64, align 8
   store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
   store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
   store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
index b456742337abd..fb7532768c4b3 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
@@ -162,7 +162,7 @@ define void @fshr_v8i64() {
   %r5 = call i64 @llvm.fshr.i64(i64 %a5, i64 %b5, i64 %c5)
   %r6 = call i64 @llvm.fshr.i64(i64 %a6, i64 %b6, i64 %c6)
   %r7 = call i64 @llvm.fshr.i64(i64 %a7, i64 %b7, i64 %c7)
-  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r0, ptr @d64, align 8
   store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
   store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
   store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
index 197ffd11b245f..b44e5f3bde3a1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
@@ -1135,7 +1135,7 @@ define float @fadd_v4f32_fmf(ptr %p) {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc nsz float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP1]])
 ; CHECK-NEXT:    ret float [[TMP2]]
 ;
-  %p1 = getelementptr inbounds float, float* %p, i64 1
+  %p1 = getelementptr inbounds float, ptr %p, i64 1
   %p2 = getelementptr inbounds float, ptr %p, i64 2
   %p3 = getelementptr inbounds float, ptr %p, i64 3
   %t0 = load float, ptr %p, align 4
@@ -1158,7 +1158,7 @@ define float @fadd_v4f32_fmf_intersect(ptr %p) {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc ninf nsz float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP1]])
 ; CHECK-NEXT:    ret float [[TMP2]]
 ;
-  %p1 = getelementptr inbounds float, float* %p, i64 1
+  %p1 = getelementptr inbounds float, ptr %p, i64 1
   %p2 = getelementptr inbounds float, ptr %p, i64 2
   %p3 = getelementptr inbounds float, ptr %p, i64 3
   %t0 = load float, ptr %p, align 4

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/opaque-ptr.ll b/llvm/test/Transforms/SLPVectorizer/X86/opaque-ptr.ll
index a2c2e41d81b42..3801fa5c787b6 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/opaque-ptr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/opaque-ptr.ll
@@ -50,7 +50,7 @@ define void @test(ptr %r, ptr %p, ptr %q) #0 {
   ret void
 }
 
-define void @test2(i64* %a, i64* %b) {
+define void @test2(ptr %a, ptr %b) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 1
 ; CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 2
@@ -65,16 +65,16 @@ define void @test2(i64* %a, i64* %b) {
 ; CHECK-NEXT:    store i64 [[ADD2]], ptr [[A2]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %a1 = getelementptr inbounds i64, i64* %a, i64 1
-  %a2 = getelementptr inbounds i64, i64* %a, i64 2
-  %i1 = ptrtoint i64* %a1 to i64
-  %b3 = getelementptr inbounds i64, i64* %b, i64 3
-  %i2 = ptrtoint i64* %b3 to i64
-  %v1 = load i64, i64* %a1, align 8
-  %v2 = load i64, i64* %a2, align 8
+  %a1 = getelementptr inbounds i64, ptr %a, i64 1
+  %a2 = getelementptr inbounds i64, ptr %a, i64 2
+  %i1 = ptrtoint ptr %a1 to i64
+  %b3 = getelementptr inbounds i64, ptr %b, i64 3
+  %i2 = ptrtoint ptr %b3 to i64
+  %v1 = load i64, ptr %a1, align 8
+  %v2 = load i64, ptr %a2, align 8
   %add1 = add i64 %i1, %v1
   %add2 = add i64 %i2, %v2
-  store i64 %add1, i64* %a1, align 8
-  store i64 %add2, i64* %a2, align 8
+  store i64 %add1, ptr %a1, align 8
+  store i64 %add2, ptr %a2, align 8
   ret void
 }

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll b/llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll
index d83251feac3af..f4a0fc84cee8e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll
@@ -26,7 +26,7 @@ define void @stackrestore1(ptr %out) {
   %val1 = load float, ptr %addr1, align 4
   %val2 = load float, ptr %addr2, align 4
   %val3 = load float, ptr %addr3, align 4
-  call void @llvm.stackrestore(i8* %stack)
+  call void @llvm.stackrestore(ptr %stack)
   %outaddr2 = getelementptr inbounds float, ptr %out, i64 2
   store float %val0, ptr %outaddr2, align 4
   %outaddr3 = getelementptr inbounds float, ptr %out, i64 3
@@ -37,5 +37,5 @@ define void @stackrestore1(ptr %out) {
   ret void
 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)

diff --git a/llvm/test/Transforms/SROA/invariant-group.ll b/llvm/test/Transforms/SROA/invariant-group.ll
index bcce0b6b9f284..1be6f6e2fc32b 100644
--- a/llvm/test/Transforms/SROA/invariant-group.ll
+++ b/llvm/test/Transforms/SROA/invariant-group.ll
@@ -155,7 +155,7 @@ define void @partial_promotion_of_alloca() {
   ret void
 }
 
-declare void @use(i32*)
+declare void @use(ptr)
 
 !0 = !{}
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:

diff --git a/llvm/test/Transforms/SROA/phi-gep.ll b/llvm/test/Transforms/SROA/phi-gep.ll
index a8c3331d6281c..c5aa1cdd9cf65 100644
--- a/llvm/test/Transforms/SROA/phi-gep.ll
+++ b/llvm/test/Transforms/SROA/phi-gep.ll
@@ -228,7 +228,7 @@ for:
   %phi_i = phi i32 [ 0, %entry ], [ %i, %for ]
   %phi = phi ptr [ %gep_a, %entry], [ %gep_for, %for ]
   %i = add i32 %phi_i, 1
-  %gep_for = getelementptr inbounds i32, i32* %phi, i32 0
+  %gep_for = getelementptr inbounds i32, ptr %phi, i32 0
   %loop.cond = icmp ult i32 %i, 10
   br i1 %loop.cond, label %for, label %end
 
@@ -397,7 +397,7 @@ for:
   %phi_i = phi i32 [ 0, %entry ], [ %i, %for ]
   %phi = phi ptr [ %gep_a, %entry], [ %gep_for, %for ]
   %i = add i32 %phi_i, 1
-  %gep_for = getelementptr inbounds float, float* %phi, i32 0
+  %gep_for = getelementptr inbounds float, ptr %phi, i32 0
   %loop.cond = icmp ult i32 %i, 10
   br i1 %loop.cond, label %for, label %end
 

diff --git a/llvm/test/Transforms/SROA/scalable-vector-struct.ll b/llvm/test/Transforms/SROA/scalable-vector-struct.ll
index 1af4fbbd9254b..57f89d285e531 100644
--- a/llvm/test/Transforms/SROA/scalable-vector-struct.ll
+++ b/llvm/test/Transforms/SROA/scalable-vector-struct.ll
@@ -16,8 +16,8 @@ define %struct.test @alloca(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
   %addr = alloca %struct.test, align 4
   %agg0 = insertvalue %struct.test undef, <vscale x 1 x i32> %x, 0
   %agg1 = insertvalue %struct.test %agg0, <vscale x 1 x i32> %y, 1
-  store %struct.test %agg1, %struct.test* %addr, align 4
-  %val = load %struct.test, %struct.test* %addr, align 4
+  store %struct.test %agg1, ptr %addr, align 4
+  %val = load %struct.test, ptr %addr, align 4
   ret %struct.test %val
 }
 

diff --git a/llvm/test/Transforms/SROA/sroa-common-type-fail-promotion.ll b/llvm/test/Transforms/SROA/sroa-common-type-fail-promotion.ll
index 8f30aef9d8355..72014912edd20 100644
--- a/llvm/test/Transforms/SROA/sroa-common-type-fail-promotion.ll
+++ b/llvm/test/Transforms/SROA/sroa-common-type-fail-promotion.ll
@@ -29,7 +29,7 @@ define amdgpu_kernel void @test_zeroinit() #0 {
 entry:
   %b_blockwise_copy = alloca %"struct.b", align 16
   store <8 x half> zeroinitializer, ptr %b_blockwise_copy, align 16
-  %data = load <4 x float>, <4 x float>* undef
+  %data = load <4 x float>, ptr undef
   store <4 x float> %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -60,7 +60,7 @@ define amdgpu_kernel void @test_memset() #0 {
 entry:
   %b_blockwise_copy = alloca %"struct.b", align 16
   call void @llvm.memset.p0.i64(ptr align 16 %b_blockwise_copy, i8 0, i64 16, i1 false)
-  %data = load <4 x float>, <4 x float>* undef
+  %data = load <4 x float>, ptr undef
   store <4 x float> %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -91,7 +91,7 @@ define amdgpu_kernel void @vector_type_alloca() #0 {
 entry:
   %b_blockwise_copy = alloca <8 x half>, align 16
   store <8 x half> zeroinitializer, ptr %b_blockwise_copy, align 16
-  %data = load <4 x float>, <4 x float>* undef
+  %data = load <4 x float>, ptr undef
   store <4 x float> %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -122,7 +122,7 @@ define amdgpu_kernel void @test_struct_contain_multiple_types1() #0 {
 entry:
   %b_blockwise_copy = alloca %"struct.c", align 16
   store <8 x half> zeroinitializer, ptr %b_blockwise_copy, align 16
-  %data = load <4 x float>, <4 x float>* undef
+  %data = load <4 x float>, ptr undef
   store <4 x float> %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -158,11 +158,11 @@ define amdgpu_kernel void @test_struct_contain_multiple_types2() #0 {
 entry:
   %b_blockwise_copy = alloca %"struct.d", align 16
   call void @llvm.memset.p0.i32(ptr align 16 %b_blockwise_copy, i8 0, i32 16, i1 false)
-  %data1 = load [4 x i32], [4 x i32]* undef
+  %data1 = load [4 x i32], ptr undef
   store [4 x i32] %data1, ptr %b_blockwise_copy, align 16
   %data2_gep = getelementptr inbounds i8, ptr %b_blockwise_copy, i64 16
   store <8 x half> zeroinitializer, ptr %data2_gep, align 16
-  %data2 = load <4 x float>, <4 x float>* undef
+  %data2 = load <4 x float>, ptr undef
   store <4 x float> %data2, ptr %data2_gep, align 16
   br label %bb
 
@@ -196,9 +196,9 @@ entry:
   store <8 x half> zeroinitializer, ptr %b_blockwise_copy, align 16
   %0 = getelementptr inbounds i8, ptr %b_blockwise_copy, i64 16
   store <8 x half> zeroinitializer, ptr %0, align 16
-  %data0 = load <4 x float>, <4 x float>* undef
+  %data0 = load <4 x float>, ptr undef
   store <4 x float> %data0, ptr %b_blockwise_copy, align 16
-  %data1 = load <4 x float>, <4 x float>* undef
+  %data1 = load <4 x float>, ptr undef
   store <4 x float> %data1, ptr %0, align 16
   br label %bb
 
@@ -226,9 +226,9 @@ define amdgpu_kernel void @test_struct_array_vector_i16() #0 {
 entry:
   %b_blockwise_copy = alloca %"struct.f", align 16
   call void @llvm.memset.p0.i32(ptr align 16 %b_blockwise_copy, i8 0, i32 32, i1 false)
-  %data = load <4 x i32>, <4 x i32>* undef
+  %data = load <4 x i32>, ptr undef
   store <4 x i32> %data, ptr %b_blockwise_copy, align 16
-  %data2 = load <4 x i32>, <4 x i32>* undef
+  %data2 = load <4 x i32>, ptr undef
   %data2_gep = getelementptr inbounds i8, ptr %b_blockwise_copy, i64 16
   store <4 x i32> %data2, ptr %data2_gep, align 16
   br label %bb
@@ -269,7 +269,7 @@ define amdgpu_kernel void @test_half_array() #0 {
 entry:
   %b_blockwise_copy = alloca [8 x half], align 16
   call void @llvm.memset.p0.i32(ptr align 16 %b_blockwise_copy, i8 0, i32 16, i1 false)
-  %data = load [4 x float], [4 x float]* undef
+  %data = load [4 x float], ptr undef
   store [4 x float] %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -302,7 +302,7 @@ define amdgpu_kernel void @test_array_vector() #0 {
 entry:
   %b_blockwise_copy = alloca %"array.a", align 16
   call void @llvm.memset.p0.i32(ptr align 16 %b_blockwise_copy, i8 0, i32 32, i1 false)
-  %data = load <4 x float>, <4 x float>* undef
+  %data = load <4 x float>, ptr undef
   store <4 x float> %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -335,7 +335,7 @@ define amdgpu_kernel void @test_array_vector2() #0 {
 entry:
   %b_blockwise_copy = alloca %"array.b", align 16
   call void @llvm.memset.p0.i32(ptr align 16 %b_blockwise_copy, i8 0, i32 32, i1 false)
-  %data = load <4 x float>, <4 x float>* undef
+  %data = load <4 x float>, ptr undef
   store <4 x float> %data, ptr %b_blockwise_copy, align 16
   br label %bb
 
@@ -388,10 +388,10 @@ define amdgpu_kernel void @test_array_vector_no_vector_common_type() #0 {
 entry:
   %b_blockwise_copy = alloca %"array.a", align 16
   call void @llvm.memset.p0.i32(ptr align 16 %b_blockwise_copy, i8 0, i32 32, i1 false)
-  %data1 = load float, float* undef
-  %data2 = load float, float* undef
-  %data3 = load float, float* undef
-  %data4 = load float, float* undef
+  %data1 = load float, ptr undef
+  %data2 = load float, ptr undef
+  %data3 = load float, ptr undef
+  %data4 = load float, ptr undef
   store float %data1, ptr %b_blockwise_copy, align 16
   %data_ptr1 = getelementptr inbounds i8, ptr %b_blockwise_copy, i64 4
   store float %data2, ptr %data_ptr1, align 16

diff --git a/llvm/test/Transforms/SROA/vector-promotion.ll b/llvm/test/Transforms/SROA/vector-promotion.ll
index ee35a0fd5fea6..e2aa1e2ee1c70 100644
--- a/llvm/test/Transforms/SROA/vector-promotion.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion.ll
@@ -966,8 +966,8 @@ define i32 @test14(<2 x i64> %x) {
 ;
 entry:
   %x.addr = alloca <2 x i64>, align 16
-  store <2 x i64> %x, <2 x i64>* %x.addr, align 16
-  %x.cast = bitcast <2 x i64>* %x.addr to i32*
+  store <2 x i64> %x, ptr %x.addr, align 16
+  %x.cast = bitcast ptr %x.addr to ptr
   %a = load i32, ptr %x.cast
   %x.tmp2 = getelementptr inbounds i32, ptr %x.cast, i64 1
   %b = load i32, ptr %x.tmp2

diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
index 9409e8913bcea..ee67ab3411175 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
@@ -27,7 +27,7 @@ define <2 x i32> @scalarize_v2i32(<2 x ptr> %p, <2 x i1> %mask, <2 x i32> %passt
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i32> [ [[RES1]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-NEXT:    ret <2 x i32> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x ptr> %p, i32 8, <2 x i1> %mask, <2 x i32> %passthru)
+  %ret = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> %p, i32 8, <2 x i1> %mask, <2 x i32> %passthru)
   ret <2 x i32> %ret
 }
 
@@ -54,9 +54,9 @@ define void @scalarize_v2i64(<2 x ptr> %p, <2 x i1> %mask, <2 x i64> %value) {
 ; CHECK:       else2:
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> %mask)
+  call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> %mask)
   ret void
 }
 
-declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
-declare void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64>, <2 x i64*>, i32, <2 x i1>)
+declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
+declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>)

diff --git a/llvm/test/Transforms/Util/pr49185.ll b/llvm/test/Transforms/Util/pr49185.ll
index 76bef2db425d8..044ccadc27bc6 100644
--- a/llvm/test/Transforms/Util/pr49185.ll
+++ b/llvm/test/Transforms/Util/pr49185.ll
@@ -53,7 +53,7 @@ land.end1851:                                     ; preds = %land.rhs1834, %lbl_
   br i1 %tobool2351, label %if.then2352, label %if.else3029
 
 if.then2352:                                      ; preds = %land.end1851
-  %3 = load i16, ptr getelementptr inbounds ({ i16, i16 }, ptr @g_79, i32 0, i32 0), align 1, !tbaa !1
+  %3 = load i16, ptr @g_79, align 1, !tbaa !1
   %tobool3011 = icmp ne i16 %3, 0
   call void @llvm.assume(i1 %tobool3011)
   store i32 11, ptr %cleanup.dest.slot, align 1

diff --git a/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll b/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
index 8a3382a75090d..eae0879004839 100644
--- a/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
+++ b/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
@@ -487,7 +487,7 @@ define void @test_1652048214(ptr %src, ptr %dst) {
   ret void
 }
 
-define dso_local i32 @full(i8* nocapture noundef readonly %p1, i32 noundef %st1, i8* nocapture noundef readonly %p2, i32 noundef %st2) {
+define dso_local i32 @full(ptr nocapture noundef readonly %p1, i32 noundef %st1, ptr nocapture noundef readonly %p2, i32 noundef %st2) {
 ; CHECK-LABEL: @full(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[ST1:%.*]] to i64
@@ -606,54 +606,54 @@ define dso_local i32 @full(i8* nocapture noundef readonly %p1, i32 noundef %st1,
 entry:
   %idx.ext = sext i32 %st1 to i64
   %idx.ext63 = sext i32 %st2 to i64
-  %arrayidx3 = getelementptr inbounds i8, i8* %p1, i64 4
-  %arrayidx5 = getelementptr inbounds i8, i8* %p2, i64 4
-  %add.ptr = getelementptr inbounds i8, i8* %p1, i64 %idx.ext
-  %add.ptr64 = getelementptr inbounds i8, i8* %p2, i64 %idx.ext63
-  %arrayidx3.1 = getelementptr inbounds i8, i8* %add.ptr, i64 4
-  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
-  %add.ptr.1 = getelementptr inbounds i8, i8* %add.ptr, i64 %idx.ext
-  %add.ptr64.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 %idx.ext63
-  %arrayidx3.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 4
-  %arrayidx5.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 4
-  %add.ptr.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 %idx.ext
-  %add.ptr64.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 %idx.ext63
-  %arrayidx3.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 4
-  %arrayidx5.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 4
-  %0 = bitcast i8* %p1 to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %p2 to <4 x i8>*
-  %3 = load <4 x i8>, <4 x i8>* %2, align 1
-  %4 = bitcast i8* %arrayidx3 to <4 x i8>*
-  %5 = load <4 x i8>, <4 x i8>* %4, align 1
-  %6 = bitcast i8* %arrayidx5 to <4 x i8>*
-  %7 = load <4 x i8>, <4 x i8>* %6, align 1
-  %8 = bitcast i8* %add.ptr to <4 x i8>*
-  %9 = load <4 x i8>, <4 x i8>* %8, align 1
-  %10 = bitcast i8* %add.ptr64 to <4 x i8>*
-  %11 = load <4 x i8>, <4 x i8>* %10, align 1
-  %12 = bitcast i8* %arrayidx3.1 to <4 x i8>*
-  %13 = load <4 x i8>, <4 x i8>* %12, align 1
-  %14 = bitcast i8* %arrayidx5.1 to <4 x i8>*
-  %15 = load <4 x i8>, <4 x i8>* %14, align 1
-  %16 = bitcast i8* %add.ptr.1 to <4 x i8>*
-  %17 = load <4 x i8>, <4 x i8>* %16, align 1
-  %18 = bitcast i8* %add.ptr64.1 to <4 x i8>*
-  %19 = load <4 x i8>, <4 x i8>* %18, align 1
-  %20 = bitcast i8* %arrayidx3.2 to <4 x i8>*
-  %21 = load <4 x i8>, <4 x i8>* %20, align 1
-  %22 = bitcast i8* %arrayidx5.2 to <4 x i8>*
-  %23 = load <4 x i8>, <4 x i8>* %22, align 1
-  %24 = bitcast i8* %add.ptr.2 to <4 x i8>*
-  %25 = load <4 x i8>, <4 x i8>* %24, align 1
+  %arrayidx3 = getelementptr inbounds i8, ptr %p1, i64 4
+  %arrayidx5 = getelementptr inbounds i8, ptr %p2, i64 4
+  %add.ptr = getelementptr inbounds i8, ptr %p1, i64 %idx.ext
+  %add.ptr64 = getelementptr inbounds i8, ptr %p2, i64 %idx.ext63
+  %arrayidx3.1 = getelementptr inbounds i8, ptr %add.ptr, i64 4
+  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
+  %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+  %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63
+  %arrayidx3.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 4
+  %arrayidx5.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 4
+  %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+  %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63
+  %arrayidx3.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 4
+  %arrayidx5.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 4
+  %0 = bitcast ptr %p1 to ptr
+  %1 = load <4 x i8>, ptr %0, align 1
+  %2 = bitcast ptr %p2 to ptr
+  %3 = load <4 x i8>, ptr %2, align 1
+  %4 = bitcast ptr %arrayidx3 to ptr
+  %5 = load <4 x i8>, ptr %4, align 1
+  %6 = bitcast ptr %arrayidx5 to ptr
+  %7 = load <4 x i8>, ptr %6, align 1
+  %8 = bitcast ptr %add.ptr to ptr
+  %9 = load <4 x i8>, ptr %8, align 1
+  %10 = bitcast ptr %add.ptr64 to ptr
+  %11 = load <4 x i8>, ptr %10, align 1
+  %12 = bitcast ptr %arrayidx3.1 to ptr
+  %13 = load <4 x i8>, ptr %12, align 1
+  %14 = bitcast ptr %arrayidx5.1 to ptr
+  %15 = load <4 x i8>, ptr %14, align 1
+  %16 = bitcast ptr %add.ptr.1 to ptr
+  %17 = load <4 x i8>, ptr %16, align 1
+  %18 = bitcast ptr %add.ptr64.1 to ptr
+  %19 = load <4 x i8>, ptr %18, align 1
+  %20 = bitcast ptr %arrayidx3.2 to ptr
+  %21 = load <4 x i8>, ptr %20, align 1
+  %22 = bitcast ptr %arrayidx5.2 to ptr
+  %23 = load <4 x i8>, ptr %22, align 1
+  %24 = bitcast ptr %add.ptr.2 to ptr
+  %25 = load <4 x i8>, ptr %24, align 1
   %26 = shufflevector <4 x i8> %25, <4 x i8> %17, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %27 = shufflevector <4 x i8> %9, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %28 = shufflevector <16 x i8> %26, <16 x i8> %27, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
   %29 = shufflevector <4 x i8> %1, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %30 = shufflevector <16 x i8> %28, <16 x i8> %29, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
   %31 = zext <16 x i8> %30 to <16 x i32>
-  %32 = bitcast i8* %add.ptr64.2 to <4 x i8>*
-  %33 = load <4 x i8>, <4 x i8>* %32, align 1
+  %32 = bitcast ptr %add.ptr64.2 to ptr
+  %33 = load <4 x i8>, ptr %32, align 1
   %34 = shufflevector <4 x i8> %33, <4 x i8> %19, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %35 = shufflevector <4 x i8> %11, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %36 = shufflevector <16 x i8> %34, <16 x i8> %35, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -661,16 +661,16 @@ entry:
   %38 = shufflevector <16 x i8> %36, <16 x i8> %37, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
   %39 = zext <16 x i8> %38 to <16 x i32>
   %40 = sub nsw <16 x i32> %31, %39
-  %41 = bitcast i8* %arrayidx3.3 to <4 x i8>*
-  %42 = load <4 x i8>, <4 x i8>* %41, align 1
+  %41 = bitcast ptr %arrayidx3.3 to ptr
+  %42 = load <4 x i8>, ptr %41, align 1
   %43 = shufflevector <4 x i8> %42, <4 x i8> %21, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %44 = shufflevector <4 x i8> %13, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %45 = shufflevector <16 x i8> %43, <16 x i8> %44, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
   %46 = shufflevector <4 x i8> %5, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %47 = shufflevector <16 x i8> %45, <16 x i8> %46, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
   %48 = zext <16 x i8> %47 to <16 x i32>
-  %49 = bitcast i8* %arrayidx5.3 to <4 x i8>*
-  %50 = load <4 x i8>, <4 x i8>* %49, align 1
+  %49 = bitcast ptr %arrayidx5.3 to ptr
+  %50 = load <4 x i8>, ptr %49, align 1
   %51 = shufflevector <4 x i8> %50, <4 x i8> %23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %52 = shufflevector <4 x i8> %15, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %53 = shufflevector <16 x i8> %51, <16 x i8> %52, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>

diff --git a/llvm/test/Transforms/VectorCombine/X86/load-widening.ll b/llvm/test/Transforms/VectorCombine/X86/load-widening.ll
index c1030c7d54462..a53abab8b7d14 100644
--- a/llvm/test/Transforms/VectorCombine/X86/load-widening.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/load-widening.ll
@@ -79,7 +79,7 @@ define <3 x float> @vec_with_3elts_underaligned(ptr align 8 dereferenceable(16)
 
 ; We don't know we can load 128 bits, but since it's aligned, we still can do wide load.
 ; FIXME: this should still get widened.
-define <3 x float> @vec_with_3elts_underdereferenceable(<3 x float>* align 16 dereferenceable(12) %p) {
+define <3 x float> @vec_with_3elts_underdereferenceable(ptr align 16 dereferenceable(12) %p) {
 ; CHECK-LABEL: @vec_with_3elts_underdereferenceable(
 ; CHECK-NEXT:    [[R:%.*]] = load <3 x float>, ptr [[P:%.*]], align 16
 ; CHECK-NEXT:    ret <3 x float> [[R]]
