[llvm] c603cef - [DSE] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 5 07:35:33 PDT 2022


Author: Nikita Popov
Date: 2022-10-05T16:00:44+02:00
New Revision: c603cefbda1300983e81da545a307572fdb79d1e

URL: https://github.com/llvm/llvm-project/commit/c603cefbda1300983e81da545a307572fdb79d1e
DIFF: https://github.com/llvm/llvm-project/commit/c603cefbda1300983e81da545a307572fdb79d1e.diff

LOG: [DSE] Convert tests to opaque pointers (NFC)

Using https://gist.github.com/nikic/98357b71fd67756b0f064c9517b62a34.

Added: 
    

Modified: 
    llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
    llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
    llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
    llvm/test/Transforms/DeadStoreElimination/2016-07-17-UseAfterFree.ll
    llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll
    llvm/test/Transforms/DeadStoreElimination/OverwriteStoreEnd.ll
    llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
    llvm/test/Transforms/DeadStoreElimination/PartialStore2.ll
    llvm/test/Transforms/DeadStoreElimination/X86/gather-null-pointer.ll
    llvm/test/Transforms/DeadStoreElimination/assume.ll
    llvm/test/Transforms/DeadStoreElimination/atomic-overlapping.ll
    llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
    llvm/test/Transforms/DeadStoreElimination/atomic.ll
    llvm/test/Transforms/DeadStoreElimination/calloc-store.ll
    llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
    llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
    llvm/test/Transforms/DeadStoreElimination/combined-partial-overwrites.ll
    llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
    llvm/test/Transforms/DeadStoreElimination/crash.ll
    llvm/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll
    llvm/test/Transforms/DeadStoreElimination/debug-counter.ll
    llvm/test/Transforms/DeadStoreElimination/debuginfo.ll
    llvm/test/Transforms/DeadStoreElimination/dominate.ll
    llvm/test/Transforms/DeadStoreElimination/fence-todo.ll
    llvm/test/Transforms/DeadStoreElimination/fence.ll
    llvm/test/Transforms/DeadStoreElimination/free.ll
    llvm/test/Transforms/DeadStoreElimination/inst-limits.ll
    llvm/test/Transforms/DeadStoreElimination/int_sideeffect.ll
    llvm/test/Transforms/DeadStoreElimination/invariant.start.ll
    llvm/test/Transforms/DeadStoreElimination/launder.invariant.group.ll
    llvm/test/Transforms/DeadStoreElimination/libcalls-darwin.ll
    llvm/test/Transforms/DeadStoreElimination/libcalls.ll
    llvm/test/Transforms/DeadStoreElimination/lifetime.ll
    llvm/test/Transforms/DeadStoreElimination/loop-invariant-entry-block.ll
    llvm/test/Transforms/DeadStoreElimination/masked-dead-store-inseltpoison.ll
    llvm/test/Transforms/DeadStoreElimination/masked-dead-store.ll
    llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
    llvm/test/Transforms/DeadStoreElimination/memcpy-complete-overwrite.ll
    llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
    llvm/test/Transforms/DeadStoreElimination/memintrinsics.ll
    llvm/test/Transforms/DeadStoreElimination/memory-intrinsics-sizes.ll
    llvm/test/Transforms/DeadStoreElimination/memoryssa-scan-limit.ll
    llvm/test/Transforms/DeadStoreElimination/memset-and-memcpy.ll
    llvm/test/Transforms/DeadStoreElimination/memset-missing-debugloc.ll
    llvm/test/Transforms/DeadStoreElimination/memset-unknown-sizes.ll
    llvm/test/Transforms/DeadStoreElimination/merge-stores-big-endian.ll
    llvm/test/Transforms/DeadStoreElimination/merge-stores.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-captures.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-exceptions.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-loops.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-memintrinsics.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-memoryphis.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-multipath-throwing.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-multipath.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-overlap.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-partial.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-simple.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-throwing.ll
    llvm/test/Transforms/DeadStoreElimination/multiblock-unreachable.ll
    llvm/test/Transforms/DeadStoreElimination/no-targetdata.ll
    llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
    llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
    llvm/test/Transforms/DeadStoreElimination/offsetted-overlapping-stores.ll
    llvm/test/Transforms/DeadStoreElimination/operand-bundles.ll
    llvm/test/Transforms/DeadStoreElimination/out-of-bounds-stores.ll
    llvm/test/Transforms/DeadStoreElimination/overlap.ll
    llvm/test/Transforms/DeadStoreElimination/phi-translation.ll
    llvm/test/Transforms/DeadStoreElimination/pr11390.ll
    llvm/test/Transforms/DeadStoreElimination/pr47285-not-overwritten-on-all-exit-paths.ll
    llvm/test/Transforms/DeadStoreElimination/read-clobber-after-overwrite.ll
    llvm/test/Transforms/DeadStoreElimination/scoped-noalias.ll
    llvm/test/Transforms/DeadStoreElimination/simple-preservation.ll
    llvm/test/Transforms/DeadStoreElimination/simple.ll
    llvm/test/Transforms/DeadStoreElimination/stats.ll
    llvm/test/Transforms/DeadStoreElimination/store-after-loop.ll
    llvm/test/Transforms/DeadStoreElimination/stores-of-existing-values.ll
    llvm/test/Transforms/DeadStoreElimination/tail-byval.ll
    llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
    llvm/test/Transforms/DeadStoreElimination/wrong-malloc-size.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll b/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
index 25c2d5ffe7f56..3c297761e30fa 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
@@ -5,19 +5,17 @@ target triple = "i386-apple-darwin9.8"
 
 @A = external global [0 x i32]
 
-declare ghccc void @Func2(i32*, i32*, i32*, i32)
+declare ghccc void @Func2(ptr, ptr, ptr, i32)
 
-define ghccc void @Func1(i32* noalias %Arg1, i32* noalias %Arg2, i32* %Arg3, i32 %Arg4) {
+define ghccc void @Func1(ptr noalias %Arg1, ptr noalias %Arg2, ptr %Arg3, i32 %Arg4) {
 entry:
-  store i32 add (i32 ptrtoint ([0 x i32]* @A to i32), i32 1), i32* %Arg2
-; CHECK: store i32 add (i32 ptrtoint ([0 x i32]* @A to i32), i32 1), i32* %Arg2
-  %ln2gz = getelementptr i32, i32* %Arg1, i32 14
-  %ln2gA = bitcast i32* %ln2gz to double*
-  %ln2gB = load double, double* %ln2gA
-  %ln2gD = getelementptr i32, i32* %Arg2, i32 -3
-  %ln2gE = bitcast i32* %ln2gD to double*
-  store double %ln2gB, double* %ln2gE
-; CHECK: store double %ln2gB, double* %ln2gE
-  tail call ghccc void @Func2(i32* %Arg1, i32* %Arg2, i32* %Arg3, i32 %Arg4) nounwind
+  store i32 add (i32 ptrtoint (ptr @A to i32), i32 1), ptr %Arg2
+; CHECK: store i32 add (i32 ptrtoint (ptr @A to i32), i32 1), ptr %Arg2
+  %ln2gz = getelementptr i32, ptr %Arg1, i32 14
+  %ln2gB = load double, ptr %ln2gz
+  %ln2gD = getelementptr i32, ptr %Arg2, i32 -3
+  store double %ln2gB, ptr %ln2gD
+; CHECK: store double %ln2gB, ptr %ln2gD
+  tail call ghccc void @Func2(ptr %Arg1, ptr %Arg2, ptr %Arg3, i32 %Arg4) nounwind
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
index e94fd9cb23d43..0ff96234fb927 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
@@ -3,21 +3,19 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin"
 
-%"class.std::auto_ptr" = type { i32* }
+%"class.std::auto_ptr" = type { ptr }
 
 ; CHECK-LABEL: @_Z3foov(
-define void @_Z3foov(%"class.std::auto_ptr"* noalias nocapture sret(%"class.std::auto_ptr") %agg.result) uwtable ssp {
+define void @_Z3foov(ptr noalias nocapture sret(%"class.std::auto_ptr") %agg.result) uwtable ssp {
 _ZNSt8auto_ptrIiED1Ev.exit:
   %temp.lvalue = alloca %"class.std::auto_ptr", align 8
-  call void @_Z3barv(%"class.std::auto_ptr"* sret(%"class.std::auto_ptr") %temp.lvalue)
-  %_M_ptr.i.i = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
-  %tmp.i.i = load i32*, i32** %_M_ptr.i.i, align 8
-; CHECK-NOT: store i32* null
-  store i32* null, i32** %_M_ptr.i.i, align 8
-  %_M_ptr.i.i4 = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
-  store i32* %tmp.i.i, i32** %_M_ptr.i.i4, align 8
+  call void @_Z3barv(ptr sret(%"class.std::auto_ptr") %temp.lvalue)
+  %tmp.i.i = load ptr, ptr %temp.lvalue, align 8
+; CHECK-NOT: store ptr null
+  store ptr null, ptr %temp.lvalue, align 8
+  store ptr %tmp.i.i, ptr %agg.result, align 8
 ; CHECK: ret void
   ret void
 }
 
-declare void @_Z3barv(%"class.std::auto_ptr"* sret(%"class.std::auto_ptr"))
+declare void @_Z3barv(ptr sret(%"class.std::auto_ptr"))

diff  --git a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
index 665d772d03b91..2d25a59b96f80 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
@@ -2,84 +2,79 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-%struct.pair.162 = type { %struct.BasicBlock*, i32, [4 x i8] }
-%struct.BasicBlock = type { %struct.Value, %struct.ilist_node.24, %struct.iplist.22, %struct.Function* }
-%struct.Value = type { i32 (...)**, i8, i8, i16, %struct.Type*, %struct.Use*, %struct.StringMapEntry* }
-%struct.Type = type { %struct.LLVMContext*, i8, [3 x i8], i32, {}* }
-%struct.LLVMContext = type { %struct.LLVMContextImpl* }
+%struct.pair.162 = type { ptr, i32, [4 x i8] }
+%struct.BasicBlock = type { %struct.Value, %struct.ilist_node.24, %struct.iplist.22, ptr }
+%struct.Value = type { ptr, i8, i8, i16, ptr, ptr, ptr }
+%struct.Type = type { ptr, i8, [3 x i8], i32, ptr }
+%struct.LLVMContext = type { ptr }
 %struct.LLVMContextImpl = type opaque
-%struct.Use = type { %struct.Value*, %struct.Use*, %struct.PointerIntPair }
+%struct.Use = type { ptr, ptr, %struct.PointerIntPair }
 %struct.PointerIntPair = type { i64 }
 %struct.StringMapEntry = type opaque
-%struct.ilist_node.24 = type { %struct.ilist_half_node.23, %struct.BasicBlock* }
-%struct.ilist_half_node.23 = type { %struct.BasicBlock* }
-%struct.iplist.22 = type { %struct.ilist_traits.21, %struct.Instruction* }
+%struct.ilist_node.24 = type { %struct.ilist_half_node.23, ptr }
+%struct.ilist_half_node.23 = type { ptr }
+%struct.iplist.22 = type { %struct.ilist_traits.21, ptr }
 %struct.ilist_traits.21 = type { %struct.ilist_half_node.25 }
-%struct.ilist_half_node.25 = type { %struct.Instruction* }
-%struct.Instruction = type { [52 x i8], %struct.ilist_node.26, %struct.BasicBlock*, %struct.DebugLoc }
-%struct.ilist_node.26 = type { %struct.ilist_half_node.25, %struct.Instruction* }
+%struct.ilist_half_node.25 = type { ptr }
+%struct.Instruction = type { [52 x i8], %struct.ilist_node.26, ptr, %struct.DebugLoc }
+%struct.ilist_node.26 = type { %struct.ilist_half_node.25, ptr }
 %struct.DebugLoc = type { i32, i32 }
-%struct.Function = type { %struct.GlobalValue, %struct.ilist_node.14, %struct.iplist.4, %struct.iplist, %struct.ValueSymbolTable*, %struct.AttrListPtr }
-%struct.GlobalValue = type <{ [52 x i8], [4 x i8], %struct.Module*, i8, i16, [5 x i8], %struct.basic_string }>
-%struct.Module = type { %struct.LLVMContext*, %struct.iplist.20, %struct.iplist.16, %struct.iplist.12, %struct.vector.2, %struct.ilist, %struct.basic_string, %struct.ValueSymbolTable*, %struct.OwningPtr, %struct.basic_string, %struct.basic_string, %struct.basic_string, i8* }
-%struct.iplist.20 = type { %struct.ilist_traits.19, %struct.GlobalVariable* }
+%struct.Function = type { %struct.GlobalValue, %struct.ilist_node.14, %struct.iplist.4, %struct.iplist, ptr, %struct.AttrListPtr }
+%struct.GlobalValue = type <{ [52 x i8], [4 x i8], ptr, i8, i16, [5 x i8], %struct.basic_string }>
+%struct.Module = type { ptr, %struct.iplist.20, %struct.iplist.16, %struct.iplist.12, %struct.vector.2, %struct.ilist, %struct.basic_string, ptr, %struct.OwningPtr, %struct.basic_string, %struct.basic_string, %struct.basic_string, ptr }
+%struct.iplist.20 = type { %struct.ilist_traits.19, ptr }
 %struct.ilist_traits.19 = type { %struct.ilist_node.18 }
-%struct.ilist_node.18 = type { %struct.ilist_half_node.17, %struct.GlobalVariable* }
-%struct.ilist_half_node.17 = type { %struct.GlobalVariable* }
+%struct.ilist_node.18 = type { %struct.ilist_half_node.17, ptr }
+%struct.ilist_half_node.17 = type { ptr }
 %struct.GlobalVariable = type { %struct.GlobalValue, %struct.ilist_node.18, i8, [7 x i8] }
-%struct.iplist.16 = type { %struct.ilist_traits.15, %struct.Function* }
+%struct.iplist.16 = type { %struct.ilist_traits.15, ptr }
 %struct.ilist_traits.15 = type { %struct.ilist_node.14 }
-%struct.ilist_node.14 = type { %struct.ilist_half_node.13, %struct.Function* }
-%struct.ilist_half_node.13 = type { %struct.Function* }
-%struct.iplist.12 = type { %struct.ilist_traits.11, %struct.GlobalAlias* }
+%struct.ilist_node.14 = type { %struct.ilist_half_node.13, ptr }
+%struct.ilist_half_node.13 = type { ptr }
+%struct.iplist.12 = type { %struct.ilist_traits.11, ptr }
 %struct.ilist_traits.11 = type { %struct.ilist_node.10 }
-%struct.ilist_node.10 = type { %struct.ilist_half_node.9, %struct.GlobalAlias* }
-%struct.ilist_half_node.9 = type { %struct.GlobalAlias* }
+%struct.ilist_node.10 = type { %struct.ilist_half_node.9, ptr }
+%struct.ilist_half_node.9 = type { ptr }
 %struct.GlobalAlias = type { %struct.GlobalValue, %struct.ilist_node.10 }
 %struct.vector.2 = type { %struct._Vector_base.1 }
 %struct._Vector_base.1 = type { %struct._Vector_impl.0 }
-%struct._Vector_impl.0 = type { %struct.basic_string*, %struct.basic_string*, %struct.basic_string* }
+%struct._Vector_impl.0 = type { ptr, ptr, ptr }
 %struct.basic_string = type { %struct._Alloc_hider }
-%struct._Alloc_hider = type { i8* }
+%struct._Alloc_hider = type { ptr }
 %struct.ilist = type { %struct.iplist.8 }
-%struct.iplist.8 = type { %struct.ilist_traits.7, %struct.NamedMDNode* }
+%struct.iplist.8 = type { %struct.ilist_traits.7, ptr }
 %struct.ilist_traits.7 = type { %struct.ilist_node.6 }
-%struct.ilist_node.6 = type { %struct.ilist_half_node.5, %struct.NamedMDNode* }
-%struct.ilist_half_node.5 = type { %struct.NamedMDNode* }
-%struct.NamedMDNode = type { %struct.ilist_node.6, %struct.basic_string, %struct.Module*, i8* }
+%struct.ilist_node.6 = type { %struct.ilist_half_node.5, ptr }
+%struct.ilist_half_node.5 = type { ptr }
+%struct.NamedMDNode = type { %struct.ilist_node.6, %struct.basic_string, ptr, ptr }
 %struct.ValueSymbolTable = type opaque
-%struct.OwningPtr = type { %struct.GVMaterializer* }
+%struct.OwningPtr = type { ptr }
 %struct.GVMaterializer = type opaque
-%struct.iplist.4 = type { %struct.ilist_traits.3, %struct.BasicBlock* }
+%struct.iplist.4 = type { %struct.ilist_traits.3, ptr }
 %struct.ilist_traits.3 = type { %struct.ilist_half_node.23 }
-%struct.iplist = type { %struct.ilist_traits, %struct.Argument* }
+%struct.iplist = type { %struct.ilist_traits, ptr }
 %struct.ilist_traits = type { %struct.ilist_half_node }
-%struct.ilist_half_node = type { %struct.Argument* }
-%struct.Argument = type { %struct.Value, %struct.ilist_node, %struct.Function* }
-%struct.ilist_node = type { %struct.ilist_half_node, %struct.Argument* }
-%struct.AttrListPtr = type { %struct.AttributeListImpl* }
+%struct.ilist_half_node = type { ptr }
+%struct.Argument = type { %struct.Value, %struct.ilist_node, ptr }
+%struct.ilist_node = type { %struct.ilist_half_node, ptr }
+%struct.AttrListPtr = type { ptr }
 %struct.AttributeListImpl = type opaque
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 ; CHECK: _ZSt9iter_swapIPSt4pairIPN4llvm10BasicBlockEjES5_EvT_T0_
 ; CHECK: store
 ; CHECK: ret void
-define void @_ZSt9iter_swapIPSt4pairIPN4llvm10BasicBlockEjES5_EvT_T0_(%struct.pair.162* %__a, %struct.pair.162* %__b) nounwind uwtable inlinehint {
+define void @_ZSt9iter_swapIPSt4pairIPN4llvm10BasicBlockEjES5_EvT_T0_(ptr %__a, ptr %__b) nounwind uwtable inlinehint {
 entry:
   %memtmp = alloca %struct.pair.162, align 8
-  %0 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %memtmp, i64 0, i32 0
-  %1 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %__a, i64 0, i32 0
-  %2 = load %struct.BasicBlock*, %struct.BasicBlock** %1, align 8
-  store %struct.BasicBlock* %2, %struct.BasicBlock** %0, align 8
-  %3 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %memtmp, i64 0, i32 1
-  %4 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %__a, i64 0, i32 1
-  %5 = load i32, i32* %4, align 4
-  store i32 %5, i32* %3, align 8
-  %6 = bitcast %struct.pair.162* %__a to i8*
-  %7 = bitcast %struct.pair.162* %__b to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* %7, i64 12, i1 false)
-  %8 = bitcast %struct.pair.162* %memtmp to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %7, i8* %8, i64 12, i1 false)
+  %0 = load ptr, ptr %__a, align 8
+  store ptr %0, ptr %memtmp, align 8
+  %1 = getelementptr inbounds %struct.pair.162, ptr %memtmp, i64 0, i32 1
+  %2 = getelementptr inbounds %struct.pair.162, ptr %__a, i64 0, i32 1
+  %3 = load i32, ptr %2, align 4
+  store i32 %3, ptr %1, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %__a, ptr %__b, i64 12, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %__b, ptr %memtmp, i64 12, i1 false)
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/2016-07-17-UseAfterFree.ll b/llvm/test/Transforms/DeadStoreElimination/2016-07-17-UseAfterFree.ll
index 4bbbcb870e5a2..b4c417c1f7ae2 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2016-07-17-UseAfterFree.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2016-07-17-UseAfterFree.ll
@@ -5,28 +5,27 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @_UPT_destroy(i8* nocapture %ptr) local_unnamed_addr #0 {
+define void @_UPT_destroy(ptr nocapture %ptr) local_unnamed_addr #0 {
 entry:
-  %edi = getelementptr inbounds i8, i8* %ptr, i64 8
+  %edi = getelementptr inbounds i8, ptr %ptr, i64 8
 
-; CHECK-NOT: tail call void @llvm.memset.p0i8.i64(i8* align 8 %edi, i8 0, i64 176, i1 false)
-; CHECK-NOT: store i32 -1, i32* %addr
+; CHECK-NOT: tail call void @llvm.memset.p0.i64(ptr align 8 %edi, i8 0, i64 176, i1 false)
+; CHECK-NOT: store i32 -1, ptr %addr
 
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 %edi, i8 0, i64 176, i1 false)
-  %format4.i = getelementptr inbounds i8, i8* %ptr, i64 144
-  %addr = bitcast i8* %format4.i to i32*
-  store i32 -1, i32* %addr, align 8
+  tail call void @llvm.memset.p0.i64(ptr align 8 %edi, i8 0, i64 176, i1 false)
+  %format4.i = getelementptr inbounds i8, ptr %ptr, i64 144
+  store i32 -1, ptr %format4.i, align 8
 
 ; CHECK: tail call void @free
-  tail call void @free(i8* nonnull %ptr)
+  tail call void @free(ptr nonnull %ptr)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @free(i8* nocapture allocptr) local_unnamed_addr #0
+declare void @free(ptr nocapture allocptr) local_unnamed_addr #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
 
 attributes #0 = { nounwind allockind("free")}
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll b/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll
index 03f0d7205d6a4..8ea3a6a700bef 100644
--- a/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll
@@ -1,489 +1,404 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 
-define void @write4to7(i32* nocapture %p) {
+define void @write4to7(ptr nocapture %p) {
 ; CHECK-LABEL: @write4to7(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i1 false)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i1 false)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @write4to7_weird_element_type(i32* nocapture %p) {
+define void @write4to7_weird_element_type(ptr nocapture %p) {
 ; CHECK-LABEL: @write4to7_weird_element_type(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
-; CHECK-NEXT:    call void @llvm.memset.p0i32.i64(i32* align 4 [[TMP2]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP1]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  call void @llvm.memset.p0i32.i64(i32* align 4 %arrayidx0, i8 0, i64 28, i1 false)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i1 false)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @write4to7_addrspace(i32 addrspace(1)* nocapture %p) {
+define void @write4to7_addrspace(ptr addrspace(1) nocapture %p) {
 ; CHECK-LABEL: @write4to7_addrspace(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32 addrspace(1)* [[ARRAYIDX0]] to i8 addrspace(1)*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 [[TMP0]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32 addrspace(1)* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr addrspace(1) [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 1
-  %p3 = bitcast i32 addrspace(1)* %arrayidx0 to i8 addrspace(1)*
-  call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %p3, i8 0, i64 28, i1 false)
-  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 1
-  store i32 1, i32 addrspace(1)* %arrayidx1, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr addrspace(1) %p, i64 1
+  call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 %arrayidx0, i8 0, i64 28, i1 false)
+  %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %p, i64 1
+  store i32 1, ptr addrspace(1) %arrayidx1, align 4
   ret void
 }
 
-define void @write4to7_atomic(i32* nocapture %p) {
+define void @write4to7_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @write4to7_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
-  store atomic i32 1, i32* %arrayidx1 unordered, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i32 4)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1
+  store atomic i32 1, ptr %arrayidx1 unordered, align 4
   ret void
 }
 
-define void @write0to3(i32* nocapture %p) {
+define void @write0to3(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i1 false)
-  store i32 1, i32* %p, align 4
+  call void @llvm.memset.p0.i64(ptr align 4 %p, i8 0, i64 28, i1 false)
+  store i32 1, ptr %p, align 4
   ret void
 }
 
-define void @write0to3_atomic(i32* nocapture %p) {
+define void @write0to3_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to3_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    store atomic i32 1, i32* [[P]] unordered, align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    store atomic i32 1, ptr [[P]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
-  store atomic i32 1, i32* %p unordered, align 4
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %p, i8 0, i64 28, i32 4)
+  store atomic i32 1, ptr %p unordered, align 4
   ret void
 }
 
 ; Atomicity of the store is weaker from the memset
-define void @write0to3_atomic_weaker(i32* nocapture %p) {
+define void @write0to3_atomic_weaker(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to3_atomic_weaker(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
-  store i32 1, i32* %p, align 4
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %p, i8 0, i64 28, i32 4)
+  store i32 1, ptr %p, align 4
   ret void
 }
 
-define void @write0to7(i32* nocapture %p) {
+define void @write0to7(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to7(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 8
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P4]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    store i64 1, ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i1 false)
-  %p4 = bitcast i32* %p to i64*
-  store i64 1, i64* %p4, align 8
+  call void @llvm.memset.p0.i64(ptr align 4 %p, i8 0, i64 32, i1 false)
+  store i64 1, ptr %p, align 8
   ret void
 }
 
 ; Changing the memset start and length is okay here because the
 ; store is a multiple of the memset element size
-define void @write0to7_atomic(i32* nocapture %p) {
+define void @write0to7_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to7_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 8
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i64*
-; CHECK-NEXT:    store atomic i64 1, i64* [[P4]] unordered, align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 8
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    store atomic i64 1, ptr [[P]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i32 4)
-  %p4 = bitcast i32* %p to i64*
-  store atomic i64 1, i64* %p4 unordered, align 8
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %p, i8 0, i64 32, i32 4)
+  store atomic i64 1, ptr %p unordered, align 8
   ret void
 }
 
-define void @write0to7_2(i32* nocapture %p) {
+define void @write0to7_2(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to7_2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P4]], align 8
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    store i64 1, ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i1 false)
-  %p4 = bitcast i32* %p to i64*
-  store i64 1, i64* %p4, align 8
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i1 false)
+  store i64 1, ptr %p, align 8
   ret void
 }
 
-define void @write0to7_2_atomic(i32* nocapture %p) {
+define void @write0to7_2_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @write0to7_2_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i64*
-; CHECK-NEXT:    store atomic i64 1, i64* [[P4]] unordered, align 8
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    store atomic i64 1, ptr [[P]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
-  %p4 = bitcast i32* %p to i64*
-  store atomic i64 1, i64* %p4 unordered, align 8
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i32 4)
+  store atomic i64 1, ptr %p unordered, align 8
   ret void
 }
 
 ; We do not trim the beginning of the earlier write if the alignment of the
 ; start pointer is changed.
-define void @dontwrite0to3_align8(i32* nocapture %p) {
+define void @dontwrite0to3_align8(ptr nocapture %p) {
 ; CHECK-LABEL: @dontwrite0to3_align8(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[P3]], i8 0, i64 32, i1 false)
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %p3, i8 0, i64 32, i1 false)
-  store i32 1, i32* %p, align 4
+  call void @llvm.memset.p0.i64(ptr align 8 %p, i8 0, i64 32, i1 false)
+  store i32 1, ptr %p, align 4
   ret void
 }
 
-define void @dontwrite0to3_align8_atomic(i32* nocapture %p) {
+define void @dontwrite0to3_align8_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @dontwrite0to3_align8_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[P3]], i8 0, i64 32, i32 4)
-; CHECK-NEXT:    store atomic i32 1, i32* [[P]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 32, i32 4)
+; CHECK-NEXT:    store atomic i32 1, ptr [[P]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %p3, i8 0, i64 32, i32 4)
-  store atomic i32 1, i32* %p unordered, align 4
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %p, i8 0, i64 32, i32 4)
+  store atomic i32 1, ptr %p unordered, align 4
   ret void
 }
 
-define void @dontwrite0to1(i32* nocapture %p) {
+define void @dontwrite0to1(ptr nocapture %p) {
 ; CHECK-LABEL: @dontwrite0to1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 32, i1 false)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    store i16 1, i16* [[P4]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[P:%.*]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    store i16 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i1 false)
-  %p4 = bitcast i32* %p to i16*
-  store i16 1, i16* %p4, align 4
+  call void @llvm.memset.p0.i64(ptr align 4 %p, i8 0, i64 32, i1 false)
+  store i16 1, ptr %p, align 4
   ret void
 }
 
-define void @dontwrite0to1_atomic(i32* nocapture %p) {
+define void @dontwrite0to1_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @dontwrite0to1_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 32, i32 4)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    store atomic i16 1, i16* [[P4]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[P:%.*]], i8 0, i64 32, i32 4)
+; CHECK-NEXT:    store atomic i16 1, ptr [[P]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i32 4)
-  %p4 = bitcast i32* %p to i16*
-  store atomic i16 1, i16* %p4 unordered, align 4
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %p, i8 0, i64 32, i32 4)
+  store atomic i16 1, ptr %p unordered, align 4
   ret void
 }
 
-define void @write2to10(i32* nocapture %p) {
+define void @write2to10(ptr nocapture %p) {
 ; CHECK-LABEL: @write2to10(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 28, i1 false)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[P4]], i64 1
-; CHECK-NEXT:    [[P5:%.*]] = bitcast i16* [[ARRAYIDX2]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P5]], align 8
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 1
+; CHECK-NEXT:    store i64 1, ptr [[ARRAYIDX2]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i1 false)
-  %p4 = bitcast i32* %p to i16*
-  %arrayidx2 = getelementptr inbounds i16, i16* %p4, i64 1
-  %p5 = bitcast i16* %arrayidx2 to i64*
-  store i64 1, i64* %p5, align 8
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 32, i1 false)
+  %arrayidx2 = getelementptr inbounds i16, ptr %p, i64 1
+  store i64 1, ptr %arrayidx2, align 8
   ret void
 }
 
-define void @write2to10_atomic(i32* nocapture %p) {
+define void @write2to10_atomic(ptr nocapture %p) {
 ; CHECK-LABEL: @write2to10_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 28, i32 4)
-; CHECK-NEXT:    [[P4:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[P4]], i64 1
-; CHECK-NEXT:    [[P5:%.*]] = bitcast i16* [[ARRAYIDX2]] to i64*
-; CHECK-NEXT:    store atomic i64 1, i64* [[P5]] unordered, align 8
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 28, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 1
+; CHECK-NEXT:    store atomic i64 1, ptr [[ARRAYIDX2]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i32 4)
-  %p4 = bitcast i32* %p to i16*
-  %arrayidx2 = getelementptr inbounds i16, i16* %p4, i64 1
-  %p5 = bitcast i16* %arrayidx2 to i64*
-  store atomic i64 1, i64* %p5 unordered, align 8
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 32, i32 4)
+  %arrayidx2 = getelementptr inbounds i16, ptr %p, i64 1
+  store atomic i64 1, ptr %arrayidx2 unordered, align 8
   ret void
 }
 
-define void @write8To15AndThen0To7(i64* nocapture %P) {
+define void @write8To15AndThen0To7(ptr nocapture %P) {
 ; CHECK-LABEL: @write8To15AndThen0To7(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[MYBASE0]], i64 16
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 16, i1 false)
-; CHECK-NEXT:    [[BASE64_0:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 0
-; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1
-; CHECK-NEXT:    store i64 1, i64* [[BASE64_1]], align 4
-; CHECK-NEXT:    store i64 2, i64* [[BASE64_0]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 16
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr align 8 [[TMP0]], i8 0, i64 16, i1 false)
+; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 1
+; CHECK-NEXT:    store i64 1, ptr [[BASE64_1]], align 4
+; CHECK-NEXT:    store i64 2, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr align 8 %P, i8 0, i64 32, i1 false)
 
-  %base64_0 = getelementptr inbounds i64, i64* %P, i64 0
-  %base64_1 = getelementptr inbounds i64, i64* %P, i64 1
+  %base64_1 = getelementptr inbounds i64, ptr %P, i64 1
 
-  store i64 1, i64* %base64_1
-  store i64 2, i64* %base64_0
+  store i64 1, ptr %base64_1
+  store i64 2, ptr %P
   ret void
 }
 
-define void @write8To15AndThen0To7_atomic(i64* nocapture %P) {
+define void @write8To15AndThen0To7_atomic(ptr nocapture %P) {
 ; CHECK-LABEL: @write8To15AndThen0To7_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[MYBASE0]], i64 16
-; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 16, i32 8)
-; CHECK-NEXT:    [[BASE64_0:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 0
-; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1
-; CHECK-NEXT:    store atomic i64 1, i64* [[BASE64_1]] unordered, align 8
-; CHECK-NEXT:    store atomic i64 2, i64* [[BASE64_0]] unordered, align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 16
+; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[TMP0]], i8 0, i64 16, i32 8)
+; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 1
+; CHECK-NEXT:    store atomic i64 1, ptr [[BASE64_1]] unordered, align 8
+; CHECK-NEXT:    store atomic i64 2, ptr [[P]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
 
-  %base64_0 = getelementptr inbounds i64, i64* %P, i64 0
-  %base64_1 = getelementptr inbounds i64, i64* %P, i64 1
+  %base64_1 = getelementptr inbounds i64, ptr %P, i64 1
 
-  store atomic i64 1, i64* %base64_1 unordered, align 8
-  store atomic i64 2, i64* %base64_0 unordered, align 8
+  store atomic i64 1, ptr %base64_1 unordered, align 8
+  store atomic i64 2, ptr %P unordered, align 8
   ret void
 }
 
-define void @write8To15AndThen0To7_atomic_weaker(i64* nocapture %P) {
+define void @write8To15AndThen0To7_atomic_weaker(ptr nocapture %P) {
 ; CHECK-LABEL: @write8To15AndThen0To7_atomic_weaker(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[MYBASE0]], i64 16
-; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 16, i32 8)
-; CHECK-NEXT:    [[BASE64_0:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 0
-; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1
-; CHECK-NEXT:    store atomic i64 1, i64* [[BASE64_1]] unordered, align 8
-; CHECK-NEXT:    store i64 2, i64* [[BASE64_0]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 16
+; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[TMP0]], i8 0, i64 16, i32 8)
+; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 1
+; CHECK-NEXT:    store atomic i64 1, ptr [[BASE64_1]] unordered, align 8
+; CHECK-NEXT:    store i64 2, ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
 
-  %base64_0 = getelementptr inbounds i64, i64* %P, i64 0
-  %base64_1 = getelementptr inbounds i64, i64* %P, i64 1
+  %base64_1 = getelementptr inbounds i64, ptr %P, i64 1
 
-  store atomic i64 1, i64* %base64_1 unordered, align 8
-  store i64 2, i64* %base64_0, align 8
+  store atomic i64 1, ptr %base64_1 unordered, align 8
+  store i64 2, ptr %P, align 8
   ret void
 }
 
-define void @write8To15AndThen0To7_atomic_weaker_2(i64* nocapture %P) {
+define void @write8To15AndThen0To7_atomic_weaker_2(ptr nocapture %P) {
 ; CHECK-LABEL: @write8To15AndThen0To7_atomic_weaker_2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[MYBASE0]], i64 16
-; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 16, i32 8)
-; CHECK-NEXT:    [[BASE64_0:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 0
-; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1
-; CHECK-NEXT:    store i64 1, i64* [[BASE64_1]], align 8
-; CHECK-NEXT:    store atomic i64 2, i64* [[BASE64_0]] unordered, align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 16
+; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[TMP0]], i8 0, i64 16, i32 8)
+; CHECK-NEXT:    [[BASE64_1:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 1
+; CHECK-NEXT:    store i64 1, ptr [[BASE64_1]], align 8
+; CHECK-NEXT:    store atomic i64 2, ptr [[P]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
 
-  %base64_0 = getelementptr inbounds i64, i64* %P, i64 0
-  %base64_1 = getelementptr inbounds i64, i64* %P, i64 1
+  %base64_1 = getelementptr inbounds i64, ptr %P, i64 1
 
-  store i64 1, i64* %base64_1, align 8
-  store atomic i64 2, i64* %base64_0 unordered, align 8
+  store i64 1, ptr %base64_1, align 8
+  store atomic i64 2, ptr %P unordered, align 8
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.p0i32.i64(i32* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p1.i64(ptr addrspace(1) nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
 
-define void @ow_begin_align1(i8* nocapture %p) {
+define void @ow_begin_align1(ptr nocapture %p) {
 ; CHECK-LABEL: @ow_begin_align1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 7
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP0]], i8 0, i64 25, i1 false)
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i8* [[P]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P2]], align 1
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 7
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[TMP0]], i8 0, i64 25, i1 false)
+; CHECK-NEXT:    store i64 1, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  call void @llvm.memset.p0i8.i64(i8* align 1 %p1, i8 0, i64 32, i1 false)
-  %p2 = bitcast i8* %p to i64*
-  store i64 1, i64* %p2, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 1 %p1, i8 0, i64 32, i1 false)
+  store i64 1, ptr %p, align 1
   ret void
 }
 
-define void @ow_end_align4(i8* nocapture %p) {
+define void @ow_end_align4(ptr nocapture %p) {
 ; CHECK-LABEL: @ow_end_align4(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 28, i1 false)
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i8* [[P]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P2]], align 1
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    store i64 1, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p1, i8 0, i64 32, i1 false)
-  %p2 = bitcast i8* %p to i64*
-  store i64 1, i64* %p2, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %p1, i8 0, i64 32, i1 false)
+  store i64 1, ptr %p, align 1
   ret void
 }
 
-define void @ow_end_align8(i8* nocapture %p) {
+define void @ow_end_align8(ptr nocapture %p) {
 ; CHECK-LABEL: @ow_end_align8(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 1
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[P1]], i8 0, i64 32, i1 false)
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i8* [[P]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P2]], align 1
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[P1]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    store i64 1, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  call void @llvm.memset.p0i8.i64(i8* align 8 %p1, i8 0, i64 32, i1 false)
-  %p2 = bitcast i8* %p to i64*
-  store i64 1, i64* %p2, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 8 %p1, i8 0, i64 32, i1 false)
+  store i64 1, ptr %p, align 1
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreEnd.ll b/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreEnd.ll
index c3f377e085a2a..cab5a6e704e53 100644
--- a/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreEnd.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreEnd.ll
@@ -8,441 +8,387 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 @glob1 = global %struct.vec2 zeroinitializer, align 16
 @glob2 = global %struct.vec2plusi zeroinitializer, align 16
 
-define void @write24to28(i32* nocapture %p) nounwind uwtable ssp {
+define void @write24to28(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write24to28(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 24, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i1 false)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i1 false)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @write24to28_atomic(i32* nocapture %p) nounwind uwtable ssp {
+define void @write24to28_atomic(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write24to28_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store atomic i32 1, i32* %arrayidx1 unordered, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i32 4)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store atomic i32 1, ptr %arrayidx1 unordered, align 4
   ret void
 }
 
 ; Atomicity of the store is weaker from the memset
-define void @write24to28_atomic_weaker(i32* nocapture %p) nounwind uwtable ssp {
+define void @write24to28_atomic_weaker(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write24to28_atomic_weaker(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 24, i32 4)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 24, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i32 4)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @write28to32(i32* nocapture %p) nounwind uwtable ssp {
+define void @write28to32(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write28to32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[P:%.*]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i1 false)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store i32 1, i32* %arrayidx1, align 4
+  call void @llvm.memset.p0.i64(ptr align 4 %p, i8 0, i64 32, i1 false)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @write28to32_atomic(i32* nocapture %p) nounwind uwtable ssp {
+define void @write28to32_atomic(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write28to32_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i32 4)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[P:%.*]], i8 0, i64 28, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i32 4)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store atomic i32 1, i32* %arrayidx1 unordered, align 4
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %p, i8 0, i64 32, i32 4)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store atomic i32 1, ptr %arrayidx1 unordered, align 4
   ret void
 }
 
-define void @dontwrite28to32memset(i32* nocapture %p) nounwind uwtable ssp {
+define void @dontwrite28to32memset(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @dontwrite28to32memset(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 16 [[P3]], i8 0, i64 32, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[P:%.*]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 16 %p3, i8 0, i64 32, i1 false)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store i32 1, i32* %arrayidx1, align 4
+  call void @llvm.memset.p0.i64(ptr align 16 %p, i8 0, i64 32, i1 false)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @dontwrite28to32memset_atomic(i32* nocapture %p) nounwind uwtable ssp {
+define void @dontwrite28to32memset_atomic(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @dontwrite28to32memset_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 16 [[P3]], i8 0, i64 32, i32 4)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 16 [[P:%.*]], i8 0, i64 32, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
+; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p3 = bitcast i32* %p to i8*
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 16 %p3, i8 0, i64 32, i32 4)
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
-  store atomic i32 1, i32* %arrayidx1 unordered, align 4
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 16 %p, i8 0, i64 32, i32 4)
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
+  store atomic i32 1, ptr %arrayidx1 unordered, align 4
   ret void
 }
 
-define void @write32to36(%struct.vec2plusi* nocapture %p) nounwind uwtable ssp {
+define void @write32to36(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write32to36(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2plusi* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i1 false)
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], %struct.vec2plusi* [[P]], i64 0, i32 2
-; CHECK-NEXT:    store i32 1, i32* [[C]], align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob2, i64 32, i1 false)
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], ptr [[P]], i64 0, i32 2
+; CHECK-NEXT:    store i32 1, ptr [[C]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2plusi* %p to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i1 false)
-  %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
-  store i32 1, i32* %c, align 4
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob2, i64 36, i1 false)
+  %c = getelementptr inbounds %struct.vec2plusi, ptr %p, i64 0, i32 2
+  store i32 1, ptr %c, align 4
   ret void
 }
 
-define void @write32to36_atomic(%struct.vec2plusi* nocapture %p) nounwind uwtable ssp {
+define void @write32to36_atomic(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write32to36_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2plusi* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i32 4)
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], %struct.vec2plusi* [[P]], i64 0, i32 2
-; CHECK-NEXT:    store atomic i32 1, i32* [[C]] unordered, align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob2, i64 32, i32 4)
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], ptr [[P]], i64 0, i32 2
+; CHECK-NEXT:    store atomic i32 1, ptr [[C]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2plusi* %p to i8*
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i32 4)
-  %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
-  store atomic i32 1, i32* %c unordered, align 4
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob2, i64 36, i32 4)
+  %c = getelementptr inbounds %struct.vec2plusi, ptr %p, i64 0, i32 2
+  store atomic i32 1, ptr %c unordered, align 4
   ret void
 }
 
 ; Atomicity of the store is weaker than the memcpy
-define void @write32to36_atomic_weaker(%struct.vec2plusi* nocapture %p) nounwind uwtable ssp {
+define void @write32to36_atomic_weaker(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write32to36_atomic_weaker(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2plusi* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i32 4)
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], %struct.vec2plusi* [[P]], i64 0, i32 2
-; CHECK-NEXT:    store i32 1, i32* [[C]], align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob2, i64 32, i32 4)
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], ptr [[P]], i64 0, i32 2
+; CHECK-NEXT:    store i32 1, ptr [[C]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2plusi* %p to i8*
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i32 4)
-  %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
-  store i32 1, i32* %c, align 4
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob2, i64 36, i32 4)
+  %c = getelementptr inbounds %struct.vec2plusi, ptr %p, i64 0, i32 2
+  store i32 1, ptr %c, align 4
   ret void
 }
 
-define void @write16to32(%struct.vec2* nocapture %p) nounwind uwtable ssp {
+define void @write16to32(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write16to32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 16, i1 false)
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 1
-; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* [[C]], align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 16, i1 false)
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 1
+; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[C]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2* %p to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i1 false)
-  %c = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 1
-  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %c, align 4
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i1 false)
+  %c = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 1
+  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %c, align 4
   ret void
 }
 
-define void @write16to32_atomic(%struct.vec2* nocapture %p) nounwind uwtable ssp {
+define void @write16to32_atomic(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @write16to32_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 16, i32 4)
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 1
-; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* [[C]], align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 16, i32 4)
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 1
+; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[C]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2* %p to i8*
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 4)
-  %c = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 1
-  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %c, align 4
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i32 4)
+  %c = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 1
+  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %c, align 4
   ret void
 }
 
-define void @dontwrite28to32memcpy(%struct.vec2* nocapture %p) nounwind uwtable ssp {
+define void @dontwrite28to32memcpy(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @dontwrite28to32memcpy(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i1 false)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 0, i64 7
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 32, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 0, i64 7
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2* %p to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i1 false)
-  %arrayidx1 = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 0, i64 7
-  store i32 1, i32* %arrayidx1, align 4
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i1 false)
+  %arrayidx1 = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 0, i64 7
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @dontwrite28to32memcpy_atomic(%struct.vec2* nocapture %p) nounwind uwtable ssp {
+define void @dontwrite28to32memcpy_atomic(ptr nocapture %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @dontwrite28to32memcpy_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 4)
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 0, i64 7
-; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 32, i32 4)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 0, i64 7
+; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = bitcast %struct.vec2* %p to i8*
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 4)
-  %arrayidx1 = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 0, i64 7
-  store atomic i32 1, i32* %arrayidx1 unordered, align 4
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i32 4)
+  %arrayidx1 = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 0, i64 7
+  store atomic i32 1, ptr %arrayidx1 unordered, align 4
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
 
 %struct.trapframe = type { i64, i64, i64 }
 
 ; bugzilla 11455 - make sure negative GEP's don't break this optimisation
-define void @cpu_lwp_fork(%struct.trapframe* %md_regs, i64 %pcb_rsp0) nounwind uwtable noinline ssp {
+define void @cpu_lwp_fork(ptr %md_regs, i64 %pcb_rsp0) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: @cpu_lwp_fork(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = inttoptr i64 [[PCB_RSP0:%.*]] to %struct.trapframe*
-; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME:%.*]], %struct.trapframe* [[TMP0]], i64 -1
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast %struct.trapframe* [[ADD_PTR]] to i8*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast %struct.trapframe* [[MD_REGS:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP1]], i8* [[TMP2]], i64 24, i1 false)
-; CHECK-NEXT:    [[TF_TRAPNO:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME]], %struct.trapframe* [[TMP0]], i64 -1, i32 1
-; CHECK-NEXT:    store i64 3, i64* [[TF_TRAPNO]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = inttoptr i64 [[PCB_RSP0:%.*]] to ptr
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME:%.*]], ptr [[TMP0]], i64 -1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[ADD_PTR]], ptr [[MD_REGS:%.*]], i64 24, i1 false)
+; CHECK-NEXT:    [[TF_TRAPNO:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME]], ptr [[TMP0]], i64 -1, i32 1
+; CHECK-NEXT:    store i64 3, ptr [[TF_TRAPNO]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = inttoptr i64 %pcb_rsp0 to %struct.trapframe*
-  %add.ptr = getelementptr inbounds %struct.trapframe, %struct.trapframe* %0, i64 -1
-  %1 = bitcast %struct.trapframe* %add.ptr to i8*
-  %2 = bitcast %struct.trapframe* %md_regs to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 24, i1 false)
-  %tf_trapno = getelementptr inbounds %struct.trapframe, %struct.trapframe* %0, i64 -1, i32 1
-  store i64 3, i64* %tf_trapno, align 8
+  %0 = inttoptr i64 %pcb_rsp0 to ptr
+  %add.ptr = getelementptr inbounds %struct.trapframe, ptr %0, i64 -1
+  call void @llvm.memcpy.p0.p0.i64(ptr %add.ptr, ptr %md_regs, i64 24, i1 false)
+  %tf_trapno = getelementptr inbounds %struct.trapframe, ptr %0, i64 -1, i32 1
+  store i64 3, ptr %tf_trapno, align 8
   ret void
 }
 
-define void @write16To23AndThen24To31(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
+define void @write16To23AndThen24To31(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
 ; CHECK-LABEL: @write16To23AndThen24To31(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i1 false)
-; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
-; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
-; CHECK-NEXT:    store i64 3, i64* [[BASE64_2]]
-; CHECK-NEXT:    store i64 3, i64* [[BASE64_3]]
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i1 false)
+; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
+; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
+; CHECK-NEXT:    store i64 3, ptr [[BASE64_2]]
+; CHECK-NEXT:    store i64 3, ptr [[BASE64_3]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr align 8 %P, i8 0, i64 32, i1 false)
 
-  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
-  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3
+  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
+  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
 
-  store i64 3, i64* %base64_2
-  store i64 3, i64* %base64_3
+  store i64 3, ptr %base64_2
+  store i64 3, ptr %base64_3
   ret void
 }
 
-define void @write16To23AndThen24To31_atomic(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
+define void @write16To23AndThen24To31_atomic(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
 ; CHECK-LABEL: @write16To23AndThen24To31_atomic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i32 8)
-; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
-; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
-; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_2]] unordered, align 8
-; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_3]] unordered, align 8
+; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i32 8)
+; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
+; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
+; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_2]] unordered, align 8
+; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_3]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
 
-  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
-  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3
+  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
+  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
 
-  store atomic i64 3, i64* %base64_2 unordered, align 8
-  store atomic i64 3, i64* %base64_3 unordered, align 8
+  store atomic i64 3, ptr %base64_2 unordered, align 8
+  store atomic i64 3, ptr %base64_3 unordered, align 8
   ret void
 }
 
-define void @write16To23AndThen24To31_atomic_weaker1(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
+define void @write16To23AndThen24To31_atomic_weaker1(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
 ; CHECK-LABEL: @write16To23AndThen24To31_atomic_weaker1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i32 8)
-; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
-; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
-; CHECK-NEXT:    store i64 3, i64* [[BASE64_2]], align 8
-; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_3]] unordered, align 8
+; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i32 8)
+; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
+; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
+; CHECK-NEXT:    store i64 3, ptr [[BASE64_2]], align 8
+; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_3]] unordered, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
 
-  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
-  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3
+  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
+  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
 
-  store i64 3, i64* %base64_2, align 8
-  store atomic i64 3, i64* %base64_3 unordered, align 8
+  store i64 3, ptr %base64_2, align 8
+  store atomic i64 3, ptr %base64_3 unordered, align 8
   ret void
 }
 
-define void @write16To23AndThen24To31_atomic_weaker2(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
+define void @write16To23AndThen24To31_atomic_weaker2(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
 ; CHECK-LABEL: @write16To23AndThen24To31_atomic_weaker2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
-; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
-; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i32 8)
-; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
-; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
-; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_2]] unordered, align 8
-; CHECK-NEXT:    store i64 3, i64* [[BASE64_3]], align 8
+; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i32 8)
+; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
+; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
+; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_2]] unordered, align 8
+; CHECK-NEXT:    store i64 3, ptr [[BASE64_3]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  %base0 = bitcast i64* %P to i8*
-  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
 
-  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
-  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3
+  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
+  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
 
-  store atomic i64 3, i64* %base64_2 unordered, align 8
-  store i64 3, i64* %base64_3, align 8
+  store atomic i64 3, ptr %base64_2 unordered, align 8
+  store i64 3, ptr %base64_3, align 8
   ret void
 }
 
-define void @ow_end_align1(i8* nocapture %p) {
+define void @ow_end_align1(ptr nocapture %p) {
 ; CHECK-LABEL: @ow_end_align1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 1
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[P1]], i8 0, i64 27, i1 false)
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 27
-; CHECK-NEXT:    [[P2_I64:%.*]] = bitcast i8* [[P2]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P2_I64]], align 1
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[P1]], i8 0, i64 27, i1 false)
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 27
+; CHECK-NEXT:    store i64 1, ptr [[P2]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  call void @llvm.memset.p0i8.i64(i8* align 1 %p1, i8 0, i64 32, i1 false)
-  %p2 = getelementptr inbounds i8, i8* %p1, i64 27
-  %p2.i64 = bitcast i8* %p2 to i64*
-  store i64 1, i64* %p2.i64, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 1 %p1, i8 0, i64 32, i1 false)
+  %p2 = getelementptr inbounds i8, ptr %p1, i64 27
+  store i64 1, ptr %p2, align 1
   ret void
 }
 
-define void @ow_end_align4(i8* nocapture %p) {
+define void @ow_end_align4(ptr nocapture %p) {
 ; CHECK-LABEL: @ow_end_align4(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 1
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P1]], i8 0, i64 28, i1 false)
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 27
-; CHECK-NEXT:    [[P2_I64:%.*]] = bitcast i8* [[P2]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P2_I64]], align 1
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[P1]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 27
+; CHECK-NEXT:    store i64 1, ptr [[P2]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  call void @llvm.memset.p0i8.i64(i8* align 4 %p1, i8 0, i64 32, i1 false)
-  %p2 = getelementptr inbounds i8, i8* %p1, i64 27
-  %p2.i64 = bitcast i8* %p2 to i64*
-  store i64 1, i64* %p2.i64, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 4 %p1, i8 0, i64 32, i1 false)
+  %p2 = getelementptr inbounds i8, ptr %p1, i64 27
+  store i64 1, ptr %p2, align 1
   ret void
 }
 
-define void @ow_end_align8(i8* nocapture %p) {
+define void @ow_end_align8(ptr nocapture %p) {
 ; CHECK-LABEL: @ow_end_align8(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 1
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[P1]], i8 0, i64 32, i1 false)
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 27
-; CHECK-NEXT:    [[P2_I64:%.*]] = bitcast i8* [[P2]] to i64*
-; CHECK-NEXT:    store i64 1, i64* [[P2_I64]], align 1
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[P1]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 27
+; CHECK-NEXT:    store i64 1, ptr [[P2]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  call void @llvm.memset.p0i8.i64(i8* align 8 %p1, i8 0, i64 32, i1 false)
-  %p2 = getelementptr inbounds i8, i8* %p1, i64 27
-  %p2.i64 = bitcast i8* %p2 to i64*
-  store i64 1, i64* %p2.i64, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  call void @llvm.memset.p0.i64(ptr align 8 %p1, i8 0, i64 32, i1 false)
+  %p2 = getelementptr inbounds i8, ptr %p1, i64 27
+  store i64 1, ptr %p2, align 1
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll b/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
index 605268e2ca86a..917e8ff2565d9 100644
--- a/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
@@ -4,28 +4,25 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
 
 ; Ensure that the dead store is deleted in this case.  It is wholely
 ; overwritten by the second store.
-define void @test1(i32 *%V) {
+define void @test1(ptr %V) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    store i32 1234567, i32* [[V:%.*]], align 4
+; CHECK-NEXT:    store i32 1234567, ptr [[V:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %V2 = bitcast i32* %V to i8*            ; <i8*> [#uses=1]
-  store i8 0, i8* %V2
-  store i32 1234567, i32* %V
+  store i8 0, ptr %V
+  store i32 1234567, ptr %V
   ret void
 }
 
 ; Note that we could do better by merging the two stores into one.
-define void @test2(i32* %P) {
+define void @test2(ptr %P) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[Q:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    store i16 1, i16* [[Q]], align 2
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i16 1, ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
-  %Q = bitcast i32* %P to i16*
-  store i16 1, i16* %Q
+  store i32 0, ptr %P
+  store i16 1, ptr %P
   ret void
 }
 
@@ -33,75 +30,70 @@ define void @test2(i32* %P) {
 define i32 @test3(double %__x) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:    [[__U:%.*]] = alloca { [3 x i32] }, align 4
-; CHECK-NEXT:    [[TMP_1:%.*]] = bitcast { [3 x i32] }* [[__U]] to double*
-; CHECK-NEXT:    store double [[__X:%.*]], double* [[TMP_1]], align 8
-; CHECK-NEXT:    [[TMP_4:%.*]] = getelementptr { [3 x i32] }, { [3 x i32] }* [[__U]], i32 0, i32 0, i32 1
-; CHECK-NEXT:    [[TMP_5:%.*]] = load i32, i32* [[TMP_4]], align 4
+; CHECK-NEXT:    store double [[__X:%.*]], ptr [[__U]], align 8
+; CHECK-NEXT:    [[TMP_4:%.*]] = getelementptr { [3 x i32] }, ptr [[__U]], i32 0, i32 0, i32 1
+; CHECK-NEXT:    [[TMP_5:%.*]] = load i32, ptr [[TMP_4]], align 4
 ; CHECK-NEXT:    [[TMP_6:%.*]] = icmp slt i32 [[TMP_5]], 0
 ; CHECK-NEXT:    [[TMP_7:%.*]] = zext i1 [[TMP_6]] to i32
 ; CHECK-NEXT:    ret i32 [[TMP_7]]
 ;
   %__u = alloca { [3 x i32] }
-  %tmp.1 = bitcast { [3 x i32] }* %__u to double*
-  store double %__x, double* %tmp.1
-  %tmp.4 = getelementptr { [3 x i32] }, { [3 x i32] }* %__u, i32 0, i32 0, i32 1
-  %tmp.5 = load i32, i32* %tmp.4
+  store double %__x, ptr %__u
+  %tmp.4 = getelementptr { [3 x i32] }, ptr %__u, i32 0, i32 0, i32 1
+  %tmp.5 = load i32, ptr %tmp.4
   %tmp.6 = icmp slt i32 %tmp.5, 0
   %tmp.7 = zext i1 %tmp.6 to i32
   ret i32 %tmp.7
 }
 
 ; PR6043
-define void @test4(i8* %P) {
+define void @test4(ptr %P) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[Q:%.*]] = bitcast i8* [[P:%.*]] to double*
-; CHECK-NEXT:    store double 0.000000e+00, double* [[Q]], align 8
+; CHECK-NEXT:    store double 0.000000e+00, ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 
-  store i8 19, i8* %P  ;; dead
-  %A = getelementptr i8, i8* %P, i32 3
+  store i8 19, ptr %P  ;; dead
+  %A = getelementptr i8, ptr %P, i32 3
 
-  store i8 42, i8* %A  ;; dead
+  store i8 42, ptr %A  ;; dead
 
-  %Q = bitcast i8* %P to double*
-  store double 0.0, double* %Q
+  store double 0.0, ptr %P
   ret void
 }
 
 ; PR8657
-declare void @test5a(i32*)
+declare void @test5a(ptr)
 define void @test5(i32 %i) nounwind ssp {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 20, i32* [[A]], align 4
-; CHECK-NEXT:    call void @test5a(i32* [[A]])
+; CHECK-NEXT:    store i32 20, ptr [[A]], align 4
+; CHECK-NEXT:    call void @test5a(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %A = alloca i32
-  %B = bitcast i32* %A to i8*
-  %C = getelementptr i8, i8* %B, i32 %i
-  store i8 10, i8* %C        ;; Dead store to variable index.
-  store i32 20, i32* %A
+  %C = getelementptr i8, ptr %A, i32 %i
+  store i8 10, ptr %C        ;; Dead store to variable index.
+  store i32 20, ptr %A
 
-  call void @test5a(i32* %A)
+  call void @test5a(ptr %A)
   ret void
 }
 
-declare void @test5a_as1(i32*)
+declare void @test5a_as1(ptr)
 define void @test5_addrspacecast(i32 %i) nounwind ssp {
 ; CHECK-LABEL: @test5_addrspacecast(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 20, i32* [[A]], align 4
-; CHECK-NEXT:    call void @test5a(i32* [[A]])
+; CHECK-NEXT:    store i32 20, ptr [[A]], align 4
+; CHECK-NEXT:    call void @test5a(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %A = alloca i32
-  %B = addrspacecast i32* %A to i8 addrspace(1)*
-  %C = getelementptr i8, i8 addrspace(1)* %B, i32 %i
-  store i8 10, i8 addrspace(1)* %C        ;; Dead store to variable index.
-  store i32 20, i32* %A
+  %B = addrspacecast ptr %A to ptr addrspace(1)
+  %C = getelementptr i8, ptr addrspace(1) %B, i32 %i
+  store i8 10, ptr addrspace(1) %C        ;; Dead store to variable index.
+  store i32 20, ptr %A
 
-  call void @test5a(i32* %A)
+  call void @test5a(ptr %A)
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/PartialStore2.ll b/llvm/test/Transforms/DeadStoreElimination/PartialStore2.ll
index 4bba265684688..a4439571325ba 100644
--- a/llvm/test/Transforms/DeadStoreElimination/PartialStore2.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/PartialStore2.ll
@@ -6,25 +6,21 @@
 ;
 ; Better safe than sorry, do not assume anything about the padding for the
 ; i28 store that has 32 bits as store size.
-define void @test1(i32* %p) {
+define void @test1(ptr %p) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32
-; CHECK-NEXT:    [[B:%.*]] = bitcast i32* [[A]] to i28*
-; CHECK-NEXT:    [[C:%.*]] = bitcast i32* [[A]] to { i16, i16 }*
-; CHECK-NEXT:    [[C1:%.*]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* [[C]], i32 0, i32 1
-; CHECK-NEXT:    store i28 10, i28* [[B]]
-; CHECK-NEXT:    store i16 20, i16* [[C1]]
-; CHECK-NEXT:    call void @test1(i32* [[A]])
+; CHECK-NEXT:    [[C1:%.*]] = getelementptr inbounds { i16, i16 }, ptr [[A]], i32 0, i32 1
+; CHECK-NEXT:    store i28 10, ptr [[A]]
+; CHECK-NEXT:    store i16 20, ptr [[C1]]
+; CHECK-NEXT:    call void @test1(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32
-  %b = bitcast i32* %a to i28*
-  %c = bitcast i32* %a to { i16, i16 }*
-  %c1 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %c, i32 0, i32 1
-  store i28 10, i28* %b
-  store i16 20, i16* %c1
+  %c1 = getelementptr inbounds { i16, i16 }, ptr %a, i32 0, i32 1
+  store i28 10, ptr %a
+  store i16 20, ptr %c1
 
-  call void @test1(i32* %a)
+  call void @test1(ptr %a)
   ret void
 }
 
@@ -33,23 +29,19 @@ define void @test1(i32* %p) {
 ;
 ; Better safe than sorry, do not assume anything about the padding for the
 ; i12 store that has 16 bits as store size.
-define void @test2(i32* %p) {
+define void @test2(ptr %p) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    [[U:%.*]] = alloca i32
-; CHECK-NEXT:    [[A:%.*]] = bitcast i32* [[U]] to i32*
-; CHECK-NEXT:    [[B:%.*]] = bitcast i32* [[U]] to i12*
-; CHECK-NEXT:    store i32 -1, i32* [[A]]
-; CHECK-NEXT:    store i12 20, i12* [[B]]
-; CHECK-NEXT:    call void @test2(i32* [[U]])
+; CHECK-NEXT:    store i32 -1, ptr [[U]]
+; CHECK-NEXT:    store i12 20, ptr [[U]]
+; CHECK-NEXT:    call void @test2(ptr [[U]])
 ; CHECK-NEXT:    ret void
 ;
   %u = alloca i32
-  %a = bitcast i32* %u to i32*
-  %b = bitcast i32* %u to i12*
-  store i32 -1, i32* %a
-  store i12 20, i12* %b
+  store i32 -1, ptr %u
+  store i12 20, ptr %u
 
-  call void @test2(i32* %u)
+  call void @test2(ptr %u)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/X86/gather-null-pointer.ll b/llvm/test/Transforms/DeadStoreElimination/X86/gather-null-pointer.ll
index 6a5f4bb9eb25c..264aea821c68b 100644
--- a/llvm/test/Transforms/DeadStoreElimination/X86/gather-null-pointer.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/X86/gather-null-pointer.ll
@@ -3,19 +3,19 @@
 
 ; Both stores should be emitted because we can't tell if the gather aliases.
 
-define <4 x i32> @bar(<4 x i32> %arg, i32* %arg1) {
+define <4 x i32> @bar(<4 x i32> %arg, ptr %arg1) {
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    store i32 5, i32* [[ARG1:%.*]]
-; CHECK-NEXT:    [[TMP:%.*]] = tail call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, i8* null, <4 x i32> [[ARG:%.*]], <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i8 1)
-; CHECK-NEXT:    store i32 10, i32* [[ARG1]]
+; CHECK-NEXT:    store i32 5, ptr [[ARG1:%.*]]
+; CHECK-NEXT:    [[TMP:%.*]] = tail call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, ptr null, <4 x i32> [[ARG:%.*]], <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i8 1)
+; CHECK-NEXT:    store i32 10, ptr [[ARG1]]
 ; CHECK-NEXT:    ret <4 x i32> [[TMP]]
 ;
 bb:
-  store i32 5, i32* %arg1
-  %tmp = tail call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, i8* null, <4 x i32> %arg, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i8 1)
-  store i32 10, i32* %arg1
+  store i32 5, ptr %arg1
+  %tmp = tail call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, ptr null, <4 x i32> %arg, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i8 1)
+  store i32 10, ptr %arg1
   ret <4 x i32> %tmp
 }
 
-declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr, <4 x i32>, <4 x i32>, i8)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/assume.ll b/llvm/test/Transforms/DeadStoreElimination/assume.ll
index e53f6bba1ab9b..aa767664c52de 100644
--- a/llvm/test/Transforms/DeadStoreElimination/assume.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/assume.ll
@@ -5,36 +5,36 @@
 
 define void @f() {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT:    [[TMP1:%.*]] = call noalias i8* @_Znwm(i64 32)
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i8* [[TMP1]], @global
+; CHECK-NEXT:    [[TMP1:%.*]] = call noalias ptr @_Znwm(i64 32)
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt ptr [[TMP1]], @global
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
 ; CHECK-NEXT:    ret void
 ;
-  %tmp1 = call noalias i8* @_Znwm(i64 32)
-  %tmp2 = icmp ugt i8* %tmp1, @global
+  %tmp1 = call noalias ptr @_Znwm(i64 32)
+  %tmp2 = icmp ugt ptr %tmp1, @global
   call void @llvm.assume(i1 %tmp2)
-  store i8 0, i8* %tmp1, align 1
+  store i8 0, ptr %tmp1, align 1
   ret void
 }
 
 define void @f2() {
 ; CHECK-LABEL: @f2(
-; CHECK-NEXT:    [[TMP1:%.*]] = call noalias i8* @_Znwm(i64 32)
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i8* [[TMP1]], @global
+; CHECK-NEXT:    [[TMP1:%.*]] = call noalias ptr @_Znwm(i64 32)
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt ptr [[TMP1]], @global
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT:    call void @quux(i8* @global)
+; CHECK-NEXT:    call void @quux(ptr @global)
 ; CHECK-NEXT:    ret void
 ;
-  %tmp1 = call noalias i8* @_Znwm(i64 32)
-  %tmp2 = icmp ugt i8* %tmp1, @global
+  %tmp1 = call noalias ptr @_Znwm(i64 32)
+  %tmp2 = icmp ugt ptr %tmp1, @global
   call void @llvm.assume(i1 %tmp2)
-  store i8 0, i8* %tmp1, align 1
-  call void @quux(i8* @global)
+  store i8 0, ptr %tmp1, align 1
+  call void @quux(ptr @global)
   ret void
 }
 
-declare i8* @_Znwm(i64)
+declare ptr @_Znwm(i64)
 
 declare void @llvm.assume(i1)
 
-declare void @quux(i8*)
+declare void @quux(ptr)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/atomic-overlapping.ll b/llvm/test/Transforms/DeadStoreElimination/atomic-overlapping.ll
index d23208166136a..764f0b7127aec 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic-overlapping.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic-overlapping.ll
@@ -3,23 +3,17 @@
 
 target datalayout = "e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128"
 
-define void @widget(i8* %ptr) {
+define void @widget(ptr %ptr) {
 ; CHECK-LABEL: @widget(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i32 4
-; CHECK-NEXT:    [[PTR1_CAST:%.*]] = bitcast i8* [[PTR1]] to i32*
-; CHECK-NEXT:    store atomic i32 0, i32* [[PTR1_CAST]] monotonic, align 4
-; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i32 0
-; CHECK-NEXT:    [[PTR2_CAST:%.*]] = bitcast i8* [[PTR2]] to i64**
-; CHECK-NEXT:    store i64* null, i64** [[PTR2_CAST]], align 4
+; CHECK-NEXT:    [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i32 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[PTR1]] monotonic, align 4
+; CHECK-NEXT:    store ptr null, ptr [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
 bb:
-  %ptr1 = getelementptr inbounds i8, i8* %ptr, i32 4
-  %ptr1.cast = bitcast i8* %ptr1 to i32*
-  store atomic i32 0, i32* %ptr1.cast monotonic, align 4
-  %ptr2 = getelementptr inbounds i8, i8* %ptr, i32 0
-  %ptr2.cast = bitcast i8* %ptr2 to i64**
-  store i64* null, i64** %ptr2.cast, align 4
+  %ptr1 = getelementptr inbounds i8, ptr %ptr, i32 4
+  store atomic i32 0, ptr %ptr1 monotonic, align 4
+  store ptr null, ptr %ptr, align 4
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
index c7b4b50ab875a..1745238aeb73f 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
@@ -16,8 +16,8 @@ define i32 @test9() {
 ; CHECK-LABEL: test9
 ; CHECK-NOT: store i32 0
 ; CHECK: store i32 1
-  store i32 0, i32* @x
-  %x = load atomic i32, i32* @y monotonic, align 4
-  store i32 1, i32* @x
+  store i32 0, ptr @x
+  %x = load atomic i32, ptr @y monotonic, align 4
+  store i32 1, ptr @x
   ret i32 %x
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/atomic.ll b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
index 7caba9aeafd14..3c2465242a198 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
@@ -11,40 +11,40 @@ target triple = "x86_64-apple-macosx10.7.0"
 @x = common global i32 0, align 4
 @y = common global i32 0, align 4
 
-declare void @randomop(i32*)
+declare void @randomop(ptr)
 
 ; DSE across unordered store (allowed)
 define void @test1() {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    store atomic i32 0, i32* @y unordered, align 4
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr @y unordered, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* @x
-  store atomic i32 0, i32* @y unordered, align 4
-  store i32 1, i32* @x
+  store i32 0, ptr @x
+  store atomic i32 0, ptr @y unordered, align 4
+  store i32 1, ptr @x
   ret void
 }
 
 ; DSE remove unordered store (allowed)
 define void @test4() {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 0, i32* @x unordered, align 4
-  store i32 1, i32* @x
+  store atomic i32 0, ptr @x unordered, align 4
+  store i32 1, ptr @x
   ret void
 }
 
 ; DSE unordered store overwriting non-atomic store (allowed)
 define void @test5() {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    store atomic i32 1, i32* @x unordered, align 4
+; CHECK-NEXT:    store atomic i32 1, ptr @x unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* @x
-  store atomic i32 1, i32* @x unordered, align 4
+  store i32 0, ptr @x
+  store atomic i32 1, ptr @x unordered, align 4
   ret void
 }
 
@@ -53,8 +53,8 @@ define void @test6() {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    ret void
 ;
-  %x = load atomic i32, i32* @x unordered, align 4
-  store atomic i32 %x, i32* @x unordered, align 4
+  %x = load atomic i32, ptr @x unordered, align 4
+  store atomic i32 %x, ptr @x unordered, align 4
   ret void
 }
 
@@ -63,11 +63,11 @@ define void @test6() {
 define void @test7() {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store atomic i32 0, i32* [[A]] seq_cst, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[A]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32
-  store atomic i32 0, i32* %a seq_cst, align 4
+  store atomic i32 0, ptr %a seq_cst, align 4
   ret void
 }
 
@@ -76,15 +76,15 @@ define void @test7() {
 define i32 @test8() {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    call void @randomop(i32* [[A]])
-; CHECK-NEXT:    store i32 0, i32* [[A]], align 4
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, i32* @x seq_cst, align 4
+; CHECK-NEXT:    call void @randomop(ptr [[A]])
+; CHECK-NEXT:    store i32 0, ptr [[A]], align 4
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @x seq_cst, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %a = alloca i32
-  call void @randomop(i32* %a)
-  store i32 0, i32* %a, align 4
-  %x = load atomic i32, i32* @x seq_cst, align 4
+  call void @randomop(ptr %a)
+  store i32 0, ptr %a, align 4
+  %x = load atomic i32, ptr @x seq_cst, align 4
   ret i32 %x
 }
 
@@ -93,53 +93,53 @@ define void @test10() {
 ; CHECK-LABEL: test10
 ; CHECK-NOT: store i32 0
 ; CHECK: store i32 1
-  store i32 0, i32* @x
-  store atomic i32 42, i32* @y monotonic, align 4
-  store i32 1, i32* @x
+  store i32 0, ptr @x
+  store atomic i32 42, ptr @y monotonic, align 4
+  store i32 1, ptr @x
   ret void
 }
 
 ; DSE across monotonic load (forbidden since the eliminated store is atomic)
 define i32 @test11() {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    store atomic i32 0, i32* @x monotonic, align 4
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, i32* @y monotonic, align 4
-; CHECK-NEXT:    store atomic i32 1, i32* @x monotonic, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr @x monotonic, align 4
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @y monotonic, align 4
+; CHECK-NEXT:    store atomic i32 1, ptr @x monotonic, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
-  store atomic i32 0, i32* @x monotonic, align 4
-  %x = load atomic i32, i32* @y monotonic, align 4
-  store atomic i32 1, i32* @x monotonic, align 4
+  store atomic i32 0, ptr @x monotonic, align 4
+  %x = load atomic i32, ptr @y monotonic, align 4
+  store atomic i32 1, ptr @x monotonic, align 4
   ret i32 %x
 }
 
 ; DSE across monotonic store (forbidden since the eliminated store is atomic)
 define void @test12() {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    store atomic i32 0, i32* @x monotonic, align 4
-; CHECK-NEXT:    store atomic i32 42, i32* @y monotonic, align 4
-; CHECK-NEXT:    store atomic i32 1, i32* @x monotonic, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr @x monotonic, align 4
+; CHECK-NEXT:    store atomic i32 42, ptr @y monotonic, align 4
+; CHECK-NEXT:    store atomic i32 1, ptr @x monotonic, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 0, i32* @x monotonic, align 4
-  store atomic i32 42, i32* @y monotonic, align 4
-  store atomic i32 1, i32* @x monotonic, align 4
+  store atomic i32 0, ptr @x monotonic, align 4
+  store atomic i32 42, ptr @y monotonic, align 4
+  store atomic i32 1, ptr @x monotonic, align 4
   ret void
 }
 
 ; But DSE is not allowed across a release-acquire pair.
 define i32 @test15() {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    store i32 0, i32* @x, align 4
-; CHECK-NEXT:    store atomic i32 0, i32* @y release, align 4
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, i32* @y acquire, align 4
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
+; CHECK-NEXT:    store i32 0, ptr @x, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr @y release, align 4
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @y acquire, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
-  store i32 0, i32* @x
-  store atomic i32 0, i32* @y release, align 4
-  %x = load atomic i32, i32* @y acquire, align 4
-  store i32 1, i32* @x
+  store i32 0, ptr @x
+  store atomic i32 0, ptr @y release, align 4
+  %x = load atomic i32, ptr @y acquire, align 4
+  store i32 1, ptr @x
   ret i32 %x
 }
 
@@ -149,181 +149,181 @@ define i32 @test15() {
 ; Be conservative, do not kill regular store.
 define i64 @test_atomicrmw_0() {
 ; CHECK-LABEL: @test_atomicrmw_0(
-; CHECK-NEXT:    store i64 1, i64* @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add i64* @z, i64 -1 monotonic
+; CHECK-NEXT:    store i64 1, ptr @z, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 monotonic
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  store i64 1, i64* @z
-  %res = atomicrmw add i64* @z, i64 -1 monotonic
+  store i64 1, ptr @z
+  %res = atomicrmw add ptr @z, i64 -1 monotonic
   ret i64 %res
 }
 
 ; Be conservative, do not kill regular store.
 define i64 @test_atomicrmw_1() {
 ; CHECK-LABEL: @test_atomicrmw_1(
-; CHECK-NEXT:    store i64 1, i64* @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add i64* @z, i64 -1 acq_rel
+; CHECK-NEXT:    store i64 1, ptr @z, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 acq_rel
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  store i64 1, i64* @z
-  %res = atomicrmw add i64* @z, i64 -1 acq_rel
+  store i64 1, ptr @z
+  %res = atomicrmw add ptr @z, i64 -1 acq_rel
   ret i64 %res
 }
 
 ; Monotonic atomicrmw should not block eliminating no-aliasing stores.
 define i64 @test_atomicrmw_2() {
 ; CHECK-LABEL: @test_atomicrmw_2(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add i64* @a, i64 -1 monotonic
-; CHECK-NEXT:    store i64 2, i64* @z, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @a, i64 -1 monotonic
+; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  store i64 1, i64* @z
-  %res = atomicrmw add i64* @a, i64 -1 monotonic
-  store i64 2, i64* @z
+  store i64 1, ptr @z
+  %res = atomicrmw add ptr @a, i64 -1 monotonic
+  store i64 2, ptr @z
   ret i64 %res
 }
 
 ; Be conservative, do not eliminate stores across atomic operations > monotonic.
 define i64 @test_atomicrmw_3() {
 ; CHECK-LABEL: @test_atomicrmw_3(
-; CHECK-NEXT:    store i64 1, i64* @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add i64* @a, i64 -1 release
-; CHECK-NEXT:    store i64 2, i64* @z, align 8
+; CHECK-NEXT:    store i64 1, ptr @z, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @a, i64 -1 release
+; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  store i64 1, i64* @z
-  %res = atomicrmw add i64* @a, i64 -1 release
-  store i64 2, i64* @z
+  store i64 1, ptr @z
+  %res = atomicrmw add ptr @a, i64 -1 release
+  store i64 2, ptr @z
   ret i64 %res
 }
 
 ; Be conservative, do not eliminate may-alias stores.
-define i64 @test_atomicrmw_4(i64* %ptr) {
+define i64 @test_atomicrmw_4(ptr %ptr) {
 ; CHECK-LABEL: @test_atomicrmw_4(
-; CHECK-NEXT:    store i64 1, i64* @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add i64* [[PTR:%.*]], i64 -1 monotonic
-; CHECK-NEXT:    store i64 2, i64* @z, align 8
+; CHECK-NEXT:    store i64 1, ptr @z, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 -1 monotonic
+; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  store i64 1, i64* @z
-  %res = atomicrmw add i64* %ptr, i64 -1 monotonic
-  store i64 2, i64* @z
+  store i64 1, ptr @z
+  %res = atomicrmw add ptr %ptr, i64 -1 monotonic
+  store i64 2, ptr @z
   ret i64 %res
 }
 
 ; Be conservative, do not eliminate aliasing stores.
 define i64 @test_atomicrmw_5() {
 ; CHECK-LABEL: @test_atomicrmw_5(
-; CHECK-NEXT:    store i64 1, i64* @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add i64* @z, i64 -1 monotonic
-; CHECK-NEXT:    store i64 2, i64* @z, align 8
+; CHECK-NEXT:    store i64 1, ptr @z, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 monotonic
+; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  store i64 1, i64* @z
-  %res = atomicrmw add i64* @z, i64 -1 monotonic
-  store i64 2, i64* @z
+  store i64 1, ptr @z
+  %res = atomicrmw add ptr @z, i64 -1 monotonic
+  store i64 2, ptr @z
   ret i64 %res
 }
 
 ; Be conservative, do not eliminate non-monotonic cmpxchg.
 define { i32, i1} @test_cmpxchg_1() {
 ; CHECK-LABEL: @test_cmpxchg_1(
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile i32* @x, i32 10, i32 20 seq_cst monotonic
-; CHECK-NEXT:    store i32 2, i32* @x, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @x, i32 10, i32 20 seq_cst monotonic
+; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
-  store i32 1, i32* @x
-  %ret = cmpxchg volatile i32* @x, i32 10, i32 20 seq_cst monotonic
-  store i32 2, i32* @x
+  store i32 1, ptr @x
+  %ret = cmpxchg volatile ptr @x, i32 10, i32 20 seq_cst monotonic
+  store i32 2, ptr @x
   ret { i32, i1 } %ret
 }
 
 ; Monotonic cmpxchg should not block DSE for non-aliasing stores.
 define { i32, i1} @test_cmpxchg_2() {
 ; CHECK-LABEL: @test_cmpxchg_2(
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile i32* @y, i32 10, i32 20 monotonic monotonic
-; CHECK-NEXT:    store i32 2, i32* @x, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @y, i32 10, i32 20 monotonic monotonic
+; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
-  store i32 1, i32* @x
-  %ret = cmpxchg volatile i32* @y, i32 10, i32 20 monotonic monotonic
-  store i32 2, i32* @x
+  store i32 1, ptr @x
+  %ret = cmpxchg volatile ptr @y, i32 10, i32 20 monotonic monotonic
+  store i32 2, ptr @x
   ret { i32, i1 } %ret
 }
 
 ; Be conservative, do not eliminate non-monotonic cmpxchg.
 define { i32, i1} @test_cmpxchg_3() {
 ; CHECK-LABEL: @test_cmpxchg_3(
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile i32* @y, i32 10, i32 20 seq_cst seq_cst
-; CHECK-NEXT:    store i32 2, i32* @x, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @y, i32 10, i32 20 seq_cst seq_cst
+; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
-  store i32 1, i32* @x
-  %ret = cmpxchg volatile i32* @y, i32 10, i32 20 seq_cst seq_cst
-  store i32 2, i32* @x
+  store i32 1, ptr @x
+  %ret = cmpxchg volatile ptr @y, i32 10, i32 20 seq_cst seq_cst
+  store i32 2, ptr @x
   ret { i32, i1 } %ret
 }
 
 ; Be conservative, do not eliminate may-alias stores.
-define { i32, i1} @test_cmpxchg_4(i32* %ptr) {
+define { i32, i1} @test_cmpxchg_4(ptr %ptr) {
 ; CHECK-LABEL: @test_cmpxchg_4(
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile i32* [[PTR:%.*]], i32 10, i32 20 monotonic monotonic
-; CHECK-NEXT:    store i32 2, i32* @x, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr [[PTR:%.*]], i32 10, i32 20 monotonic monotonic
+; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
-  store i32 1, i32* @x
-  %ret = cmpxchg volatile i32* %ptr, i32 10, i32 20 monotonic monotonic
-  store i32 2, i32* @x
+  store i32 1, ptr @x
+  %ret = cmpxchg volatile ptr %ptr, i32 10, i32 20 monotonic monotonic
+  store i32 2, ptr @x
   ret { i32, i1 } %ret
 }
 
 ; Be conservative, do not eliminate alias stores.
-define { i32, i1} @test_cmpxchg_5(i32* %ptr) {
+define { i32, i1} @test_cmpxchg_5(ptr %ptr) {
 ; CHECK-LABEL: @test_cmpxchg_5(
-; CHECK-NEXT:    store i32 1, i32* @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile i32* @x, i32 10, i32 20 monotonic monotonic
-; CHECK-NEXT:    store i32 2, i32* @x, align 4
+; CHECK-NEXT:    store i32 1, ptr @x, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @x, i32 10, i32 20 monotonic monotonic
+; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
-  store i32 1, i32* @x
-  %ret = cmpxchg volatile i32* @x, i32 10, i32 20 monotonic monotonic
-  store i32 2, i32* @x
+  store i32 1, ptr @x
+  %ret = cmpxchg volatile ptr @x, i32 10, i32 20 monotonic monotonic
+  store i32 2, ptr @x
   ret { i32, i1 } %ret
 }
 
 ; **** Noop load->store tests **************************************************
 
 ; We can optimize unordered atomic loads or stores.
-define void @test_load_atomic(i32* %Q) {
+define void @test_load_atomic(ptr %Q) {
 ; CHECK-LABEL: @test_load_atomic(
 ; CHECK-NEXT:    ret void
 ;
-  %a = load atomic i32, i32* %Q unordered, align 4
-  store atomic i32 %a, i32* %Q unordered, align 4
+  %a = load atomic i32, ptr %Q unordered, align 4
+  store atomic i32 %a, ptr %Q unordered, align 4
   ret void
 }
 
 ; We can optimize unordered atomic loads or stores.
-define void @test_store_atomic(i32* %Q) {
+define void @test_store_atomic(ptr %Q) {
 ; CHECK-LABEL: @test_store_atomic(
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  store atomic i32 %a, i32* %Q unordered, align 4
+  %a = load i32, ptr %Q
+  store atomic i32 %a, ptr %Q unordered, align 4
   ret void
 }
 
 ; We can NOT optimize release atomic loads or stores.
-define void @test_store_atomic_release(i32* %Q) {
+define void @test_store_atomic_release(ptr %Q) {
 ; CHECK-LABEL: @test_store_atomic_release(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store atomic i32 [[A]], i32* [[Q]] release, align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store atomic i32 [[A]], ptr [[Q]] release, align 4
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  store atomic i32 %a, i32* %Q release, align 4
+  %a = load i32, ptr %Q
+  store atomic i32 %a, ptr %Q release, align 4
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/calloc-store.ll b/llvm/test/Transforms/DeadStoreElimination/calloc-store.ll
index de0e55e33c0f8..b86908c01c402 100644
--- a/llvm/test/Transforms/DeadStoreElimination/calloc-store.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/calloc-store.ll
@@ -1,156 +1,153 @@
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 
-declare noalias i8* @calloc(i64, i64) inaccessiblememonly allockind("alloc,zeroed")
+declare noalias ptr @calloc(i64, i64) inaccessiblememonly allockind("alloc,zeroed")
 
-define i32* @test1() {
+define ptr @test1() {
 ; CHECK-LABEL: test1
-  %1 = tail call noalias i8* @calloc(i64 1, i64 4)
-  %2 = bitcast i8* %1 to i32*
+  %1 = tail call noalias ptr @calloc(i64 1, i64 4)
   ; This store is dead and should be removed
-  store i32 0, i32* %2, align 4
-; CHECK-NOT: store i32 0, i32* %2, align 4
-  ret i32* %2
+  store i32 0, ptr %1, align 4
+; CHECK-NOT: store i32 0, ptr %1, align 4
+  ret ptr %1
 }
 
-define i32* @test2() {
+define ptr @test2() {
 ; CHECK-LABEL: test2
-  %1 = tail call noalias i8* @calloc(i64 1, i64 4)
-  %2 = bitcast i8* %1 to i32*
-  %3 = getelementptr i32, i32* %2, i32 5
-  store i32 0, i32* %3, align 4
-; CHECK-NOT: store i32 0, i32* %2, align 4
-  ret i32* %2
+  %1 = tail call noalias ptr @calloc(i64 1, i64 4)
+  %2 = getelementptr i32, ptr %1, i32 5
+  store i32 0, ptr %2, align 4
+; CHECK-NOT: store i32 0, ptr %1, align 4
+  ret ptr %1
 }
 
-define i32* @test3(i32 *%arg) {
+define ptr @test3(ptr %arg) {
 ; CHECK-LABEL: test3
-  store i32 0, i32* %arg, align 4
-; CHECK: store i32 0, i32* %arg, align 4
-  ret i32* %arg
+  store i32 0, ptr %arg, align 4
+; CHECK: store i32 0, ptr %arg, align 4
+  ret ptr %arg
 }
 
-declare void @clobber_memory(i8*)
-define i8* @test4() {
+declare void @clobber_memory(ptr)
+define ptr @test4() {
 ; CHECK-LABEL: test4
-  %1 = tail call noalias i8* @calloc(i64 1, i64 4)
-  call void @clobber_memory(i8* %1)
-  store i8 0, i8* %1, align 4
-; CHECK: store i8 0, i8* %1, align 4
-  ret i8* %1
+  %1 = tail call noalias ptr @calloc(i64 1, i64 4)
+  call void @clobber_memory(ptr %1)
+  store i8 0, ptr %1, align 4
+; CHECK: store i8 0, ptr %1, align 4
+  ret ptr %1
 }
 
-define i32* @test5() {
+define ptr @test5() {
 ; CHECK-LABEL: test5
-  %1 = tail call noalias i8* @calloc(i64 1, i64 4)
-  %2 = bitcast i8* %1 to i32*
-  store volatile i32 0, i32* %2, align 4
-; CHECK: store volatile i32 0, i32* %2, align 4
-  ret i32* %2
+  %1 = tail call noalias ptr @calloc(i64 1, i64 4)
+  store volatile i32 0, ptr %1, align 4
+; CHECK: store volatile i32 0, ptr %1, align 4
+  ret ptr %1
 }
 
-define i8* @test6() {
+define ptr @test6() {
 ; CHECK-LABEL: test6
-  %1 = tail call noalias i8* @calloc(i64 1, i64 4)
-  store i8 5, i8* %1, align 4
-; CHECK: store i8 5, i8* %1, align 4
-  ret i8* %1
+  %1 = tail call noalias ptr @calloc(i64 1, i64 4)
+  store i8 5, ptr %1, align 4
+; CHECK: store i8 5, ptr %1, align 4
+  ret ptr %1
 }
 
-define i8* @test7(i8 %arg) {
+define ptr @test7(i8 %arg) {
 ; CHECK-LABEL: test7
-  %1 = tail call noalias i8* @calloc(i64 1, i64 4)
-  store i8 %arg, i8* %1, align 4
-; CHECK: store i8 %arg, i8* %1, align 4
-  ret i8* %1
+  %1 = tail call noalias ptr @calloc(i64 1, i64 4)
+  store i8 %arg, ptr %1, align 4
+; CHECK: store i8 %arg, ptr %1, align 4
+  ret ptr %1
 }
 
-define i8* @test8() {
+define ptr @test8() {
 ; CHECK-LABEL: test8
 ; CHECK-NOT: store
-  %p = tail call noalias i8* @calloc(i64 1, i64 4)
-  store i8 0, i8* %p, align 1
-  %p.1 = getelementptr i8, i8* %p, i32 1
-  store i8 0, i8* %p.1, align 1
-  %p.3 = getelementptr i8, i8* %p, i32 3
-  store i8 0, i8* %p.3, align 1
-  %p.2 = getelementptr i8, i8* %p, i32 2
-  store i8 0, i8* %p.2, align 1
-  ret i8* %p
+  %p = tail call noalias ptr @calloc(i64 1, i64 4)
+  store i8 0, ptr %p, align 1
+  %p.1 = getelementptr i8, ptr %p, i32 1
+  store i8 0, ptr %p.1, align 1
+  %p.3 = getelementptr i8, ptr %p, i32 3
+  store i8 0, ptr %p.3, align 1
+  %p.2 = getelementptr i8, ptr %p, i32 2
+  store i8 0, ptr %p.2, align 1
+  ret ptr %p
 }
 
-define i8* @test9() {
+define ptr @test9() {
 ; CHECK-LABEL: test9
-; CHECK-NEXT:    %p = tail call noalias i8* @calloc(i64 1, i64 4)
-; CHECK-NEXT:    store i8 5, i8* %p, align 1
-; CHECK-NEXT:    ret i8* %p
-
-  %p = tail call noalias i8* @calloc(i64 1, i64 4)
-  store i8 5, i8* %p, align 1
-  %p.1 = getelementptr i8, i8* %p, i32 1
-  store i8 0, i8* %p.1, align 1
-  %p.3 = getelementptr i8, i8* %p, i32 3
-  store i8 0, i8* %p.3, align 1
-  %p.2 = getelementptr i8, i8* %p, i32 2
-  store i8 0, i8* %p.2, align 1
-  ret i8* %p
+; CHECK-NEXT:    %p = tail call noalias ptr @calloc(i64 1, i64 4)
+; CHECK-NEXT:    store i8 5, ptr %p, align 1
+; CHECK-NEXT:    ret ptr %p
+
+  %p = tail call noalias ptr @calloc(i64 1, i64 4)
+  store i8 5, ptr %p, align 1
+  %p.1 = getelementptr i8, ptr %p, i32 1
+  store i8 0, ptr %p.1, align 1
+  %p.3 = getelementptr i8, ptr %p, i32 3
+  store i8 0, ptr %p.3, align 1
+  %p.2 = getelementptr i8, ptr %p, i32 2
+  store i8 0, ptr %p.2, align 1
+  ret ptr %p
 }
 
-define i8* @test10() {
+define ptr @test10() {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[P:%.*]] = tail call noalias i8* @calloc(i64 1, i64 4)
-; CHECK-NEXT:    [[P_3:%.*]] = getelementptr i8, i8* [[P]], i32 3
-; CHECK-NEXT:    store i8 5, i8* [[P_3]], align 1
-; CHECK-NEXT:    ret i8* [[P]]
+; CHECK-NEXT:    [[P:%.*]] = tail call noalias ptr @calloc(i64 1, i64 4)
+; CHECK-NEXT:    [[P_3:%.*]] = getelementptr i8, ptr [[P]], i32 3
+; CHECK-NEXT:    store i8 5, ptr [[P_3]], align 1
+; CHECK-NEXT:    ret ptr [[P]]
 ;
 
-  %p = tail call noalias i8* @calloc(i64 1, i64 4)
-  store i8 0, i8* %p, align 1
-  %p.1 = getelementptr i8, i8* %p, i32 1
-  store i8 0, i8* %p.1, align 1
-  %p.3 = getelementptr i8, i8* %p, i32 3
-  store i8 5, i8* %p.3, align 1
-  %p.2 = getelementptr i8, i8* %p, i32 2
-  store i8 0, i8* %p.2, align 1
-  ret i8* %p
+  %p = tail call noalias ptr @calloc(i64 1, i64 4)
+  store i8 0, ptr %p, align 1
+  %p.1 = getelementptr i8, ptr %p, i32 1
+  store i8 0, ptr %p.1, align 1
+  %p.3 = getelementptr i8, ptr %p, i32 3
+  store i8 5, ptr %p.3, align 1
+  %p.2 = getelementptr i8, ptr %p, i32 2
+  store i8 0, ptr %p.2, align 1
+  ret ptr %p
 }
 
-define i8* @test11() {
+define ptr @test11() {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    [[P:%.*]] = tail call noalias i8* @calloc(i64 1, i64 4)
-; CHECK-NEXT:    ret i8* [[P]]
+; CHECK-NEXT:    [[P:%.*]] = tail call noalias ptr @calloc(i64 1, i64 4)
+; CHECK-NEXT:    ret ptr [[P]]
 ;
 
-  %p = tail call noalias i8* @calloc(i64 1, i64 4)
-  store i8 0, i8* %p, align 1
-  %p.1 = getelementptr i8, i8* %p, i32 1
-  store i8 0, i8* %p.1, align 1
-  %p.3 = getelementptr i8, i8* %p, i32 3
-  store i8 5, i8* %p.3, align 1
-  %p.2 = getelementptr i8, i8* %p, i32 2
-  store i8 0, i8* %p.2, align 1
-  %p.3.2 = getelementptr i8, i8* %p, i32 3
-  store i8 0, i8* %p.3.2, align 1
-  ret i8* %p
+  %p = tail call noalias ptr @calloc(i64 1, i64 4)
+  store i8 0, ptr %p, align 1
+  %p.1 = getelementptr i8, ptr %p, i32 1
+  store i8 0, ptr %p.1, align 1
+  %p.3 = getelementptr i8, ptr %p, i32 3
+  store i8 5, ptr %p.3, align 1
+  %p.2 = getelementptr i8, ptr %p, i32 2
+  store i8 0, ptr %p.2, align 1
+  %p.3.2 = getelementptr i8, ptr %p, i32 3
+  store i8 0, ptr %p.3.2, align 1
+  ret ptr %p
 }
 
-define i8* @test12() {
+define ptr @test12() {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    [[P:%.*]] = tail call noalias i8* @calloc(i64 1, i64 4)
-; CHECK-NEXT:    [[P_3:%.*]] = getelementptr i8, i8* [[P]], i32 3
-; CHECK-NEXT:    store i8 5, i8* [[P_3]], align 1
-; CHECK-NEXT:    call void @use(i8* [[P]])
-; CHECK-NEXT:    [[P_3_2:%.*]] = getelementptr i8, i8* [[P]], i32 3
-; CHECK-NEXT:    store i8 0, i8* [[P_3_2]], align 1
-; CHECK-NEXT:    ret i8* [[P]]
+; CHECK-NEXT:    [[P:%.*]] = tail call noalias ptr @calloc(i64 1, i64 4)
+; CHECK-NEXT:    [[P_3:%.*]] = getelementptr i8, ptr [[P]], i32 3
+; CHECK-NEXT:    store i8 5, ptr [[P_3]], align 1
+; CHECK-NEXT:    call void @use(ptr [[P]])
+; CHECK-NEXT:    [[P_3_2:%.*]] = getelementptr i8, ptr [[P]], i32 3
+; CHECK-NEXT:    store i8 0, ptr [[P_3_2]], align 1
+; CHECK-NEXT:    ret ptr [[P]]
 ;
 
-  %p = tail call noalias i8* @calloc(i64 1, i64 4)
-  %p.3 = getelementptr i8, i8* %p, i32 3
-  store i8 5, i8* %p.3, align 1
-  call void @use(i8* %p)
-  %p.3.2 = getelementptr i8, i8* %p, i32 3
-  store i8 0, i8* %p.3.2, align 1
-  ret i8* %p
+  %p = tail call noalias ptr @calloc(i64 1, i64 4)
+  %p.3 = getelementptr i8, ptr %p, i32 3
+  store i8 5, ptr %p.3, align 1
+  call void @use(ptr %p)
+  %p.3.2 = getelementptr i8, ptr %p, i32 3
+  store i8 0, ptr %p.3.2, align 1
+  ret ptr %p
 }
 
-declare void @use(i8*) readonly
+declare void @use(ptr) readonly

diff  --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
index 7d9986151d9c3..9a23c738ef689 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
@@ -9,52 +9,52 @@ define i32 @other_value_escapes_before_call() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[V1:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[V2:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 0, i32* [[V1]], align 4
-; CHECK-NEXT:    call void @escape(i32* nonnull [[V1]])
+; CHECK-NEXT:    store i32 0, ptr [[V1]], align 4
+; CHECK-NEXT:    call void @escape(ptr nonnull [[V1]])
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 [[CALL]], i32* [[V2]], align 4
-; CHECK-NEXT:    call void @escape(i32* nonnull [[V2]])
-; CHECK-NEXT:    [[LOAD_V2:%.*]] = load i32, i32* [[V2]], align 4
-; CHECK-NEXT:    [[LOAD_V1:%.*]] = load i32, i32* [[V1]], align 4
+; CHECK-NEXT:    store i32 [[CALL]], ptr [[V2]], align 4
+; CHECK-NEXT:    call void @escape(ptr nonnull [[V2]])
+; CHECK-NEXT:    [[LOAD_V2:%.*]] = load i32, ptr [[V2]], align 4
+; CHECK-NEXT:    [[LOAD_V1:%.*]] = load i32, ptr [[V1]], align 4
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD_V2]], [[LOAD_V1]]
 ; CHECK-NEXT:    ret i32 [[ADD]]
 ;
 entry:
   %v1 = alloca i32, align 4
   %v2 = alloca i32, align 4
-  store i32 0, i32* %v1, align 4
-  call void @escape(i32* nonnull %v1)
-  store i32 55555, i32* %v2, align 4
+  store i32 0, ptr %v1, align 4
+  call void @escape(ptr nonnull %v1)
+  store i32 55555, ptr %v2, align 4
   %call = call i32 @getval()
-  store i32 %call, i32* %v2, align 4
-  call void @escape(i32* nonnull %v2)
-  %load.v2 = load i32, i32* %v2, align 4
-  %load.v1 = load i32, i32* %v1, align 4
+  store i32 %call, ptr %v2, align 4
+  call void @escape(ptr nonnull %v2)
+  %load.v2 = load i32, ptr %v2, align 4
+  %load.v1 = load i32, ptr %v1, align 4
   %add = add nsw i32 %load.v2, %load.v1
   ret i32 %add
 }
 
-declare void @escape(i32*)
+declare void @escape(ptr)
 
 declare i32 @getval()
 
-declare void @escape_and_clobber(i32*)
-declare void @escape_writeonly(i32*) writeonly
+declare void @escape_and_clobber(ptr)
+declare void @escape_writeonly(ptr) writeonly
 declare void @clobber()
 
 define i32 @test_not_captured_before_call_same_bb() {
 ; CHECK-LABEL: @test_not_captured_before_call_same_bb(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 }
 
@@ -62,40 +62,40 @@ define i32 @test_not_captured_before_call_same_bb_escape_unreachable_block() {
 ; CHECK-LABEL: @test_not_captured_before_call_same_bb_escape_unreachable_block(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ; CHECK:       unreach:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 0
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 
 unreach:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 0
 }
 
 define i32 @test_captured_and_clobbered_after_load_same_bb_2() {
 ; CHECK-LABEL: @test_captured_and_clobbered_after_load_same_bb_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  call void @escape_and_clobber(i32* %a)
-  store i32 99, i32* %a, align 4
+  call void @escape_and_clobber(ptr %a)
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %r
 }
@@ -104,16 +104,16 @@ define i32 @test_captured_after_call_same_bb_2_clobbered_later() {
 ; CHECK-LABEL: @test_captured_after_call_same_bb_2_clobbered_later(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  call void @escape_writeonly(i32* %a)
-  store i32 99, i32* %a, align 4
+  call void @escape_writeonly(ptr %a)
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %r
 }
@@ -123,23 +123,23 @@ define i32 @test_captured_sibling_path_to_call_other_blocks_1(i1 %c.1) {
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[R]], [[ELSE]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 else:
@@ -148,7 +148,7 @@ else:
 
 exit:
   %p = phi i32 [ 0, %then ], [ %r, %else ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
@@ -156,35 +156,35 @@ exit:
 define i32 @test_captured_before_call_other_blocks_2(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_call_other_blocks_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[R]], [[ELSE]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
   br label %exit
 
 else:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   %r = call i32 @getval()
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %r, %else ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
@@ -193,33 +193,33 @@ define i32 @test_captured_before_call_other_blocks_4(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_call_other_blocks_4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[R]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  call void @escape_writeonly(i32* %a)
+  store i32 55, ptr %a
+  call void @escape_writeonly(ptr %a)
   %r = call i32 @getval()
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %r, %entry ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
@@ -228,29 +228,29 @@ define i32 @test_captured_before_call_other_blocks_5(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_call_other_blocks_5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %r
 }
@@ -259,31 +259,31 @@ define i32 @test_captured_before_call_other_blocks_6(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_call_other_blocks_6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   call void @clobber()
   ret i32 %r
 }
@@ -292,20 +292,20 @@ define i32 @test_not_captured_before_call_other_blocks_1(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
@@ -315,7 +315,7 @@ else:
   br label %exit
 
 exit:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 }
 
@@ -323,29 +323,29 @@ define i32 @test_not_captured_before_call_other_blocks_2(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 else:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 exit:
@@ -356,10 +356,10 @@ define i32 @test_not_captured_before_call_other_blocks_3(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_3(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[EXIT]]
@@ -367,13 +367,13 @@ define i32 @test_not_captured_before_call_other_blocks_3(i1 %c.1) {
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 else:
@@ -391,16 +391,16 @@ define i32 @test_not_captured_before_call_other_blocks_4(i1 %c.1) {
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[R]], [[ELSE]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
@@ -408,12 +408,12 @@ then:
 
 else:
   %r = call i32 @getval()
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %r, %else ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
@@ -425,27 +425,27 @@ define i32 @test_not_captured_before_call_other_blocks_5(i1 %c.1) {
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ [[R]], [[THEN]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
   %r = call i32 @getval()
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ %r, %then ], [ 0, %entry ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
@@ -454,33 +454,33 @@ define i32 @test_not_captured_before_call_other_blocks_6(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ [[R]], [[THEN]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
   %r = call i32 @getval()
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ %r, %then ], [ 0, %entry ]
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   call void @clobber()
   ret i32 %p
 }
@@ -490,31 +490,31 @@ define i32 @test_not_captured_before_call_other_blocks_7(i1 %c.1) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[R]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %r, %entry ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
@@ -522,20 +522,20 @@ exit:
 define i32 @test_not_captured_before_call_same_bb_but_read() {
 ; CHECK-LABEL: @test_not_captured_before_call_same_bb_but_read(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    [[RES:%.*]] = add i32 [[R]], [[LV]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval()
-  %lv = load i32, i32* %a
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  %lv = load i32, ptr %a
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   %res = add i32 %r, %lv
   ret i32 %res
 }
@@ -546,123 +546,121 @@ define i32 @test_captured_after_loop(i1 %c.1) {
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br label %loop
 
 loop:
   %r = call i32 @getval()
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %loop, label %exit
 
 exit:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 }
 
 define i32 @test_captured_in_loop(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_in_loop(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br label %loop
 
 loop:
   %r = call i32 @getval()
-  call void @escape_writeonly(i32* %a)
-  store i32 99, i32* %a, align 4
+  call void @escape_writeonly(ptr %a)
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %loop, label %exit
 
 exit:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
-define void @test_escaping_store_removed(i8* %src, i64** %escape) {
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
+define void @test_escaping_store_removed(ptr %src, ptr %escape) {
 ; CHECK-LABEL: @test_escaping_store_removed(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[EXT_A:%.*]] = bitcast i64* [[A]] to i8*
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[EXT_A]], i8* [[SRC:%.*]], i64 8, i1 false)
-; CHECK-NEXT:    store i64* [[A]], i64** [[ESCAPE:%.*]], align 8
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[A]], ptr [[SRC:%.*]], i64 8, i1 false)
+; CHECK-NEXT:    store ptr [[A]], ptr [[ESCAPE:%.*]], align 8
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    store i64 99, i64* [[A]], align 8
+; CHECK-NEXT:    store i64 99, ptr [[A]], align 8
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %a = alloca i64, align 8
-  %ext.a = bitcast i64* %a to i8*
-  store i64 0, i64* %a
+  store i64 0, ptr %a
   call void @clobber()
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ext.a, i8* %src, i64 8, i1 false)
-  store i64* %a, i64** %escape, align 8
-  store i64* %a, i64** %escape, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %src, i64 8, i1 false)
+  store ptr %a, ptr %escape, align 8
+  store ptr %a, ptr %escape, align 8
   call void @clobber()
-  store i64 99, i64* %a
+  store i64 99, ptr %a
   call void @clobber()
   ret void
 }
 
 
-define void @test_invoke_captures() personality i8* undef {
+define void @test_invoke_captures() personality ptr undef {
 ; CHECK-LABEL: @test_invoke_captures(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    invoke void @clobber()
 ; CHECK-NEXT:    to label [[BB2:%.*]] unwind label [[BB5:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 0, i32* [[A]], align 8
-; CHECK-NEXT:    invoke void @escape(i32* [[A]])
+; CHECK-NEXT:    store i32 0, ptr [[A]], align 8
+; CHECK-NEXT:    invoke void @escape(ptr [[A]])
 ; CHECK-NEXT:    to label [[BB9:%.*]] unwind label [[BB10:%.*]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb5:
-; CHECK-NEXT:    [[LP_1:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    [[LP_1:%.*]] = landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb9:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb10:
-; CHECK-NEXT:    [[LP_2:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    [[LP_2:%.*]] = landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
 ; CHECK-NEXT:    unreachable
 ;
 bb:
   %a = alloca i32
-  store i32 99, i32* %a
+  store i32 99, ptr %a
   invoke void @clobber()
   to label %bb2 unwind label %bb5
 
 bb2:
-  store i32 0, i32* %a, align 8
-  invoke void @escape(i32* %a)
+  store i32 0, ptr %a, align 8
+  invoke void @escape(ptr %a)
   to label %bb9 unwind label %bb10
 
 bb4:
   ret void
 
 bb5:
-  %lp.1 = landingpad { i8*, i32 }
+  %lp.1 = landingpad { ptr, i32 }
   cleanup
   ret void
 
@@ -670,40 +668,40 @@ bb9:
   ret void
 
 bb10:
-  %lp.2 = landingpad { i8*, i32 }
+  %lp.2 = landingpad { ptr, i32 }
   cleanup
   unreachable
 }
 
-declare noalias i32* @alloc() nounwind
+declare noalias ptr @alloc() nounwind
 declare i32 @getval_nounwind() nounwind
 
 define i32 @test_not_captured_before_load_same_bb_noalias_call() {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_noalias_call(
-; CHECK-NEXT:    [[A:%.*]] = call i32* @alloc()
+; CHECK-NEXT:    [[A:%.*]] = call ptr @alloc()
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval_nounwind()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
-  %a = call i32* @alloc()
-  store i32 55, i32* %a
+  %a = call ptr @alloc()
+  store i32 55, ptr %a
   %r = call i32 @getval_nounwind()
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 }
 
-define i32 @test_not_captured_before_load_same_bb_noalias_arg(i32* noalias %a) {
+define i32 @test_not_captured_before_load_same_bb_noalias_arg(ptr noalias %a) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_noalias_arg(
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval_nounwind()
-; CHECK-NEXT:    store i32 99, i32* [[A:%.*]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   %r = call i32 @getval_nounwind()
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %r
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
index f50d1775e576b..3fe1d19fb0e7c 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
@@ -1,431 +1,431 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes='dse' -S %s | FileCheck %s
 
-declare void @escape_and_clobber(i32*)
-declare void @escape_writeonly(i32*) writeonly
+declare void @escape_and_clobber(ptr)
+declare void @escape_writeonly(ptr) writeonly
 declare void @clobber()
 
-define i32 @test_not_captured_before_load_same_bb(i32** %in.ptr) {
+define i32 @test_not_captured_before_load_same_bb(ptr %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_same_bb_escape_unreachable_block(i32** %in.ptr) {
+define i32 @test_not_captured_before_load_same_bb_escape_unreachable_block(ptr %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_escape_unreachable_block(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ; CHECK:       unreach:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 0
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 
 unreach:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 0
 }
 
-define i32 @test_captured_and_clobbered_after_load_same_bb_2(i32** %in.ptr) {
+define i32 @test_captured_and_clobbered_after_load_same_bb_2(ptr %in.ptr) {
 ; CHECK-LABEL: @test_captured_and_clobbered_after_load_same_bb_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_and_clobber(i32* %a)
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_and_clobber(ptr %a)
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_after_load_same_bb_2_clobbered_later(i32** %in.ptr) {
+define i32 @test_captured_after_load_same_bb_2_clobbered_later(ptr %in.ptr) {
 ; CHECK-LABEL: @test_captured_after_load_same_bb_2_clobbered_later(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_writeonly(i32* %a)
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_writeonly(ptr %a)
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_and_clobbered_before_load_same_bb_1(i32** %in.ptr) {
+define i32 @test_captured_and_clobbered_before_load_same_bb_1(ptr %in.ptr) {
 ; CHECK-LABEL: @test_captured_and_clobbered_before_load_same_bb_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  call void @escape_and_clobber(i32* %a)
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  call void @escape_and_clobber(ptr %a)
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_before_load_same_bb_1_clobbered_later(i32** %in.ptr) {
+define i32 @test_captured_before_load_same_bb_1_clobbered_later(ptr %in.ptr) {
 ; CHECK-LABEL: @test_captured_before_load_same_bb_1_clobbered_later(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  call void @escape_writeonly(i32* %a)
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  call void @escape_writeonly(ptr %a)
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_before_load_same_bb_2(i32** %in.ptr) {
+define i32 @test_captured_before_load_same_bb_2(ptr %in.ptr) {
 ; CHECK-LABEL: @test_captured_before_load_same_bb_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  call void @escape_writeonly(i32* %a)
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  call void @escape_writeonly(ptr %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_same_bb_clobber(i32** %in.ptr) {
+define i32 @test_not_captured_before_load_same_bb_clobber(ptr %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_clobber(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   call void @clobber()
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_before_load_same_bb(i32** %in.ptr) {
+define i32 @test_captured_before_load_same_bb(ptr %in.ptr) {
 ; CHECK-LABEL: @test_captured_before_load_same_bb(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  call void @escape_and_clobber(i32* %a)
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  call void @escape_and_clobber(ptr %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_sibling_path_to_load_other_blocks_1(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_sibling_path_to_load_other_blocks_1(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_sibling_path_to_load_other_blocks_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[IN_LV_2]], [[ELSE]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 else:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %in.lv.2, %else ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_only_captured_sibling_path_with_ret_to_load_other_blocks(i32** %in.ptr, i1 %c.1) {
+define i32 @test_only_captured_sibling_path_with_ret_to_load_other_blocks(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_only_captured_sibling_path_with_ret_to_load_other_blocks(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    ret i32 0
 ; CHECK:       else:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   ret i32 0
 
 else:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
   br label %exit
 
 exit:
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_before_load_other_blocks_2(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_before_load_other_blocks_2(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_load_other_blocks_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[IN_LV_2]], [[ELSE]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
   br label %exit
 
 else:
-  call void @escape_and_clobber(i32* %a)
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
+  call void @escape_and_clobber(ptr %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %in.lv.2, %else ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_captured_before_load_other_blocks_4(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_before_load_other_blocks_4(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_load_other_blocks_4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[IN_LV_2]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  call void @escape_writeonly(i32* %a)
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
+  store i32 55, ptr %a
+  call void @escape_writeonly(ptr %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %in.lv.2, %entry ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_captured_before_load_other_blocks_5(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_before_load_other_blocks_5(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_load_other_blocks_5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_before_load_other_blocks_6(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_before_load_other_blocks_6(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_load_other_blocks_6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   call void @clobber()
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_other_blocks_1(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_1(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
@@ -435,54 +435,54 @@ else:
   br label %exit
 
 exit:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_other_blocks_2(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_2(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 else:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 exit:
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_other_blocks_3(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_3(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_3(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[EXIT]]
@@ -490,14 +490,14 @@ define i32 @test_not_captured_before_load_other_blocks_3(i32** %in.ptr, i1 %c.1)
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 else:
@@ -507,287 +507,284 @@ exit:
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_other_blocks_4(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_4(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_4(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[IN_LV_2]], [[ELSE]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
   br label %exit
 
 else:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_writeonly(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %in.lv.2, %else ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_not_captured_before_load_other_blocks_5(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_5(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ [[IN_LV_2]], [[THEN]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_writeonly(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ %in.lv.2, %then ], [ 0, %entry ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_not_captured_before_load_other_blocks_6(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_6(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ [[IN_LV_2]], [[THEN]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %exit
 
 then:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_writeonly(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ %in.lv.2, %then ], [ 0, %entry ]
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_not_captured_before_load_other_blocks_7(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_before_load_other_blocks_7(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_load_other_blocks_7(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[IN_LV_2]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
 entry:
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_writeonly(i32* %a)
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_writeonly(ptr %a)
   br i1 %c.1, label %then, label %exit
 
 then:
-  call void @escape_writeonly(i32* %a)
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
   %p = phi i32 [ 0, %then ], [ %in.lv.2, %entry ]
-  store i32 99, i32* %a, align 4
+  store i32 99, ptr %a, align 4
   call void @clobber()
   ret i32 %p
 }
 
-define i32 @test_not_captured_before_load_same_bb_but_read(i32** %in.ptr) {
+define i32 @test_not_captured_before_load_same_bb_but_read(ptr %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_but_read(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    [[RES:%.*]] = add i32 [[IN_LV_2]], [[LV]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  %lv = load i32, i32* %a
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  %lv = load i32, ptr %a
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   %res = add i32 %in.lv.2, %lv
   ret i32 %res
 }
 
-define i32 @test_not_captured_before_load_may_alias_same_bb_but_read(i32** %in.ptr, i32* %b, i1 %c) {
+define i32 @test_not_captured_before_load_may_alias_same_bb_but_read(ptr %in.ptr, ptr %b, i1 %c) {
 ; CHECK-LABEL: @test_not_captured_before_load_may_alias_same_bb_but_read(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    [[PTR:%.*]] = select i1 [[C:%.*]], i32* [[A]], i32* [[B:%.*]]
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[PTR]], align 4
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    [[PTR:%.*]] = select i1 [[C:%.*]], ptr [[A]], ptr [[B:%.*]]
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[PTR]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    [[RES:%.*]] = add i32 [[IN_LV_2]], [[LV]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  %ptr = select i1 %c, i32* %a, i32* %b
-  %lv = load i32, i32* %ptr
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  %ptr = select i1 %c, ptr %a, ptr %b
+  %lv = load i32, ptr %ptr
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   %res = add i32 %in.lv.2, %lv
   ret i32 %res
 }
 
-define i32 @test_captured_after_loop(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_after_loop(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_after_loop(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br label %loop
 
 loop:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %loop, label %exit
 
 exit:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @test_captured_in_loop(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_in_loop(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_in_loop(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br label %loop
 
 loop:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  call void @escape_writeonly(i32* %a)
-  store i32 99, i32* %a, align 4
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  call void @escape_writeonly(ptr %a)
+  store i32 99, ptr %a, align 4
   br i1 %c.1, label %loop, label %exit
 
 exit:
-  call void @escape_and_clobber(i32* %a)
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
 
- at global = external global [10 x i16]*
+ at global = external global ptr
 
 define void @test_memset_not_captured_before_load() {
 ; CHECK-LABEL: @test_memset_not_captured_before_load(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [2 x i32], align 4
-; CHECK-NEXT:    [[LV_1:%.*]] = load [10 x i16]*, [10 x i16]** @global, align 8
-; CHECK-NEXT:    [[GEP_A_0:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A]], i32 0, i32 0
-; CHECK-NEXT:    store i32 1, i32* [[GEP_A_0]], align 4
-; CHECK-NEXT:    [[GEP_LV:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* [[LV_1]], i64 0, i32 1
-; CHECK-NEXT:    [[LV_2:%.*]] = load i16, i16* [[GEP_LV]], align 2
+; CHECK-NEXT:    [[LV_1:%.*]] = load ptr, ptr @global, align 8
+; CHECK-NEXT:    store i32 1, ptr [[A]], align 4
+; CHECK-NEXT:    [[GEP_LV:%.*]] = getelementptr inbounds [10 x i16], ptr [[LV_1]], i64 0, i32 1
+; CHECK-NEXT:    [[LV_2:%.*]] = load i16, ptr [[GEP_LV]], align 2
 ; CHECK-NEXT:    [[EXT_LV_2:%.*]] = zext i16 [[LV_2]] to i32
-; CHECK-NEXT:    [[GEP_A_1:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A]], i32 0, i32 1
-; CHECK-NEXT:    store i32 [[EXT_LV_2]], i32* [[GEP_A_1]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[GEP_A_0]])
+; CHECK-NEXT:    [[GEP_A_1:%.*]] = getelementptr inbounds [2 x i32], ptr [[A]], i32 0, i32 1
+; CHECK-NEXT:    store i32 [[EXT_LV_2]], ptr [[GEP_A_1]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [2 x i32], align 4
-  %cast.a = bitcast [2 x i32]* %a to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast.a, i8 0, i32 8, i1 false)
-  %lv.1 = load [10 x i16]*, [10 x i16]** @global, align 8
-  %gep.a.0 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i32 0, i32 0
-  store i32 1, i32* %gep.a.0, align 4
-  %gep.lv = getelementptr inbounds [10 x i16], [10 x i16]* %lv.1, i64 0, i32 1
-  %lv.2 = load i16, i16* %gep.lv, align 2
+  call void @llvm.memset.p0.i32(ptr %a, i8 0, i32 8, i1 false)
+  %lv.1 = load ptr, ptr @global, align 8
+  store i32 1, ptr %a, align 4
+  %gep.lv = getelementptr inbounds [10 x i16], ptr %lv.1, i64 0, i32 1
+  %lv.2 = load i16, ptr %gep.lv, align 2
   %ext.lv.2 = zext i16 %lv.2 to i32
-  %gep.a.1 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i32 0, i32 1
-  store i32 %ext.lv.2, i32* %gep.a.1, align 4
-  call void @escape_and_clobber(i32* %gep.a.0)
+  %gep.a.1 = getelementptr inbounds [2 x i32], ptr %a, i32 0, i32 1
+  store i32 %ext.lv.2, ptr %gep.a.1, align 4
+  call void @escape_and_clobber(ptr %a)
   ret void
 }
 
@@ -795,40 +792,36 @@ define void @test_test_not_captured_before_load(i1 %c.1) {
 ; CHECK-LABEL: @test_test_not_captured_before_load(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[A:%.*]] = alloca [2 x i32], align 4
-; CHECK-NEXT:    [[CAST_A:%.*]] = bitcast [2 x i32]* [[A]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[CAST_A]], i32 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[TMP0]], i8 0, i32 4, i1 false)
-; CHECK-NEXT:    [[LV_1:%.*]] = load [10 x i16]*, [10 x i16]** @global, align 8
-; CHECK-NEXT:    [[GEP_LV:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* [[LV_1]], i64 0, i32 1
-; CHECK-NEXT:    [[LV_2:%.*]] = load i16, i16* [[GEP_LV]], align 2
-; CHECK-NEXT:    [[GEP_A_0:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A]], i32 0, i32 0
-; CHECK-NEXT:    store i32 1, i32* [[GEP_A_0]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr align 1 [[TMP0]], i8 0, i32 4, i1 false)
+; CHECK-NEXT:    [[LV_1:%.*]] = load ptr, ptr @global, align 8
+; CHECK-NEXT:    [[GEP_LV:%.*]] = getelementptr inbounds [10 x i16], ptr [[LV_1]], i64 0, i32 1
+; CHECK-NEXT:    [[LV_2:%.*]] = load i16, ptr [[GEP_LV]], align 2
+; CHECK-NEXT:    store i32 1, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[GEP_A_0]])
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[EXT_LV_2:%.*]] = zext i16 [[LV_2]] to i32
-; CHECK-NEXT:    [[GEP_A_1:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A]], i32 0, i32 1
-; CHECK-NEXT:    store i32 [[EXT_LV_2]], i32* [[GEP_A_1]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[GEP_A_0]])
+; CHECK-NEXT:    [[GEP_A_1:%.*]] = getelementptr inbounds [2 x i32], ptr [[A]], i32 0, i32 1
+; CHECK-NEXT:    store i32 [[EXT_LV_2]], ptr [[GEP_A_1]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %a = alloca [2 x i32], align 4
-  %cast.a = bitcast [2 x i32]* %a to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast.a, i8 0, i32 8, i1 false)
-  %lv.1 = load [10 x i16]*, [10 x i16]** @global, align 8
-  %gep.lv = getelementptr inbounds [10 x i16], [10 x i16]* %lv.1, i64 0, i32 1
-  %lv.2 = load i16, i16* %gep.lv, align 2
-  %gep.a.0 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i32 0, i32 0
-  store i32 1, i32* %gep.a.0, align 4
+  call void @llvm.memset.p0.i32(ptr %a, i8 0, i32 8, i1 false)
+  %lv.1 = load ptr, ptr @global, align 8
+  %gep.lv = getelementptr inbounds [10 x i16], ptr %lv.1, i64 0, i32 1
+  %lv.2 = load i16, ptr %gep.lv, align 2
+  store i32 1, ptr %a, align 4
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_and_clobber(i32* %gep.a.0)
+  call void @escape_and_clobber(ptr %a)
   br label %exit
 
 else:
@@ -836,73 +829,71 @@ else:
 
 exit:
   %ext.lv.2 = zext i16 %lv.2 to i32
-  %gep.a.1 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i32 0, i32 1
-  store i32 %ext.lv.2, i32* %gep.a.1, align 4
-  call void @escape_and_clobber(i32* %gep.a.0)
+  %gep.a.1 = getelementptr inbounds [2 x i32], ptr %a, i32 0, i32 1
+  store i32 %ext.lv.2, ptr %gep.a.1, align 4
+  call void @escape_and_clobber(ptr %a)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
 
 declare void @use.i64(i64)
 
-define i64 @test_a_not_captured_at_all(i64** %ptr, i64** %ptr.2, i1 %c) {
+define i64 @test_a_not_captured_at_all(ptr %ptr, ptr %ptr.2, i1 %c) {
 ; CHECK-LABEL: @test_a_not_captured_at_all(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    [[B:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    store i64* [[B]], i64** [[PTR:%.*]], align 8
-; CHECK-NEXT:    [[LV_1:%.*]] = load i64*, i64** [[PTR_2:%.*]], align 8
+; CHECK-NEXT:    store ptr [[B]], ptr [[PTR:%.*]], align 8
+; CHECK-NEXT:    [[LV_1:%.*]] = load ptr, ptr [[PTR_2:%.*]], align 8
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[EXIT:%.*]], label [[THEN:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LV_2:%.*]] = load i64, i64* [[LV_1]], align 4
+; CHECK-NEXT:    [[LV_2:%.*]] = load i64, ptr [[LV_1]], align 4
 ; CHECK-NEXT:    call void @use.i64(i64 [[LV_2]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[A_CAST:%.*]] = bitcast i64* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 8, i8* [[A_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[A_CAST]], i8 0, i64 8, i1 false)
-; CHECK-NEXT:    [[L:%.*]] = load i64, i64* [[A]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[A]], i8 0, i64 8, i1 false)
+; CHECK-NEXT:    [[L:%.*]] = load i64, ptr [[A]], align 4
 ; CHECK-NEXT:    ret i64 [[L]]
 ;
 entry:
   %a = alloca i64, align 8
   %b = alloca i64, align 8
-  store i64* %b, i64** %ptr, align 8
-  %lv.1 = load i64*, i64** %ptr.2, align 8
+  store ptr %b, ptr %ptr, align 8
+  %lv.1 = load ptr, ptr %ptr.2, align 8
   br i1 %c, label %exit, label %then
 
 then:
-  %lv.2 = load i64, i64* %lv.1
+  %lv.2 = load i64, ptr %lv.1
   call void @use.i64(i64 %lv.2)
   br label %exit
 
 exit:
-  %a.cast = bitcast i64* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* %a.cast)
-  store i64 99, i64* %a
+  call void @llvm.lifetime.start.p0(i64 8, ptr %a)
+  store i64 99, ptr %a
   call void @clobber()
-  call void @llvm.memset.p0i8.i64(i8* %a.cast, i8 0, i64 8, i1 false)
-  %l = load i64, i64* %a
+  call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 8, i1 false)
+  %l = load i64, ptr %a
   ret i64 %l
 }
 
-define i32 @test_not_captured_both_paths_1(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_both_paths_1(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_both_paths_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[IN_LV_2]], [[ELSE]] ]
@@ -910,19 +901,19 @@ define i32 @test_not_captured_both_paths_1(i32** %in.ptr, i1 %c.1) {
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 else:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
@@ -931,20 +922,20 @@ exit:
   ret i32 %p
 }
 
-define i32 @test_not_captured_both_paths_2(i32** %in.ptr, i1 %c.1) {
+define i32 @test_not_captured_both_paths_2(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_both_paths_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ [[IN_LV_2]], [[THEN]] ], [ 0, [[ELSE]] ]
@@ -952,19 +943,19 @@ define i32 @test_not_captured_both_paths_2(i32** %in.ptr, i1 %c.1) {
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 else:
-  store i32 99, i32* %a, align 4
-  call void @escape_writeonly(i32* %a)
+  store i32 99, ptr %a, align 4
+  call void @escape_writeonly(ptr %a)
   br label %exit
 
 exit:
@@ -973,20 +964,20 @@ exit:
   ret i32 %p
 }
 
-define i32 @test_captured_before_store_both_paths_2(i32** %in.ptr, i1 %c.1) {
+define i32 @test_captured_before_store_both_paths_2(ptr %in.ptr, i1 %c.1) {
 ; CHECK-LABEL: @test_captured_before_store_both_paths_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 55, ptr [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    call void @escape_writeonly(ptr [[A]])
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32 [ [[IN_LV_2]], [[THEN]] ], [ 0, [[ELSE]] ]
@@ -994,19 +985,19 @@ define i32 @test_captured_before_store_both_paths_2(i32** %in.ptr, i1 %c.1) {
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
+  store i32 55, ptr %a
   br i1 %c.1, label %then, label %else
 
 then:
-  call void @escape_writeonly(i32* %a)
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
+  call void @escape_writeonly(ptr %a)
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
   br label %exit
 
 else:
-  call void @escape_writeonly(i32* %a)
-  store i32 99, i32* %a, align 4
+  call void @escape_writeonly(ptr %a)
+  store i32 99, ptr %a, align 4
   br label %exit
 
 exit:
@@ -1016,165 +1007,163 @@ exit:
 }
 
 
-declare noalias i32* @alloc() nounwind
+declare noalias ptr @alloc() nounwind
 
-define i32 @test_not_captured_before_load_same_bb_noalias_call(i32** %in.ptr) {
+define i32 @test_not_captured_before_load_same_bb_noalias_call(ptr %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_noalias_call(
-; CHECK-NEXT:    [[A:%.*]] = call i32* @alloc()
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[A:%.*]] = call ptr @alloc()
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
-  %a = call i32* @alloc()
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  %a = call ptr @alloc()
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @test_not_captured_before_load_same_bb_noalias_arg(i32** %in.ptr, i32* noalias %a) {
+define i32 @test_not_captured_before_load_same_bb_noalias_arg(ptr %in.ptr, ptr noalias %a) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_noalias_arg(
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[A:%.*]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
-  store i32 55, i32* %a
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %a, align 4
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %a, align 4
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.lv.2
 }
 
-define i32 @instruction_captures_multiple_objects(i32* %p.1, i32** %p.2, i32** %p.3, i1 %c) {
+define i32 @instruction_captures_multiple_objects(ptr %p.1, ptr %p.2, ptr %p.3, i1 %c) {
 ; CHECK-LABEL: @instruction_captures_multiple_objects(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A_1:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[A_2:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 0, i32* [[P_1:%.*]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[P_1:%.*]], align 8
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LV_2:%.*]] = load i32*, i32** [[P_2:%.*]], align 8
-; CHECK-NEXT:    [[LV_2_2:%.*]] = load i32, i32* [[LV_2]], align 4
+; CHECK-NEXT:    [[LV_2:%.*]] = load ptr, ptr [[P_2:%.*]], align 8
+; CHECK-NEXT:    [[LV_2_2:%.*]] = load i32, ptr [[LV_2]], align 4
 ; CHECK-NEXT:    ret i32 [[LV_2_2]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LV_3:%.*]] = load i32*, i32** [[P_3:%.*]], align 8
-; CHECK-NEXT:    [[LV_3_2:%.*]] = load i32, i32* [[LV_3]], align 4
-; CHECK-NEXT:    call void @capture_and_clobber_multiple(i32* [[A_1]], i32* [[A_2]])
+; CHECK-NEXT:    [[LV_3:%.*]] = load ptr, ptr [[P_3:%.*]], align 8
+; CHECK-NEXT:    [[LV_3_2:%.*]] = load i32, ptr [[LV_3]], align 4
+; CHECK-NEXT:    call void @capture_and_clobber_multiple(ptr [[A_1]], ptr [[A_2]])
 ; CHECK-NEXT:    ret i32 [[LV_3_2]]
 ;
 entry:
   %a.1 = alloca i32
   %a.2 = alloca i32
-  store i32 0, i32* %p.1, align 8
+  store i32 0, ptr %p.1, align 8
   br i1 %c, label %then, label %else
 
 then:
-  store i32 99, i32* %a.2, align 4
-  %lv.2 = load i32*, i32** %p.2
-  %lv.2.2 = load i32, i32* %lv.2
-  store i32 0, i32* %a.1, align 8
+  store i32 99, ptr %a.2, align 4
+  %lv.2 = load ptr, ptr %p.2
+  %lv.2.2 = load i32, ptr %lv.2
+  store i32 0, ptr %a.1, align 8
   ret i32 %lv.2.2
 
 else:
-  %lv.3 = load i32*, i32** %p.3
-  %lv.3.2 = load i32, i32* %lv.3
-  call void @capture_and_clobber_multiple(i32* %a.1, i32* %a.2)
+  %lv.3 = load ptr, ptr %p.3
+  %lv.3.2 = load i32, ptr %lv.3
+  call void @capture_and_clobber_multiple(ptr %a.1, ptr %a.2)
   ret i32 %lv.3.2
 }
 
-declare void @capture_and_clobber_multiple(i32*, i32*)
+declare void @capture_and_clobber_multiple(ptr, ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
-define i64 @earliest_escape_ptrtoint(i64** %p.1) {
+define i64 @earliest_escape_ptrtoint(ptr %p.1) {
 ; CHECK-LABEL: @earliest_escape_ptrtoint(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A_1:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[LV_1:%.*]] = load i64*, i64** [[P_1:%.*]], align 8
-; CHECK-NEXT:    [[LV_2:%.*]] = load i64, i64* [[LV_1]], align 4
-; CHECK-NEXT:    store i64* [[A_1]], i64** [[P_1]], align 8
-; CHECK-NEXT:    [[A_2_CAST:%.*]] = bitcast i64* [[A_2]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 8, i8* [[A_2_CAST]])
+; CHECK-NEXT:    [[LV_1:%.*]] = load ptr, ptr [[P_1:%.*]], align 8
+; CHECK-NEXT:    [[LV_2:%.*]] = load i64, ptr [[LV_1]], align 4
+; CHECK-NEXT:    store ptr [[A_1]], ptr [[P_1]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[A_2]])
 ; CHECK-NEXT:    ret i64 [[LV_2]]
 ;
 entry:
   %a.1 = alloca i64
   %a.2 = alloca i64
-  store i64 99, i64* %a.1
-  %lv.1 = load i64*, i64** %p.1
-  %lv.2 = load i64, i64* %lv.1
-  store i64* %a.1, i64** %p.1, align 8
-  %int = ptrtoint i64* %a.2 to i64
-  store i64 %int , i64* %a.2, align 8
-  %a.2.cast = bitcast i64* %a.2 to i8*
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* %a.2.cast)
+  store i64 99, ptr %a.1
+  %lv.1 = load ptr, ptr %p.1
+  %lv.2 = load i64, ptr %lv.1
+  store ptr %a.1, ptr %p.1, align 8
+  %int = ptrtoint ptr %a.2 to i64
+  store i64 %int , ptr %a.2, align 8
+  call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
   ret i64 %lv.2
 }
 
 define i32 @test_not_captured_before_load_of_ptrtoint(i64 %in) {
 ; CHECK-LABEL: @test_not_captured_before_load_of_ptrtoint(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_PTR:%.*]] = inttoptr i64 [[IN:%.*]] to i32*
-; CHECK-NEXT:    [[IN_PTR_LOAD:%.*]] = load i32, i32* [[IN_PTR]], align 4
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[IN_PTR:%.*]] = inttoptr i64 [[IN:%.*]] to ptr
+; CHECK-NEXT:    [[IN_PTR_LOAD:%.*]] = load i32, ptr [[IN_PTR]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_PTR_LOAD]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.ptr = inttoptr i64 %in to i32*
-  %in.ptr.load = load i32, i32* %in.ptr
-  store i32 99, i32* %a
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.ptr = inttoptr i64 %in to ptr
+  %in.ptr.load = load i32, ptr %in.ptr
+  store i32 99, ptr %a
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.ptr.load
 }
 
-declare i32* @getptr()
+declare ptr @getptr()
 
 define i32 @test_not_captured_before_load_of_call() {
 ; CHECK-LABEL: @test_not_captured_before_load_of_call(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[IN_PTR:%.*]] = call i32* @getptr() #[[ATTR4:[0-9]+]]
-; CHECK-NEXT:    [[IN_PTR_LOAD:%.*]] = load i32, i32* [[IN_PTR]], align 4
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
+; CHECK-NEXT:    [[IN_PTR:%.*]] = call ptr @getptr() #[[ATTR4:[0-9]+]]
+; CHECK-NEXT:    [[IN_PTR_LOAD:%.*]] = load i32, ptr [[IN_PTR]], align 4
+; CHECK-NEXT:    store i32 99, ptr [[A]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[A]])
 ; CHECK-NEXT:    ret i32 [[IN_PTR_LOAD]]
 ;
   %a = alloca i32, align 4
-  store i32 55, i32* %a
-  %in.ptr = call i32* @getptr() readnone
-  %in.ptr.load = load i32, i32* %in.ptr
-  store i32 99, i32* %a
-  call void @escape_and_clobber(i32* %a)
+  store i32 55, ptr %a
+  %in.ptr = call ptr @getptr() readnone
+  %in.ptr.load = load i32, ptr %in.ptr
+  store i32 99, ptr %a
+  call void @escape_and_clobber(ptr %a)
   ret i32 %in.ptr.load
 }
 
-define i32 @test_not_captured_multiple_objects(i1 %c, i32** %in.ptr) {
+define i32 @test_not_captured_multiple_objects(i1 %c, ptr %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_multiple_objects(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[O:%.*]] = select i1 [[C:%.*]], i32* [[A]], i32* [[B]]
-; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
-; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
-; CHECK-NEXT:    store i32 99, i32* [[O]], align 4
-; CHECK-NEXT:    call void @escape_and_clobber(i32* [[O]])
+; CHECK-NEXT:    [[O:%.*]] = select i1 [[C:%.*]], ptr [[A]], ptr [[B]]
+; CHECK-NEXT:    [[IN_LV_1:%.*]] = load ptr, ptr [[IN_PTR:%.*]], align 2
+; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, ptr [[IN_LV_1]], align 2
+; CHECK-NEXT:    store i32 99, ptr [[O]], align 4
+; CHECK-NEXT:    call void @escape_and_clobber(ptr [[O]])
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
   %a = alloca i32, align 4
   %b = alloca i32, align 4
-  %o = select i1 %c, i32* %a, i32* %b
-  store i32 55, i32* %o
-  %in.lv.1 = load i32* , i32** %in.ptr, align 2
-  %in.lv.2 = load i32 , i32* %in.lv.1, align 2
-  store i32 99, i32* %o
-  call void @escape_and_clobber(i32* %o)
+  %o = select i1 %c, ptr %a, ptr %b
+  store i32 55, ptr %o
+  %in.lv.1 = load ptr , ptr %in.ptr, align 2
+  %in.lv.2 = load i32 , ptr %in.lv.1, align 2
+  store i32 99, ptr %o
+  call void @escape_and_clobber(ptr %o)
   ret i32 %in.lv.2
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/combined-partial-overwrites.ll b/llvm/test/Transforms/DeadStoreElimination/combined-partial-overwrites.ll
index 9cf847b36eebb..15092a1a77291 100644
--- a/llvm/test/Transforms/DeadStoreElimination/combined-partial-overwrites.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/combined-partial-overwrites.ll
@@ -5,23 +5,21 @@ target triple = "powerpc64le-unknown-linux"
 
 %"struct.std::complex" = type { { float, float } }
 
-define void @_Z4testSt7complexIfE(%"struct.std::complex"* noalias nocapture sret(%"struct.std::complex") %agg.result, i64 %c.coerce) {
+define void @_Z4testSt7complexIfE(ptr noalias nocapture sret(%"struct.std::complex") %agg.result, i64 %c.coerce) {
 ; CHECK-LABEL: @_Z4testSt7complexIfE(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[REF_TMP:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[TMPCAST:%.*]] = bitcast i64* [[REF_TMP]] to %"struct.std::complex"*
 ; CHECK-NEXT:    [[C_SROA_0_0_EXTRACT_SHIFT:%.*]] = lshr i64 [[C_COERCE:%.*]], 32
 ; CHECK-NEXT:    [[C_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[C_SROA_0_0_EXTRACT_SHIFT]] to i32
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32 [[C_SROA_0_0_EXTRACT_TRUNC]] to float
 ; CHECK-NEXT:    [[C_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[C_COERCE]] to i32
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[C_SROA_2_0_EXTRACT_TRUNC]] to float
-; CHECK-NEXT:    call void @_Z3barSt7complexIfE(%"struct.std::complex"* nonnull sret(%"struct.std::complex") [[TMPCAST]], i64 [[C_COERCE]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[REF_TMP]], align 8
-; CHECK-NEXT:    [[_M_VALUE_REALP_I_I:%.*]] = getelementptr inbounds %"struct.std::complex", %"struct.std::complex"* [[AGG_RESULT:%.*]], i64 0, i32 0, i32 0
+; CHECK-NEXT:    call void @_Z3barSt7complexIfE(ptr nonnull sret(%"struct.std::complex") [[REF_TMP]], i64 [[C_COERCE]])
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[REF_TMP]], align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP2]], 32
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32 [[TMP4]] to float
-; CHECK-NEXT:    [[_M_VALUE_IMAGP_I_I:%.*]] = getelementptr inbounds %"struct.std::complex", %"struct.std::complex"* [[AGG_RESULT]], i64 0, i32 0, i32 1
+; CHECK-NEXT:    [[_M_VALUE_IMAGP_I_I:%.*]] = getelementptr inbounds %"struct.std::complex", ptr [[AGG_RESULT:%.*]], i64 0, i32 0, i32 1
 ; CHECK-NEXT:    [[TMP6:%.*]] = trunc i64 [[TMP2]] to i32
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32 [[TMP6]] to float
 ; CHECK-NEXT:    [[MUL_AD_I_I:%.*]] = fmul fast float [[TMP5]], [[TMP1]]
@@ -30,117 +28,96 @@ define void @_Z4testSt7complexIfE(%"struct.std::complex"* noalias nocapture sret
 ; CHECK-NEXT:    [[MUL_AC_I_I:%.*]] = fmul fast float [[TMP5]], [[TMP0]]
 ; CHECK-NEXT:    [[MUL_BD_I_I:%.*]] = fmul fast float [[TMP7]], [[TMP1]]
 ; CHECK-NEXT:    [[MUL_R_I_I:%.*]] = fsub fast float [[MUL_AC_I_I]], [[MUL_BD_I_I]]
-; CHECK-NEXT:    store float [[MUL_R_I_I]], float* [[_M_VALUE_REALP_I_I]], align 4
-; CHECK-NEXT:    store float [[MUL_I_I_I]], float* [[_M_VALUE_IMAGP_I_I]], align 4
+; CHECK-NEXT:    store float [[MUL_R_I_I]], ptr [[AGG_RESULT]], align 4
+; CHECK-NEXT:    store float [[MUL_I_I_I]], ptr [[_M_VALUE_IMAGP_I_I]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
   %ref.tmp = alloca i64, align 8
-  %tmpcast = bitcast i64* %ref.tmp to %"struct.std::complex"*
   %c.sroa.0.0.extract.shift = lshr i64 %c.coerce, 32
   %c.sroa.0.0.extract.trunc = trunc i64 %c.sroa.0.0.extract.shift to i32
   %0 = bitcast i32 %c.sroa.0.0.extract.trunc to float
   %c.sroa.2.0.extract.trunc = trunc i64 %c.coerce to i32
   %1 = bitcast i32 %c.sroa.2.0.extract.trunc to float
-  call void @_Z3barSt7complexIfE(%"struct.std::complex"* nonnull sret(%"struct.std::complex") %tmpcast, i64 %c.coerce)
-  %2 = bitcast %"struct.std::complex"* %agg.result to i64*
-  %3 = load i64, i64* %ref.tmp, align 8
-  store i64 %3, i64* %2, align 4
-
-  %_M_value.realp.i.i = getelementptr inbounds %"struct.std::complex", %"struct.std::complex"* %agg.result, i64 0, i32 0, i32 0
-  %4 = lshr i64 %3, 32
-  %5 = trunc i64 %4 to i32
-  %6 = bitcast i32 %5 to float
-  %_M_value.imagp.i.i = getelementptr inbounds %"struct.std::complex", %"struct.std::complex"* %agg.result, i64 0, i32 0, i32 1
-  %7 = trunc i64 %3 to i32
-  %8 = bitcast i32 %7 to float
-  %mul_ad.i.i = fmul fast float %6, %1
-  %mul_bc.i.i = fmul fast float %8, %0
+  call void @_Z3barSt7complexIfE(ptr nonnull sret(%"struct.std::complex") %ref.tmp, i64 %c.coerce)
+  %2 = load i64, ptr %ref.tmp, align 8
+  store i64 %2, ptr %agg.result, align 4
+
+  %3 = lshr i64 %2, 32
+  %4 = trunc i64 %3 to i32
+  %5 = bitcast i32 %4 to float
+  %_M_value.imagp.i.i = getelementptr inbounds %"struct.std::complex", ptr %agg.result, i64 0, i32 0, i32 1
+  %6 = trunc i64 %2 to i32
+  %7 = bitcast i32 %6 to float
+  %mul_ad.i.i = fmul fast float %5, %1
+  %mul_bc.i.i = fmul fast float %7, %0
   %mul_i.i.i = fadd fast float %mul_ad.i.i, %mul_bc.i.i
-  %mul_ac.i.i = fmul fast float %6, %0
-  %mul_bd.i.i = fmul fast float %8, %1
+  %mul_ac.i.i = fmul fast float %5, %0
+  %mul_bd.i.i = fmul fast float %7, %1
   %mul_r.i.i = fsub fast float %mul_ac.i.i, %mul_bd.i.i
-  store float %mul_r.i.i, float* %_M_value.realp.i.i, align 4
-  store float %mul_i.i.i, float* %_M_value.imagp.i.i, align 4
+  store float %mul_r.i.i, ptr %agg.result, align 4
+  store float %mul_i.i.i, ptr %_M_value.imagp.i.i, align 4
   ret void
 }
 
-declare void @_Z3barSt7complexIfE(%"struct.std::complex"* sret(%"struct.std::complex"), i64)
+declare void @_Z3barSt7complexIfE(ptr sret(%"struct.std::complex"), i64)
 
-define void @test1(i32 *%ptr) {
+define void @test1(ptr %ptr) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT:    [[WPTR:%.*]] = bitcast i32* [[PTR]] to i16*
-; CHECK-NEXT:    store i16 -30062, i16* [[WPTR]], align 2
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
-; CHECK-NEXT:    store i8 47, i8* [[BPTR3]], align 1
-; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 1
-; CHECK-NEXT:    [[WPTRP:%.*]] = bitcast i8* [[BPTR1]] to i16*
-; CHECK-NEXT:    store i16 2020, i16* [[WPTRP]], align 1
+; CHECK-NEXT:    store i16 -30062, ptr [[PTR:%.*]], align 2
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    store i8 47, ptr [[BPTR3]], align 1
+; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
+; CHECK-NEXT:    store i16 2020, ptr [[BPTR1]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  store i32 5, i32* %ptr
-  %bptr = bitcast i32* %ptr to i8*
-  store i8 7, i8* %bptr
-  %wptr = bitcast i32* %ptr to i16*
-  store i16 -30062, i16* %wptr
-  %bptr2 = getelementptr inbounds i8, i8* %bptr, i64 2
-  store i8 25, i8* %bptr2
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  store i8 47, i8* %bptr3
-  %bptr1 = getelementptr inbounds i8, i8* %bptr, i64 1
-  %wptrp = bitcast i8* %bptr1 to i16*
-  store i16 2020, i16* %wptrp, align 1
+  store i32 5, ptr %ptr
+  store i8 7, ptr %ptr
+  store i16 -30062, ptr %ptr
+  %bptr2 = getelementptr inbounds i8, ptr %ptr, i64 2
+  store i8 25, ptr %bptr2
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
+  store i8 47, ptr %bptr3
+  %bptr1 = getelementptr inbounds i8, ptr %ptr, i64 1
+  store i16 2020, ptr %bptr1, align 1
   ret void
 
 
 }
 
-define void @test2(i32 *%ptr) {
+define void @test2(ptr %ptr) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 -1
-; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 1
-; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 2
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
-; CHECK-NEXT:    [[WPTR:%.*]] = bitcast i8* [[BPTR]] to i16*
-; CHECK-NEXT:    [[WPTRM1:%.*]] = bitcast i8* [[BPTRM1]] to i16*
-; CHECK-NEXT:    [[WPTR1:%.*]] = bitcast i8* [[BPTR1]] to i16*
-; CHECK-NEXT:    [[WPTR2:%.*]] = bitcast i8* [[BPTR2]] to i16*
-; CHECK-NEXT:    [[WPTR3:%.*]] = bitcast i8* [[BPTR3]] to i16*
-; CHECK-NEXT:    store i16 1456, i16* [[WPTRM1]], align 1
-; CHECK-NEXT:    store i16 1346, i16* [[WPTR]], align 1
-; CHECK-NEXT:    store i16 1756, i16* [[WPTR1]], align 1
-; CHECK-NEXT:    store i16 1126, i16* [[WPTR2]], align 1
-; CHECK-NEXT:    store i16 5656, i16* [[WPTR3]], align 1
+; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 -1
+; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
+; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 2
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    store i16 1456, ptr [[BPTRM1]], align 1
+; CHECK-NEXT:    store i16 1346, ptr [[PTR]], align 1
+; CHECK-NEXT:    store i16 1756, ptr [[BPTR1]], align 1
+; CHECK-NEXT:    store i16 1126, ptr [[BPTR2]], align 1
+; CHECK-NEXT:    store i16 5656, ptr [[BPTR3]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
 
-  store i32 5, i32* %ptr
+  store i32 5, ptr %ptr
 
-  %bptr = bitcast i32* %ptr to i8*
-  %bptrm1 = getelementptr inbounds i8, i8* %bptr, i64 -1
-  %bptr1 = getelementptr inbounds i8, i8* %bptr, i64 1
-  %bptr2 = getelementptr inbounds i8, i8* %bptr, i64 2
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
+  %bptrm1 = getelementptr inbounds i8, ptr %ptr, i64 -1
+  %bptr1 = getelementptr inbounds i8, ptr %ptr, i64 1
+  %bptr2 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
-  %wptr = bitcast i8* %bptr to i16*
-  %wptrm1 = bitcast i8* %bptrm1 to i16*
-  %wptr1 = bitcast i8* %bptr1 to i16*
-  %wptr2 = bitcast i8* %bptr2 to i16*
-  %wptr3 = bitcast i8* %bptr3 to i16*
 
-  store i16 1456, i16* %wptrm1, align 1
-  store i16 1346, i16* %wptr, align 1
-  store i16 1756, i16* %wptr1, align 1
-  store i16 1126, i16* %wptr2, align 1
-  store i16 5656, i16* %wptr3, align 1
+  store i16 1456, ptr %bptrm1, align 1
+  store i16 1346, ptr %ptr, align 1
+  store i16 1756, ptr %bptr1, align 1
+  store i16 1126, ptr %bptr2, align 1
+  store i16 5656, ptr %bptr3, align 1
 
 
 
@@ -148,50 +125,38 @@ entry:
 
 }
 
-define signext i8 @test3(i32 *%ptr) {
+define signext i8 @test3(ptr %ptr) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 5, i32* [[PTR:%.*]], align 4
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i32* [[PTR]] to i8*
-; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 -1
-; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 1
-; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 2
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
-; CHECK-NEXT:    [[WPTR:%.*]] = bitcast i8* [[BPTR]] to i16*
-; CHECK-NEXT:    [[WPTRM1:%.*]] = bitcast i8* [[BPTRM1]] to i16*
-; CHECK-NEXT:    [[WPTR1:%.*]] = bitcast i8* [[BPTR1]] to i16*
-; CHECK-NEXT:    [[WPTR2:%.*]] = bitcast i8* [[BPTR2]] to i16*
-; CHECK-NEXT:    [[WPTR3:%.*]] = bitcast i8* [[BPTR3]] to i16*
-; CHECK-NEXT:    [[V:%.*]] = load i8, i8* [[BPTR]], align 1
-; CHECK-NEXT:    store i16 1456, i16* [[WPTRM1]], align 1
-; CHECK-NEXT:    store i16 1346, i16* [[WPTR]], align 1
-; CHECK-NEXT:    store i16 1756, i16* [[WPTR1]], align 1
-; CHECK-NEXT:    store i16 1126, i16* [[WPTR2]], align 1
-; CHECK-NEXT:    store i16 5656, i16* [[WPTR3]], align 1
+; CHECK-NEXT:    store i32 5, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 -1
+; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
+; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 2
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    [[V:%.*]] = load i8, ptr [[PTR]], align 1
+; CHECK-NEXT:    store i16 1456, ptr [[BPTRM1]], align 1
+; CHECK-NEXT:    store i16 1346, ptr [[PTR]], align 1
+; CHECK-NEXT:    store i16 1756, ptr [[BPTR1]], align 1
+; CHECK-NEXT:    store i16 1126, ptr [[BPTR2]], align 1
+; CHECK-NEXT:    store i16 5656, ptr [[BPTR3]], align 1
 ; CHECK-NEXT:    ret i8 [[V]]
 ;
 entry:
 
-  store i32 5, i32* %ptr
+  store i32 5, ptr %ptr
 
-  %bptr = bitcast i32* %ptr to i8*
-  %bptrm1 = getelementptr inbounds i8, i8* %bptr, i64 -1
-  %bptr1 = getelementptr inbounds i8, i8* %bptr, i64 1
-  %bptr2 = getelementptr inbounds i8, i8* %bptr, i64 2
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
+  %bptrm1 = getelementptr inbounds i8, ptr %ptr, i64 -1
+  %bptr1 = getelementptr inbounds i8, ptr %ptr, i64 1
+  %bptr2 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
-  %wptr = bitcast i8* %bptr to i16*
-  %wptrm1 = bitcast i8* %bptrm1 to i16*
-  %wptr1 = bitcast i8* %bptr1 to i16*
-  %wptr2 = bitcast i8* %bptr2 to i16*
-  %wptr3 = bitcast i8* %bptr3 to i16*
 
-  %v = load i8, i8* %bptr, align 1
-  store i16 1456, i16* %wptrm1, align 1
-  store i16 1346, i16* %wptr, align 1
-  store i16 1756, i16* %wptr1, align 1
-  store i16 1126, i16* %wptr2, align 1
-  store i16 5656, i16* %wptr3, align 1
+  %v = load i8, ptr %ptr, align 1
+  store i16 1456, ptr %bptrm1, align 1
+  store i16 1346, ptr %ptr, align 1
+  store i16 1756, ptr %bptr1, align 1
+  store i16 1126, ptr %bptr2, align 1
+  store i16 5656, ptr %bptr3, align 1
 
 
   ret i8 %v
@@ -199,15 +164,15 @@ entry:
 }
 
 %struct.foostruct = type {
-i32 (i8*, i8**, i32, i8, i8*)*,
-i32 (i8*, i8**, i32, i8, i8*)*,
-i32 (i8*, i8**, i32, i8, i8*)*,
-i32 (i8*, i8**, i32, i8, i8*)*,
-void (i8*, i32, i32)*
+ptr,
+ptr,
+ptr,
+ptr,
+ptr
 }
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-declare void @goFunc(%struct.foostruct*)
-declare i32 @fa(i8*, i8**, i32, i8, i8*)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @goFunc(ptr)
+declare i32 @fa(ptr, ptr, i32, i8, ptr)
 
 ; We miss this case, because of an aggressive limit of partial overlap analysis.
 ; With a larger partial store limit, we remove the memset.
@@ -215,127 +180,108 @@ define void @test4()  {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[BANG:%.*]] = alloca [[STRUCT_FOOSTRUCT:%.*]], align 8
-; CHECK-NEXT:    [[V2:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], %struct.foostruct* [[BANG]], i64 0, i32 0
-; CHECK-NEXT:    store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** [[V2]], align 8
-; CHECK-NEXT:    [[V3:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], %struct.foostruct* [[BANG]], i64 0, i32 1
-; CHECK-NEXT:    store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** [[V3]], align 8
-; CHECK-NEXT:    [[V4:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], %struct.foostruct* [[BANG]], i64 0, i32 2
-; CHECK-NEXT:    store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** [[V4]], align 8
-; CHECK-NEXT:    [[V5:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], %struct.foostruct* [[BANG]], i64 0, i32 3
-; CHECK-NEXT:    store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** [[V5]], align 8
-; CHECK-NEXT:    [[V6:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], %struct.foostruct* [[BANG]], i64 0, i32 4
-; CHECK-NEXT:    store void (i8*, i32, i32)* null, void (i8*, i32, i32)** [[V6]], align 8
-; CHECK-NEXT:    call void @goFunc(%struct.foostruct* [[BANG]])
+; CHECK-NEXT:    store ptr @fa, ptr [[BANG]], align 8
+; CHECK-NEXT:    [[V3:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], ptr [[BANG]], i64 0, i32 1
+; CHECK-NEXT:    store ptr @fa, ptr [[V3]], align 8
+; CHECK-NEXT:    [[V4:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], ptr [[BANG]], i64 0, i32 2
+; CHECK-NEXT:    store ptr @fa, ptr [[V4]], align 8
+; CHECK-NEXT:    [[V5:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], ptr [[BANG]], i64 0, i32 3
+; CHECK-NEXT:    store ptr @fa, ptr [[V5]], align 8
+; CHECK-NEXT:    [[V6:%.*]] = getelementptr inbounds [[STRUCT_FOOSTRUCT]], ptr [[BANG]], i64 0, i32 4
+; CHECK-NEXT:    store ptr null, ptr [[V6]], align 8
+; CHECK-NEXT:    call void @goFunc(ptr [[BANG]])
 ; CHECK-NEXT:    ret void
 entry:
 
   %bang = alloca %struct.foostruct, align 8
-  %v1 = bitcast %struct.foostruct* %bang to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v1, i8 0, i64 40, i1 false)
-  %v2 = getelementptr inbounds %struct.foostruct, %struct.foostruct* %bang, i64 0, i32 0
-  store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** %v2, align 8
-  %v3 = getelementptr inbounds %struct.foostruct, %struct.foostruct* %bang, i64 0, i32 1
-  store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** %v3, align 8
-  %v4 = getelementptr inbounds %struct.foostruct, %struct.foostruct* %bang, i64 0, i32 2
-  store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** %v4, align 8
-  %v5 = getelementptr inbounds %struct.foostruct, %struct.foostruct* %bang, i64 0, i32 3
-  store i32 (i8*, i8**, i32, i8, i8*)* @fa, i32 (i8*, i8**, i32, i8, i8*)** %v5, align 8
-  %v6 = getelementptr inbounds %struct.foostruct, %struct.foostruct* %bang, i64 0, i32 4
-  store void (i8*, i32, i32)* null, void (i8*, i32, i32)** %v6, align 8
-  call void @goFunc(%struct.foostruct* %bang)
+  call void @llvm.memset.p0.i64(ptr align 8 %bang, i8 0, i64 40, i1 false)
+  store ptr @fa, ptr %bang, align 8
+  %v3 = getelementptr inbounds %struct.foostruct, ptr %bang, i64 0, i32 1
+  store ptr @fa, ptr %v3, align 8
+  %v4 = getelementptr inbounds %struct.foostruct, ptr %bang, i64 0, i32 2
+  store ptr @fa, ptr %v4, align 8
+  %v5 = getelementptr inbounds %struct.foostruct, ptr %bang, i64 0, i32 3
+  store ptr @fa, ptr %v5, align 8
+  %v6 = getelementptr inbounds %struct.foostruct, ptr %bang, i64 0, i32 4
+  store ptr null, ptr %v6, align 8
+  call void @goFunc(ptr %bang)
   ret void
 
 }
 
-define signext i8 @test5(i32 *%ptr) {
+define signext i8 @test5(ptr %ptr) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 1
-; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 2
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
-; CHECK-NEXT:    [[WPTR:%.*]] = bitcast i8* [[BPTR]] to i16*
-; CHECK-NEXT:    [[WPTR1:%.*]] = bitcast i8* [[BPTR1]] to i16*
-; CHECK-NEXT:    [[WPTR2:%.*]] = bitcast i8* [[BPTR2]] to i16*
-; CHECK-NEXT:    store i16 -1, i16* [[WPTR2]], align 1
-; CHECK-NEXT:    store i16 1456, i16* [[WPTR1]], align 1
-; CHECK-NEXT:    store i16 1346, i16* [[WPTR]], align 1
+; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 1
+; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 2
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    store i16 -1, ptr [[BPTR2]], align 1
+; CHECK-NEXT:    store i16 1456, ptr [[BPTR1]], align 1
+; CHECK-NEXT:    store i16 1346, ptr [[PTR]], align 1
 ; CHECK-NEXT:    ret i8 0
 ;
 entry:
 
-  store i32 0, i32* %ptr
+  store i32 0, ptr %ptr
 
-  %bptr = bitcast i32* %ptr to i8*
-  %bptr1 = getelementptr inbounds i8, i8* %bptr, i64 1
-  %bptr2 = getelementptr inbounds i8, i8* %bptr, i64 2
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
+  %bptr1 = getelementptr inbounds i8, ptr %ptr, i64 1
+  %bptr2 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
-  %wptr = bitcast i8* %bptr to i16*
-  %wptr1 = bitcast i8* %bptr1 to i16*
-  %wptr2 = bitcast i8* %bptr2 to i16*
 
-  store i16 65535, i16* %wptr2, align 1
-  store i16 1456, i16* %wptr1, align 1
-  store i16 1346, i16* %wptr, align 1
+  store i16 65535, ptr %bptr2, align 1
+  store i16 1456, ptr %bptr1, align 1
+  store i16 1346, ptr %ptr, align 1
 
 
   ret i8 0
 }
 
-define signext i8 @test6(i32 *%ptr) {
+define signext i8 @test6(ptr %ptr) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i32* [[PTR:%.*]] to i16*
-; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i16, i16* [[BPTR]], i64 0
-; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i16, i16* [[BPTR]], i64 1
-; CHECK-NEXT:    store i16 1456, i16* [[BPTR2]], align 1
-; CHECK-NEXT:    store i16 -1, i16* [[BPTR1]], align 1
+; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i16, ptr [[PTR:%.*]], i64 1
+; CHECK-NEXT:    store i16 1456, ptr [[BPTR2]], align 1
+; CHECK-NEXT:    store i16 -1, ptr [[PTR]], align 1
 ; CHECK-NEXT:    ret i8 0
 ;
 entry:
 
-  store i32 0, i32* %ptr
+  store i32 0, ptr %ptr
 
-  %bptr = bitcast i32* %ptr to i16*
-  %bptr1 = getelementptr inbounds i16, i16* %bptr, i64 0
-  %bptr2 = getelementptr inbounds i16, i16* %bptr, i64 1
+  %bptr2 = getelementptr inbounds i16, ptr %ptr, i64 1
 
-  store i16 1456, i16* %bptr2, align 1
-  store i16 65535, i16* %bptr1, align 1
+  store i16 1456, ptr %bptr2, align 1
+  store i16 65535, ptr %ptr, align 1
 
 
   ret i8 0
 }
 
-define signext i8 @test7(i64 *%ptr) {
+define signext i8 @test7(ptr %ptr) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i64* [[PTR:%.*]] to i16*
-; CHECK-NEXT:    [[BPTR1:%.*]] = getelementptr inbounds i16, i16* [[BPTR]], i64 0
-; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i16, i16* [[BPTR]], i64 1
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i16, i16* [[BPTR]], i64 2
-; CHECK-NEXT:    [[BPTR4:%.*]] = getelementptr inbounds i16, i16* [[BPTR]], i64 3
-; CHECK-NEXT:    store i16 1346, i16* [[BPTR1]], align 1
-; CHECK-NEXT:    store i16 1756, i16* [[BPTR3]], align 1
-; CHECK-NEXT:    store i16 1456, i16* [[BPTR2]], align 1
-; CHECK-NEXT:    store i16 5656, i16* [[BPTR4]], align 1
+; CHECK-NEXT:    [[BPTR2:%.*]] = getelementptr inbounds i16, ptr [[PTR:%.*]], i64 1
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i16, ptr [[PTR]], i64 2
+; CHECK-NEXT:    [[BPTR4:%.*]] = getelementptr inbounds i16, ptr [[PTR]], i64 3
+; CHECK-NEXT:    store i16 1346, ptr [[PTR]], align 1
+; CHECK-NEXT:    store i16 1756, ptr [[BPTR3]], align 1
+; CHECK-NEXT:    store i16 1456, ptr [[BPTR2]], align 1
+; CHECK-NEXT:    store i16 5656, ptr [[BPTR4]], align 1
 ; CHECK-NEXT:    ret i8 0
 ;
 entry:
 
-  store i64 0, i64* %ptr
+  store i64 0, ptr %ptr
 
-  %bptr = bitcast i64* %ptr to i16*
-  %bptr1 = getelementptr inbounds i16, i16* %bptr, i64 0
-  %bptr2 = getelementptr inbounds i16, i16* %bptr, i64 1
-  %bptr3 = getelementptr inbounds i16, i16* %bptr, i64 2
-  %bptr4 = getelementptr inbounds i16, i16* %bptr, i64 3
+  %bptr2 = getelementptr inbounds i16, ptr %ptr, i64 1
+  %bptr3 = getelementptr inbounds i16, ptr %ptr, i64 2
+  %bptr4 = getelementptr inbounds i16, ptr %ptr, i64 3
 
-  store i16 1346, i16* %bptr1, align 1
-  store i16 1756, i16* %bptr3, align 1
-  store i16 1456, i16* %bptr2, align 1
-  store i16 5656, i16* %bptr4, align 1
+  store i16 1346, ptr %ptr, align 1
+  store i16 1756, ptr %bptr3, align 1
+  store i16 1456, ptr %bptr2, align 1
+  store i16 5656, ptr %bptr4, align 1
 
 
   ret i8 0

diff  --git a/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll b/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
index a2218b725cd3b..f29028b1fabc3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
@@ -5,12 +5,11 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
 @g = global i32 42
 
-define void @test1(%t* noalias %pp) {
-  %p = getelementptr inbounds %t, %t* %pp, i32 0, i32 0
+define void @test1(ptr noalias %pp) {
 
-  store i32 1, i32* %p; <-- This is dead
-  %x = load i32, i32* inttoptr (i32 12345 to i32*)
-  store i32 %x, i32* %p
+  store i32 1, ptr %pp; <-- This is dead
+  %x = load i32, ptr inttoptr (i32 12345 to ptr)
+  store i32 %x, ptr %pp
   ret void
 ; CHECK-LABEL: define void @test1(
 ; CHECK: store
@@ -19,8 +18,8 @@ define void @test1(%t* noalias %pp) {
 }
 
 define void @test3() {
-  store i32 1, i32* @g; <-- This is dead.
-  store i32 42, i32* @g
+  store i32 1, ptr @g; <-- This is dead.
+  store i32 42, ptr @g
   ret void
 ; CHECK-LABEL: define void @test3(
 ; CHECK: store
@@ -28,10 +27,10 @@ define void @test3() {
 ; CHECK: ret void
 }
 
-define void @test4(i32* %p) {
-  store i32 1, i32* %p
-  %x = load i32, i32* @g; <-- %p and @g could alias
-  store i32 %x, i32* %p
+define void @test4(ptr %p) {
+  store i32 1, ptr %p
+  %x = load i32, ptr @g; <-- %p and @g could alias
+  store i32 %x, ptr %p
   ret void
 ; CHECK-LABEL: define void @test4(
 ; CHECK: store

diff --git a/llvm/test/Transforms/DeadStoreElimination/crash.ll b/llvm/test/Transforms/DeadStoreElimination/crash.ll
index ccee7fb8ba58b..41b6b4f03a9f7 100644
--- a/llvm/test/Transforms/DeadStoreElimination/crash.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/crash.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin10.0"
 
-@g80 = external global i8                         ; <i8*> [#uses=3]
+@g80 = external global i8                         ; <ptr> [#uses=3]
 
 declare signext i8 @foo(i8 signext, i8 signext) nounwind readnone ssp
 
@@ -16,31 +16,28 @@ entry:
 
 bb:                                               ; preds = %bb, %entry
   %storemerge = phi i8 [ %2, %bb ], [ 1, %entry ] ; <i8> [#uses=1]
-  store i8 %storemerge, i8* @g80
+  store i8 %storemerge, ptr @g80
   %0 = tail call i32 @func68(i32 1) nounwind ssp  ; <i32> [#uses=1]
   %1 = trunc i32 %0 to i8                         ; <i8> [#uses=1]
-  store i8 %1, i8* @g80, align 1
-  store i8 undef, i8* @g80, align 1
+  store i8 %1, ptr @g80, align 1
+  store i8 undef, ptr @g80, align 1
   %2 = tail call signext i8 @foo(i8 signext undef, i8 signext 1) nounwind ; <i8> [#uses=1]
   br label %bb
 }
 
 define fastcc i32 @test2() nounwind ssp {
 bb14:                                             ; preds = %bb4
-  %0 = bitcast i8* undef to i8**                  ; <i8**> [#uses=1]
-  %1 = getelementptr inbounds i8*, i8** %0, i64 undef  ; <i8**> [#uses=1]
-  %2 = bitcast i8** %1 to i16*                    ; <i16*> [#uses=2]
-  %3 = getelementptr inbounds i16, i16* %2, i64 undef  ; <i16*> [#uses=1]
-  %4 = bitcast i16* %3 to i8*                     ; <i8*> [#uses=1]
-  %5 = getelementptr inbounds i8, i8* %4, i64 undef   ; <i8*> [#uses=1]
-  %6 = getelementptr inbounds i16, i16* %2, i64 undef  ; <i16*> [#uses=1]
-  store i16 undef, i16* %6, align 2
-  %7 = getelementptr inbounds i8, i8* %5, i64 undef   ; <i8*> [#uses=1]
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %7, i8* undef, i64 undef, i1 false)
+  %0 = getelementptr inbounds ptr, ptr undef, i64 undef  ; <ptr> [#uses=1]
+  %1 = getelementptr inbounds i16, ptr %0, i64 undef  ; <ptr> [#uses=1]
+  %2 = getelementptr inbounds i8, ptr %1, i64 undef   ; <ptr> [#uses=1]
+  %3 = getelementptr inbounds i16, ptr %0, i64 undef  ; <ptr> [#uses=1]
+  store i16 undef, ptr %3, align 2
+  %4 = getelementptr inbounds i8, ptr %2, i64 undef   ; <ptr> [#uses=1]
+  call void @llvm.memcpy.p0.p0.i64(ptr %4, ptr undef, i64 undef, i1 false)
   unreachable
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 
 ; rdar://7635088
@@ -49,26 +46,26 @@ entry:
   ret i32 0
   
 dead:
-  %P2 = getelementptr i32, i32 *%P2, i32 52
-  %Q2 = getelementptr i32, i32 *%Q2, i32 52
-  store i32 4, i32* %P2
-  store i32 4, i32* %Q2
+  %P2 = getelementptr i32, ptr %P2, i32 52
+  %Q2 = getelementptr i32, ptr %Q2, i32 52
+  store i32 4, ptr %P2
+  store i32 4, ptr %Q2
   br label %dead
 }
 
 
 ; PR3141
 %struct.ada__tags__dispatch_table = type { [1 x i32] }
-%struct.f393a00_1__object = type { %struct.ada__tags__dispatch_table*, i8 }
+%struct.f393a00_1__object = type { ptr, i8 }
 %struct.f393a00_2__windmill = type { %struct.f393a00_1__object, i16 }
 
-define void @test4(%struct.f393a00_2__windmill* %a, %struct.f393a00_2__windmill* %b) {
+define void @test4(ptr %a, ptr %b) {
 entry:
-	%t = alloca %struct.f393a00_2__windmill		; <%struct.f393a00_2__windmill*> [#uses=1]
-	%0 = getelementptr %struct.f393a00_2__windmill, %struct.f393a00_2__windmill* %t, i32 0, i32 0, i32 0		; <%struct.ada__tags__dispatch_table**> [#uses=1]
-	%1 = load %struct.ada__tags__dispatch_table*, %struct.ada__tags__dispatch_table** null, align 4		; <%struct.ada__tags__dispatch_table*> [#uses=1]
-	%2 = load %struct.ada__tags__dispatch_table*, %struct.ada__tags__dispatch_table** %0, align 8		; <%struct.ada__tags__dispatch_table*> [#uses=1]
-	store %struct.ada__tags__dispatch_table* %2, %struct.ada__tags__dispatch_table** null, align 4
-	store %struct.ada__tags__dispatch_table* %1, %struct.ada__tags__dispatch_table** null, align 4
+	%t = alloca %struct.f393a00_2__windmill		; <ptr> [#uses=1]
+	%0 = getelementptr %struct.f393a00_2__windmill, ptr %t, i32 0, i32 0, i32 0		; <ptr> [#uses=1]
+	%1 = load ptr, ptr null, align 4		; <ptr> [#uses=1]
+	%2 = load ptr, ptr %0, align 8		; <ptr> [#uses=1]
+	store ptr %2, ptr null, align 4
+	store ptr %1, ptr null, align 4
 	ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll b/llvm/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll
index b403e3382234d..d5ad6e0b0c6b0 100644
--- a/llvm/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll
@@ -5,14 +5,14 @@ target triple = "x86_64-unknown-linux-gnu"
 %class.basic_string = type { %"class.__gnu_cxx::__versa_string" }
 %"class.__gnu_cxx::__versa_string" = type { %"class.__gnu_cxx::__sso_string_base" }
 %"class.__gnu_cxx::__sso_string_base" = type { %"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider", i64, %union.anon }
-%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { ptr }
 %union.anon = type { i64, [8 x i8] }
 
 ; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #0
 
 ; Function Attrs: noinline nounwind readonly uwtable
-declare zeroext i1 @callee_takes_string(%class.basic_string* nonnull) #1 align 2
+declare zeroext i1 @callee_takes_string(ptr nonnull) #1 align 2
 
 ; Function Attrs: nounwind uwtable
 define weak_odr zeroext i1 @test() #2 align 2 {
@@ -22,27 +22,21 @@ define weak_odr zeroext i1 @test() #2 align 2 {
 bb:
   %tmp = alloca %class.basic_string, align 8
   %tmp1 = alloca %class.basic_string, align 8
-  %tmp3 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 2
-  %tmp4 = bitcast %union.anon* %tmp3 to i8*
-  %tmp5 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 0, i32 0
-  %tmp6 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 1
-  %tmp7 = getelementptr inbounds i8, i8* %tmp4, i64 1
-  %tmp8 = bitcast %class.basic_string* %tmp to i8*
+  %tmp3 = getelementptr inbounds %class.basic_string, ptr %tmp, i64 0, i32 0, i32 0, i32 2
+  %tmp6 = getelementptr inbounds %class.basic_string, ptr %tmp, i64 0, i32 0, i32 0, i32 1
+  %tmp7 = getelementptr inbounds i8, ptr %tmp3, i64 1
   %tmp9 = bitcast i64 0 to i64
-  %tmp10 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 2
-  %tmp11 = bitcast %union.anon* %tmp10 to i8*
-  %tmp12 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 0, i32 0
-  %tmp13 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 1
-  %tmp14 = getelementptr inbounds i8, i8* %tmp11, i64 1
-  %tmp15 = bitcast %class.basic_string* %tmp1 to i8*
+  %tmp10 = getelementptr inbounds %class.basic_string, ptr %tmp1, i64 0, i32 0, i32 0, i32 2
+  %tmp13 = getelementptr inbounds %class.basic_string, ptr %tmp1, i64 0, i32 0, i32 0, i32 1
+  %tmp14 = getelementptr inbounds i8, ptr %tmp10, i64 1
   br label %_ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit
 
 _ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit: ; preds = %bb
-  store i8* %tmp4, i8** %tmp5, align 8
-  store i8 62, i8* %tmp4, align 8
-  store i64 1, i64* %tmp6, align 8
-  store i8 0, i8* %tmp7, align 1
-  %tmp16 = call zeroext i1 @callee_takes_string(%class.basic_string* nonnull %tmp)
+  store ptr %tmp3, ptr %tmp, align 8
+  store i8 62, ptr %tmp3, align 8
+  store i64 1, ptr %tmp6, align 8
+  store i8 0, ptr %tmp7, align 1
+  %tmp16 = call zeroext i1 @callee_takes_string(ptr nonnull %tmp)
   br label %_ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3
 
 _ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3: ; preds = %_ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit
@@ -50,21 +44,21 @@ _ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3: ; preds =
 ; CHECK: _ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3:
 
 ; The following can be read through the call %tmp17:
-  store i8* %tmp11, i8** %tmp12, align 8
-  store i8 125, i8* %tmp11, align 8
-  store i64 1, i64* %tmp13, align 8
-  store i8 0, i8* %tmp14, align 1
+  store ptr %tmp10, ptr %tmp1, align 8
+  store i8 125, ptr %tmp10, align 8
+  store i64 1, ptr %tmp13, align 8
+  store i8 0, ptr %tmp14, align 1
 
-; CHECK: store i8* %tmp11, i8** %tmp12, align 8
-; CHECK: store i8 125, i8* %tmp11, align 8
-; CHECK: store i64 1, i64* %tmp13, align 8
-; CHECK: store i8 0, i8* %tmp14, align 1
+; CHECK: store ptr %tmp10, ptr %tmp1, align 8
+; CHECK: store i8 125, ptr %tmp10, align 8
+; CHECK: store i64 1, ptr %tmp13, align 8
+; CHECK: store i8 0, ptr %tmp14, align 1
 
-  %tmp17 = call zeroext i1 @callee_takes_string(%class.basic_string* nonnull %tmp1)
-  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp11, i8 -51, i64 16, i1 false) #0
-  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp15, i8 -51, i64 32, i1 false) #0
-  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp4, i8 -51, i64 16, i1 false) #0
-  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp8, i8 -51, i64 32, i1 false) #0
+  %tmp17 = call zeroext i1 @callee_takes_string(ptr nonnull %tmp1)
+  call void @llvm.memset.p0.i64(ptr align 8 %tmp10, i8 -51, i64 16, i1 false) #0
+  call void @llvm.memset.p0.i64(ptr align 8 %tmp1, i8 -51, i64 32, i1 false) #0
+  call void @llvm.memset.p0.i64(ptr align 8 %tmp3, i8 -51, i64 16, i1 false) #0
+  call void @llvm.memset.p0.i64(ptr align 8 %tmp, i8 -51, i64 32, i1 false) #0
   ret i1 %tmp17
 }
 

diff --git a/llvm/test/Transforms/DeadStoreElimination/debug-counter.ll b/llvm/test/Transforms/DeadStoreElimination/debug-counter.ll
index b881e38e92f30..36b20435227c6 100644
--- a/llvm/test/Transforms/DeadStoreElimination/debug-counter.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/debug-counter.ll
@@ -18,31 +18,31 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
 
-define void @test(i32* noalias %P, i32* noalias %Q, i32* noalias %R) {
+define void @test(ptr noalias %P, ptr noalias %Q, ptr noalias %R) {
 ; SKIP0-COUNT1-LABEL: @test(
-; SKIP0-COUNT1-NEXT:    store i32 1, i32* [[P:%.*]]
+; SKIP0-COUNT1-NEXT:    store i32 1, ptr [[P:%.*]]
 ; SKIP0-COUNT1-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; SKIP0-COUNT1:       bb1:
 ; SKIP0-COUNT1-NEXT:    br label [[BB3:%.*]]
 ; SKIP0-COUNT1:       bb2:
 ; SKIP0-COUNT1-NEXT:    br label [[BB3]]
 ; SKIP0-COUNT1:       bb3:
-; SKIP0-COUNT1-NEXT:    store i32 0, i32* [[Q:%.*]]
-; SKIP0-COUNT1-NEXT:    store i32 0, i32* [[R:%.*]]
-; SKIP0-COUNT1-NEXT:    store i32 0, i32* [[P]]
+; SKIP0-COUNT1-NEXT:    store i32 0, ptr [[Q:%.*]]
+; SKIP0-COUNT1-NEXT:    store i32 0, ptr [[R:%.*]]
+; SKIP0-COUNT1-NEXT:    store i32 0, ptr [[P]]
 ; SKIP0-COUNT1-NEXT:    ret void
 ;
 ; SKIP1-COUNT1-LABEL: @test(
-; SKIP1-COUNT1-NEXT:    store i32 1, i32* [[R:%.*]]
+; SKIP1-COUNT1-NEXT:    store i32 1, ptr [[R:%.*]]
 ; SKIP1-COUNT1-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; SKIP1-COUNT1:       bb1:
 ; SKIP1-COUNT1-NEXT:    br label [[BB3:%.*]]
 ; SKIP1-COUNT1:       bb2:
 ; SKIP1-COUNT1-NEXT:    br label [[BB3]]
 ; SKIP1-COUNT1:       bb3:
-; SKIP1-COUNT1-NEXT:    store i32 0, i32* [[Q:%.*]]
-; SKIP1-COUNT1-NEXT:    store i32 0, i32* [[R]]
-; SKIP1-COUNT1-NEXT:    store i32 0, i32* [[P:%.*]]
+; SKIP1-COUNT1-NEXT:    store i32 0, ptr [[Q:%.*]]
+; SKIP1-COUNT1-NEXT:    store i32 0, ptr [[R]]
+; SKIP1-COUNT1-NEXT:    store i32 0, ptr [[P:%.*]]
 ; SKIP1-COUNT1-NEXT:    ret void
 ;
 ; SKIP0-COUNT2-LABEL: @test(
@@ -52,35 +52,35 @@ define void @test(i32* noalias %P, i32* noalias %Q, i32* noalias %R) {
 ; SKIP0-COUNT2:       bb2:
 ; SKIP0-COUNT2-NEXT:    br label [[BB3]]
 ; SKIP0-COUNT2:       bb3:
-; SKIP0-COUNT2-NEXT:    store i32 0, i32* [[Q:%.*]]
-; SKIP0-COUNT2-NEXT:    store i32 0, i32* [[R:%.*]]
-; SKIP0-COUNT2-NEXT:    store i32 0, i32* [[P:%.*]]
+; SKIP0-COUNT2-NEXT:    store i32 0, ptr [[Q:%.*]]
+; SKIP0-COUNT2-NEXT:    store i32 0, ptr [[R:%.*]]
+; SKIP0-COUNT2-NEXT:    store i32 0, ptr [[P:%.*]]
 ; SKIP0-COUNT2-NEXT:    ret void
 ;
 ; SKIP2-COUNT1-LABEL: @test(
-; SKIP2-COUNT1-NEXT:    store i32 1, i32* [[P:%.*]]
-; SKIP2-COUNT1-NEXT:    store i32 1, i32* [[R:%.*]]
+; SKIP2-COUNT1-NEXT:    store i32 1, ptr [[P:%.*]]
+; SKIP2-COUNT1-NEXT:    store i32 1, ptr [[R:%.*]]
 ; SKIP2-COUNT1-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; SKIP2-COUNT1:       bb1:
 ; SKIP2-COUNT1-NEXT:    br label [[BB3:%.*]]
 ; SKIP2-COUNT1:       bb2:
 ; SKIP2-COUNT1-NEXT:    br label [[BB3]]
 ; SKIP2-COUNT1:       bb3:
-; SKIP2-COUNT1-NEXT:    store i32 0, i32* [[Q:%.*]]
-; SKIP2-COUNT1-NEXT:    store i32 0, i32* [[R]]
-; SKIP2-COUNT1-NEXT:    store i32 0, i32* [[P]]
+; SKIP2-COUNT1-NEXT:    store i32 0, ptr [[Q:%.*]]
+; SKIP2-COUNT1-NEXT:    store i32 0, ptr [[R]]
+; SKIP2-COUNT1-NEXT:    store i32 0, ptr [[P]]
 ; SKIP2-COUNT1-NEXT:    ret void
 ;
-  store i32 1, i32* %P
-  store i32 1, i32* %R
+  store i32 1, ptr %P
+  store i32 1, ptr %R
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 0, i32* %Q
-  store i32 0, i32* %R
-  store i32 0, i32* %P
+  store i32 0, ptr %Q
+  store i32 0, ptr %R
+  store i32 0, ptr %P
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/debuginfo.ll b/llvm/test/Transforms/DeadStoreElimination/debuginfo.ll
index b927965dc4054..f483f6396fc84 100644
--- a/llvm/test/Transforms/DeadStoreElimination/debuginfo.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/debuginfo.ll
@@ -2,30 +2,26 @@
 
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-declare noalias i8* @malloc(i32)
+declare noalias ptr @malloc(i32)
 
 declare void @test_f()
 
-define i32* @test_salvage(i32 %arg) {
+define ptr @test_salvage(i32 %arg) {
 ; Check that all four original local variables have their values preserved.
 ; CHECK-LABEL: @test_salvage(
 ; CHECK-NEXT: malloc
-; CHECK-NEXT: @llvm.dbg.value(metadata i8* %p, metadata ![[p:.*]], metadata !DIExpression())
-; CHECK-NEXT: bitcast
-; CHECK-NEXT: @llvm.dbg.value(metadata i32* %P, metadata ![[P:.*]], metadata !DIExpression())
+; CHECK-NEXT: @llvm.dbg.value(metadata ptr %p, metadata ![[p:.*]], metadata !DIExpression())
 ; CHECK-NEXT: @llvm.dbg.value(metadata i32 %arg, metadata ![[DEAD:.*]], metadata !DIExpression(DW_OP_plus_uconst, 1, DW_OP_stack_value))
 ; CHECK-NEXT: call void @test_f()
-; CHECK-NEXT: store i32 0, i32* %P
+; CHECK-NEXT: store i32 0, ptr %p
 
-  %p = tail call i8* @malloc(i32 4)
-  %P = bitcast i8* %p to i32*
+  %p = tail call ptr @malloc(i32 4)
   %DEAD = add i32 %arg, 1
-  store i32 %DEAD, i32* %P
+  store i32 %DEAD, ptr %p
   call void @test_f()
-  store i32 0, i32* %P
-  ret i32* %P
+  store i32 0, ptr %p
+  ret ptr %p
 }
 
 ; CHECK: ![[p]] = !DILocalVariable(name: "1"
-; CHECK: ![[P]] = !DILocalVariable(name: "2"
-; CHECK: ![[DEAD]] = !DILocalVariable(name: "3"
+; CHECK: ![[DEAD]] = !DILocalVariable(name: "2"

diff --git a/llvm/test/Transforms/DeadStoreElimination/dominate.ll b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
index 24dd65e07bbc2..c3e8d88542ced 100644
--- a/llvm/test/Transforms/DeadStoreElimination/dominate.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
@@ -5,16 +5,15 @@ declare void @bar()
 define void @foo() {
 bb1:
   %memtmp3.i = alloca [21 x i8], align 1
-  %0 = getelementptr inbounds [21 x i8], [21 x i8]* %memtmp3.i, i64 0, i64 0
   br label %bb3
 
 bb2:
-  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
+  call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
   br label %bb3
 
 bb3:
   call void @bar()
-  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
+  call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
   br label %bb4
 
 bb4:
@@ -22,4 +21,4 @@ bb4:
 
 }
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind

diff --git a/llvm/test/Transforms/DeadStoreElimination/fence-todo.ll b/llvm/test/Transforms/DeadStoreElimination/fence-todo.ll
index a00ef9cbf244d..cb2802a63fdbe 100644
--- a/llvm/test/Transforms/DeadStoreElimination/fence-todo.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/fence-todo.ll
@@ -7,19 +7,19 @@
 ; Right now the DSE in presence of fence is only done in end blocks (with no successors),
 ; but the same logic applies to other basic blocks as well.
 ; The store to %addr.i can be removed since it is a byval attribute
-define void @test3(i32* byval(i32) %addr.i) {
+define void @test3(ptr byval(i32) %addr.i) {
 ; CHECK-LABEL: @test3
 ; CHECK-NOT: store
 ; CHECK: fence
 ; CHECK: ret
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   fence release
   ret void
 }
 
-declare void @foo(i8* nocapture %p)
+declare void @foo(ptr nocapture %p)
 
-declare noalias i8* @malloc(i32)
+declare noalias ptr @malloc(i32)
 
 ; DSE of stores in locations allocated through library calls.
 define void @test_nocapture() {
@@ -28,9 +28,9 @@ define void @test_nocapture() {
 ; CHECK: foo
 ; CHECK-NOT: store
 ; CHECK: fence
-  %m  =  call i8* @malloc(i32 24)
-  call void @foo(i8* %m)
-  store i8 4, i8* %m
+  %m  =  call ptr @malloc(i32 24)
+  call void @foo(ptr %m)
+  store i8 4, ptr %m
   fence release
   ret void
 }
@@ -43,8 +43,8 @@ define void @fence_seq_cst() {
 ; CHECK-NEXT: fence seq_cst
 ; CHECK-NEXT: ret void
   %P1 = alloca i32
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   fence seq_cst
-  store i32 4, i32* %P1, align 4
+  store i32 4, ptr %P1, align 4
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/fence.ll b/llvm/test/Transforms/DeadStoreElimination/fence.ll
index 5f2398812e93d..e285433c3b764 100644
--- a/llvm/test/Transforms/DeadStoreElimination/fence.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/fence.ll
@@ -6,43 +6,43 @@
 ; it is conservatively correct.  A legal optimization
 ; could hoist the second store above the fence, and then
 ; DSE one of them.
-define void @test1(i32* %addr.i) {
+define void @test1(ptr %addr.i) {
 ; CHECK-LABEL: @test1
 ; CHECK: store i32 5
 ; CHECK: fence
 ; CHECK: store i32 5
 ; CHECK: ret
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   fence release
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   ret void
 }
 
 ; Same as previous, but with different values. If we ever optimize 
 ; this more aggressively, this allows us to check that the correct
 ; store is retained (the 'i32 1' store in this case)
-define void @test1b(i32* %addr.i) {
+define void @test1b(ptr %addr.i) {
 ; CHECK-LABEL: @test1b
 ; CHECK: store i32 42
 ; CHECK: fence release
 ; CHECK: store i32 1
 ; CHECK: ret
-  store i32 42, i32* %addr.i, align 4
+  store i32 42, ptr %addr.i, align 4
   fence release
-  store i32 1, i32* %addr.i, align 4
+  store i32 1, ptr %addr.i, align 4
   ret void
 }
 
 ; We *could* DSE across this fence, but don't.  No other thread can
 ; observe the order of the acquire fence and the store.
-define void @test2(i32* %addr.i) {
+define void @test2(ptr %addr.i) {
 ; CHECK-LABEL: @test2
 ; CHECK: store
 ; CHECK: fence
 ; CHECK: store
 ; CHECK: ret
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   fence acquire
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/free.ll b/llvm/test/Transforms/DeadStoreElimination/free.ll
index abde433c3855b..e43dd13d179fc 100644
--- a/llvm/test/Transforms/DeadStoreElimination/free.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/free.ll
@@ -3,46 +3,42 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare void @free(i8* nocapture) allockind("free")
-declare noalias i8* @malloc(i64) allockind("alloc,uninitialized")
+declare void @free(ptr nocapture) allockind("free")
+declare noalias ptr @malloc(i64) allockind("alloc,uninitialized")
 
-define void @test(i32* %Q, i32* %P) {
+define void @test(ptr %Q, ptr %P) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @free(i8* [[TMP1]]) [[ATTR0:#.*]]
+; CHECK-NEXT:    tail call void @free(ptr [[P:%.*]]) [[ATTR0:#.*]]
 ; CHECK-NEXT:    ret void
 ;
-  %DEAD = load i32, i32* %Q            ; <i32> [#uses=1]
-  store i32 %DEAD, i32* %P
-  %1 = bitcast i32* %P to i8*
-  tail call void @free(i8* %1) nounwind
+  %DEAD = load i32, ptr %Q            ; <i32> [#uses=1]
+  store i32 %DEAD, ptr %P
+  tail call void @free(ptr %P) nounwind
   ret void
 }
 
-define void @test2({i32, i32}* %P) {
+define void @test2(ptr %P) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast { i32, i32 }* [[P:%.*]] to i8*
-; CHECK-NEXT:    tail call void @free(i8* [[TMP1]]) [[ATTR0]]
+; CHECK-NEXT:    tail call void @free(ptr [[P:%.*]]) [[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;
-  %Q = getelementptr {i32, i32}, {i32, i32} *%P, i32 0, i32 1
-  store i32 4, i32* %Q
-  %1 = bitcast {i32, i32}* %P to i8*
-  tail call void @free(i8* %1) nounwind
+  %Q = getelementptr {i32, i32}, ptr %P, i32 0, i32 1
+  store i32 4, ptr %Q
+  tail call void @free(ptr %P) nounwind
   ret void
 }
 
 define void @test3() {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    call void @free(i8* [[M]]) [[ATTR0]]
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    call void @free(ptr [[M]]) [[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
-  %m1 = getelementptr i8, i8* %m, i64 1
-  store i8 1, i8* %m1
-  call void @free(i8* %m) nounwind
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
+  %m1 = getelementptr i8, ptr %m, i64 1
+  store i8 1, ptr %m1
+  call void @free(ptr %m) nounwind
   ret void
 }
 
@@ -50,24 +46,24 @@ define void @test3() {
 define void @test4(i1 %x) nounwind {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ALLOC1:%.*]] = tail call noalias i8* @malloc(i64 4) [[ATTR0]]
+; CHECK-NEXT:    [[ALLOC1:%.*]] = tail call noalias ptr @malloc(i64 4) [[ATTR0]]
 ; CHECK-NEXT:    br i1 [[X:%.*]], label [[SKIPINIT1:%.*]], label [[INIT1:%.*]]
 ; CHECK:       init1:
 ; CHECK-NEXT:    br label [[SKIPINIT1]]
 ; CHECK:       skipinit1:
-; CHECK-NEXT:    tail call void @free(i8* [[ALLOC1]]) [[ATTR0]]
+; CHECK-NEXT:    tail call void @free(ptr [[ALLOC1]]) [[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %alloc1 = tail call noalias i8* @malloc(i64 4) nounwind
+  %alloc1 = tail call noalias ptr @malloc(i64 4) nounwind
   br i1 %x, label %skipinit1, label %init1
 
 init1:
-  store i8 1, i8* %alloc1
+  store i8 1, ptr %alloc1
   br label %skipinit1
 
 skipinit1:
-  tail call void @free(i8* %alloc1) nounwind
+  tail call void @free(ptr %alloc1) nounwind
   ret void
 }
 
@@ -75,13 +71,13 @@ define void @test5() {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb:
-; CHECK-NEXT:    tail call void @free(i8* undef) [[ATTR0]]
+; CHECK-NEXT:    tail call void @free(ptr undef) [[ATTR0]]
 ; CHECK-NEXT:    br label [[BB]]
 ;
   br label %bb
 
 bb:
-  tail call void @free(i8* undef) nounwind
+  tail call void @free(ptr undef) nounwind
   br label %bb
 }
 
@@ -89,36 +85,30 @@ bb:
 %struct  = type { i32 , i32  }
 
 ; Test case inspired by PR48036.
-define void @delete_field_after(%struct* %ptr) {
+define void @delete_field_after(ptr %ptr) {
 ;
 ; CHECK-LABEL: @delete_field_after(
-; CHECK-NEXT:    [[PTR_F0:%.*]] = getelementptr [[STRUCT:%.*]], %struct* [[PTR:%.*]], i32 1
-; CHECK-NEXT:    [[BC:%.*]] = bitcast %struct* [[PTR_F0]] to i8*
-; CHECK-NEXT:    [[PTR_F1:%.*]] = getelementptr [[STRUCT]], %struct* [[PTR]], i32 0, i32 1
-; CHECK-NEXT:    store i32 0, i32* [[PTR_F1]], align 4
-; CHECK-NEXT:    call void @free(i8* [[BC]])
+; CHECK-NEXT:    [[PTR_F0:%.*]] = getelementptr [[STRUCT:%.*]], ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT:    [[PTR_F1:%.*]] = getelementptr [[STRUCT]], ptr [[PTR]], i32 0, i32 1
+; CHECK-NEXT:    store i32 0, ptr [[PTR_F1]], align 4
+; CHECK-NEXT:    call void @free(ptr [[PTR_F0]])
 ; CHECK-NEXT:    ret void
 ;
-  %ptr.f0 = getelementptr %struct, %struct* %ptr, i32 1
-  %bc = bitcast %struct* %ptr.f0 to i8*
-  %ptr.f1 = getelementptr %struct, %struct* %ptr, i32 0, i32 1
-  store i32 0, i32* %ptr.f1
-  call void @free(i8* %bc)
+  %ptr.f0 = getelementptr %struct, ptr %ptr, i32 1
+  %ptr.f1 = getelementptr %struct, ptr %ptr, i32 0, i32 1
+  store i32 0, ptr %ptr.f1
+  call void @free(ptr %ptr.f0)
   ret void
 }
 
 ; Test case inspired by PR48036.
-define void @delete_field_before(%struct* %ptr) {
+define void @delete_field_before(ptr %ptr) {
 ; CHECK-LABEL: @delete_field_before(
-; CHECK-NEXT:    [[PTR_F0:%.*]] = getelementptr [[STRUCT:%.*]], %struct* [[PTR:%.*]], i32 0
-; CHECK-NEXT:    [[BC:%.*]] = bitcast %struct* [[PTR_F0]] to i8*
-; CHECK-NEXT:    call void @free(i8* [[BC]])
+; CHECK-NEXT:    call void @free(ptr [[PTR:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %ptr.f0 = getelementptr %struct, %struct* %ptr, i32 0
-  %bc = bitcast %struct* %ptr.f0 to i8*
-  %ptr.f1 = getelementptr %struct, %struct* %ptr, i32 1, i32 1
-  store i32 0, i32* %ptr.f1
-  call void @free(i8* %bc)
+  %ptr.f1 = getelementptr %struct, ptr %ptr, i32 1, i32 1
+  store i32 0, ptr %ptr.f1
+  call void @free(ptr %ptr)
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/inst-limits.ll b/llvm/test/Transforms/DeadStoreElimination/inst-limits.ll
index 6357477ae43be..e65630ff8868f 100644
--- a/llvm/test/Transforms/DeadStoreElimination/inst-limits.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/inst-limits.ll
@@ -12,8 +12,8 @@ define i32 @test_within_limit() !dbg !4 {
 entry:
   ; The first store; later there is a second store to the same location,
   ; so this store should be optimized away by DSE.
-  ; CHECK-NOT: store i32 1, i32* @x, align 4
-  store i32 1, i32* @x, align 4
+  ; CHECK-NOT: store i32 1, ptr @x, align 4
+  store i32 1, ptr @x, align 4
 
   ; Insert 98 dummy instructions between the two stores
   %0 = bitcast i32 0 to i32
@@ -119,8 +119,8 @@ entry:
   ; effect on the working of DSE in any way.
   call void @llvm.dbg.value(metadata i32 undef, metadata !10, metadata !DIExpression()), !dbg !DILocation(scope: !4)
 
-  ; CHECK:  store i32 -1, i32* @x, align 4
-  store i32 -1, i32* @x, align 4
+  ; CHECK:  store i32 -1, ptr @x, align 4
+  store i32 -1, ptr @x, align 4
   ret i32 0
 }
 
@@ -128,8 +128,8 @@ entry:
 define i32 @test_outside_limit() {
 entry:
   ; The first store; later there is a second store to the same location
-  ; CHECK-NOT: store i32 1, i32* @x, align 4
-  store i32 1, i32* @x, align 4
+  ; CHECK-NOT: store i32 1, ptr @x, align 4
+  store i32 1, ptr @x, align 4
 
   ; Insert 99 dummy instructions between the two stores; this is
   ; one too many instruction for the DSE to take place.
@@ -233,8 +233,8 @@ entry:
   %97 = bitcast i32 0 to i32
   %98 = bitcast i32 0 to i32
 
-  ; CHECK:  store i32 -1, i32* @x, align 4
-  store i32 -1, i32* @x, align 4
+  ; CHECK:  store i32 -1, ptr @x, align 4
+  store i32 -1, ptr @x, align 4
   ret i32 0
 }
 
@@ -255,6 +255,6 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
 !9 = !{!10}
 !10 = !DILocalVariable(name: "x", scope: !4, type: !8)
 !11 = !{i32 2, !"Dwarf Version", i32 4}
-!12 = !{i32* undef}
+!12 = !{ptr undef}
 
 !13 = !{i32 1, !"Debug Info Version", i32 3}

diff --git a/llvm/test/Transforms/DeadStoreElimination/int_sideeffect.ll b/llvm/test/Transforms/DeadStoreElimination/int_sideeffect.ll
index 035e787f6bd7a..afdbe27e13cd9 100644
--- a/llvm/test/Transforms/DeadStoreElimination/int_sideeffect.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/int_sideeffect.ll
@@ -7,9 +7,9 @@ declare void @llvm.sideeffect()
 ; CHECK-LABEL: dse
 ; CHECK: store
 ; CHECK-NOT: store
-define void @dse(float* %p) {
-    store float 0.0, float* %p
+define void @dse(ptr %p) {
+    store float 0.0, ptr %p
     call void @llvm.sideeffect()
-    store float 0.0, float* %p
+    store float 0.0, ptr %p
     ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/invariant.start.ll b/llvm/test/Transforms/DeadStoreElimination/invariant.start.ll
index 45c19c3588b03..2d95c94685fea 100644
--- a/llvm/test/Transforms/DeadStoreElimination/invariant.start.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/invariant.start.ll
@@ -2,36 +2,36 @@
 ; Test to make sure llvm.invariant.start calls are not treated as clobbers.
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 
-declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
+declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) nounwind readonly
 
 ; We could remove either store here. The first store is dead in the
 ; conventional sense, because there is a later killing store. The second store
 ; is undefined behavior by the semantics of invariant.start, and as such
 ; unreachable.
-define void @test(i8 *%p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store i8 3, i8* [[P:%.*]], align 4
+; CHECK-NEXT:    store i8 3, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i8 1, i8* %p, align 4
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %p)
-  store i8 3, i8* %p, align 4
+  store i8 1, ptr %p, align 4
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %p)
+  store i8 3, ptr %p, align 4
   ret void
 }
 
 ; FIXME: We should be able to remove the first store to p, even though p and q
 ; may alias.
-define void @test2(i8* %p, i8* %q) {
+define void @test2(ptr %p, ptr %q) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    store i8 1, i8* [[P:%.*]], align 4
-; CHECK-NEXT:    store i8 2, i8* [[Q:%.*]], align 4
-; CHECK-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[Q]])
-; CHECK-NEXT:    store i8 3, i8* [[P]], align 4
+; CHECK-NEXT:    store i8 1, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i8 2, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[Q]])
+; CHECK-NEXT:    store i8 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i8 1, i8* %p, align 4
-  store i8 2, i8* %q, align 4
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %q)
-  store i8 3, i8* %p, align 4
+  store i8 1, ptr %p, align 4
+  store i8 2, ptr %q, align 4
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %q)
+  store i8 3, ptr %p, align 4
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/launder.invariant.group.ll b/llvm/test/Transforms/DeadStoreElimination/launder.invariant.group.ll
index 28abe2eb5feea..a20b00fb47ced 100644
--- a/llvm/test/Transforms/DeadStoreElimination/launder.invariant.group.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/launder.invariant.group.ll
@@ -1,65 +1,65 @@
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 
-; CHECK-LABEL: void @skipBarrier(i8* %ptr)
-define void @skipBarrier(i8* %ptr) {
+; CHECK-LABEL: void @skipBarrier(ptr %ptr)
+define void @skipBarrier(ptr %ptr) {
 ; CHECK-NOT: store i8 42
-  store i8 42, i8* %ptr
-; CHECK: %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+  store i8 42, ptr %ptr
+; CHECK: %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
+  %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
 ; CHECK: store i8 43
-  store i8 43, i8* %ptr2
+  store i8 43, ptr %ptr2
   ret void
 }
 
-; CHECK-LABEL: void @skip2Barriers(i8* %ptr)
-define void @skip2Barriers(i8* %ptr) {
+; CHECK-LABEL: void @skip2Barriers(ptr %ptr)
+define void @skip2Barriers(ptr %ptr) {
 ; CHECK-NOT: store i8 42
-  store i8 42, i8* %ptr
-; CHECK: %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+  store i8 42, ptr %ptr
+; CHECK: %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
+  %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
 ; CHECK-NOT: store i8 43
-  store i8 43, i8* %ptr2
-  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr2)
-  %ptr4 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr3)
+  store i8 43, ptr %ptr2
+  %ptr3 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr2)
+  %ptr4 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr3)
 
 ; CHECK: store i8 44
-  store i8 44, i8* %ptr4
+  store i8 44, ptr %ptr4
   ret void
 }
 
-; CHECK-LABEL: void @skip3Barriers(i8* %ptr)
-define void @skip3Barriers(i8* %ptr) {
+; CHECK-LABEL: void @skip3Barriers(ptr %ptr)
+define void @skip3Barriers(ptr %ptr) {
 ; CHECK-NOT: store i8 42
-  store i8 42, i8* %ptr
-; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
-  %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
+  store i8 42, ptr %ptr
+; CHECK: %ptr2 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr)
+  %ptr2 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr)
 ; CHECK-NOT: store i8 43
-  store i8 43, i8* %ptr2
-  %ptr3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr2)
-  %ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
+  store i8 43, ptr %ptr2
+  %ptr3 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr2)
+  %ptr4 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr3)
 
 ; CHECK: store i8 44
-  store i8 44, i8* %ptr4
+  store i8 44, ptr %ptr4
   ret void
 }
 
-; CHECK-LABEL: void @skip4Barriers(i8* %ptr)
-define void @skip4Barriers(i8* %ptr) {
+; CHECK-LABEL: void @skip4Barriers(ptr %ptr)
+define void @skip4Barriers(ptr %ptr) {
 ; CHECK-NOT: store i8 42
-  store i8 42, i8* %ptr
-; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
-  %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
+  store i8 42, ptr %ptr
+; CHECK: %ptr2 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr)
+  %ptr2 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr)
 ; CHECK-NOT: store i8 43
-  store i8 43, i8* %ptr2
-  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr2)
-  %ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
-  %ptr5 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr3)
+  store i8 43, ptr %ptr2
+  %ptr3 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr2)
+  %ptr4 = call ptr @llvm.strip.invariant.group.p0(ptr %ptr3)
+  %ptr5 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr3)
 
 ; CHECK: store i8 44
-  store i8 44, i8* %ptr5
+  store i8 44, ptr %ptr5
   ret void
 }
 
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
+declare ptr @llvm.strip.invariant.group.p0(ptr)

diff --git a/llvm/test/Transforms/DeadStoreElimination/libcalls-darwin.ll b/llvm/test/Transforms/DeadStoreElimination/libcalls-darwin.ll
index 9fe5eb3ca7e0f..11752f89bbd5d 100644
--- a/llvm/test/Transforms/DeadStoreElimination/libcalls-darwin.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/libcalls-darwin.ll
@@ -3,114 +3,114 @@
 
 ; Tests for libcalls only available on Darwin platforms.
 
-declare void @memset_pattern4(i8*, i8*, i64)
-declare void @memset_pattern8(i8*, i8*, i64)
-declare void @memset_pattern16(i8*, i8*, i64)
+declare void @memset_pattern4(ptr, ptr, i64)
+declare void @memset_pattern8(ptr, ptr, i64)
+declare void @memset_pattern16(ptr, ptr, i64)
 
-define void @test_memset_pattern4_const_size(i8* noalias %a, i8* noalias %pattern) {
+define void @test_memset_pattern4_const_size(ptr noalias %a, ptr noalias %pattern) {
 ; CHECK-LABEL: @test_memset_pattern4_const_size(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, i8* [[A:%.*]], i32 17
-; CHECK-NEXT:    store i8 1, i8* [[A_GEP_17]], align 1
-; CHECK-NEXT:    call void @memset_pattern4(i8* [[A]], i8* [[PATTERN:%.*]], i64 17)
+; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 17
+; CHECK-NEXT:    store i8 1, ptr [[A_GEP_17]], align 1
+; CHECK-NEXT:    call void @memset_pattern4(ptr [[A]], ptr [[PATTERN:%.*]], i64 17)
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.gep.1 = getelementptr i8, i8* %a, i32 1
-  store i8 0, i8* %a.gep.1
-  %a.gep.17 = getelementptr i8, i8* %a, i32 17
-  store i8 1, i8* %a.gep.17
-  call void @memset_pattern4(i8* %a, i8* %pattern, i64 17)
+  %a.gep.1 = getelementptr i8, ptr %a, i32 1
+  store i8 0, ptr %a.gep.1
+  %a.gep.17 = getelementptr i8, ptr %a, i32 17
+  store i8 1, ptr %a.gep.17
+  call void @memset_pattern4(ptr %a, ptr %pattern, i64 17)
   ret void
 }
 
-define void @test_memset_pattern4_variable_size(i8* noalias %a, i8* noalias %pattern, i64 %n) {
+define void @test_memset_pattern4_variable_size(ptr noalias %a, ptr noalias %pattern, i64 %n) {
 ; CHECK-LABEL: @test_memset_pattern4_variable_size(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A_GEP_1:%.*]] = getelementptr i8, i8* [[A:%.*]], i32 1
-; CHECK-NEXT:    store i8 0, i8* [[A_GEP_1]], align 1
-; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, i8* [[A]], i32 17
-; CHECK-NEXT:    store i8 1, i8* [[A_GEP_17]], align 1
-; CHECK-NEXT:    call void @memset_pattern4(i8* [[A]], i8* [[PATTERN:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[A_GEP_1:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 1
+; CHECK-NEXT:    store i8 0, ptr [[A_GEP_1]], align 1
+; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, ptr [[A]], i32 17
+; CHECK-NEXT:    store i8 1, ptr [[A_GEP_17]], align 1
+; CHECK-NEXT:    call void @memset_pattern4(ptr [[A]], ptr [[PATTERN:%.*]], i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.gep.1 = getelementptr i8, i8* %a, i32 1
-  store i8 0, i8* %a.gep.1
-  %a.gep.17 = getelementptr i8, i8* %a, i32 17
-  store i8 1, i8* %a.gep.17
-  call void @memset_pattern4(i8* %a, i8* %pattern, i64 %n)
+  %a.gep.1 = getelementptr i8, ptr %a, i32 1
+  store i8 0, ptr %a.gep.1
+  %a.gep.17 = getelementptr i8, ptr %a, i32 17
+  store i8 1, ptr %a.gep.17
+  call void @memset_pattern4(ptr %a, ptr %pattern, i64 %n)
   ret void
 }
 
-define void @test_memset_pattern8_const_size(i8* noalias %a, i8* noalias %pattern) {
+define void @test_memset_pattern8_const_size(ptr noalias %a, ptr noalias %pattern) {
 ; CHECK-LABEL: @test_memset_pattern8_const_size(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, i8* [[A:%.*]], i32 17
-; CHECK-NEXT:    store i8 1, i8* [[A_GEP_17]], align 1
-; CHECK-NEXT:    call void @memset_pattern8(i8* [[A]], i8* [[PATTERN:%.*]], i64 17)
+; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 17
+; CHECK-NEXT:    store i8 1, ptr [[A_GEP_17]], align 1
+; CHECK-NEXT:    call void @memset_pattern8(ptr [[A]], ptr [[PATTERN:%.*]], i64 17)
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.gep.1 = getelementptr i8, i8* %a, i32 1
-  store i8 0, i8* %a.gep.1
-  %a.gep.17 = getelementptr i8, i8* %a, i32 17
-  store i8 1, i8* %a.gep.17
-  call void @memset_pattern8(i8* %a, i8* %pattern, i64 17)
+  %a.gep.1 = getelementptr i8, ptr %a, i32 1
+  store i8 0, ptr %a.gep.1
+  %a.gep.17 = getelementptr i8, ptr %a, i32 17
+  store i8 1, ptr %a.gep.17
+  call void @memset_pattern8(ptr %a, ptr %pattern, i64 17)
   ret void
 }
 
-define void @test_memset_pattern8_variable_size(i8* noalias %a, i8* noalias %pattern, i64 %n) {
+define void @test_memset_pattern8_variable_size(ptr noalias %a, ptr noalias %pattern, i64 %n) {
 ; CHECK-LABEL: @test_memset_pattern8_variable_size(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A_GEP_1:%.*]] = getelementptr i8, i8* [[A:%.*]], i32 1
-; CHECK-NEXT:    store i8 0, i8* [[A_GEP_1]], align 1
-; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, i8* [[A]], i32 17
-; CHECK-NEXT:    store i8 1, i8* [[A_GEP_17]], align 1
-; CHECK-NEXT:    call void @memset_pattern8(i8* [[A]], i8* [[PATTERN:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[A_GEP_1:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 1
+; CHECK-NEXT:    store i8 0, ptr [[A_GEP_1]], align 1
+; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, ptr [[A]], i32 17
+; CHECK-NEXT:    store i8 1, ptr [[A_GEP_17]], align 1
+; CHECK-NEXT:    call void @memset_pattern8(ptr [[A]], ptr [[PATTERN:%.*]], i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.gep.1 = getelementptr i8, i8* %a, i32 1
-  store i8 0, i8* %a.gep.1
-  %a.gep.17 = getelementptr i8, i8* %a, i32 17
-  store i8 1, i8* %a.gep.17
-  call void @memset_pattern8(i8* %a, i8* %pattern, i64 %n)
+  %a.gep.1 = getelementptr i8, ptr %a, i32 1
+  store i8 0, ptr %a.gep.1
+  %a.gep.17 = getelementptr i8, ptr %a, i32 17
+  store i8 1, ptr %a.gep.17
+  call void @memset_pattern8(ptr %a, ptr %pattern, i64 %n)
   ret void
 }
 
-define void @test_memset_pattern16_const_size(i8* noalias %a, i8* noalias %pattern) {
+define void @test_memset_pattern16_const_size(ptr noalias %a, ptr noalias %pattern) {
 ; CHECK-LABEL: @test_memset_pattern16_const_size(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, i8* [[A:%.*]], i32 17
-; CHECK-NEXT:    store i8 1, i8* [[A_GEP_17]], align 1
-; CHECK-NEXT:    call void @memset_pattern16(i8* [[A]], i8* [[PATTERN:%.*]], i64 17)
+; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 17
+; CHECK-NEXT:    store i8 1, ptr [[A_GEP_17]], align 1
+; CHECK-NEXT:    call void @memset_pattern16(ptr [[A]], ptr [[PATTERN:%.*]], i64 17)
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.gep.1 = getelementptr i8, i8* %a, i32 1
-  store i8 0, i8* %a.gep.1
-  %a.gep.17 = getelementptr i8, i8* %a, i32 17
-  store i8 1, i8* %a.gep.17
-  call void @memset_pattern16(i8* %a, i8* %pattern, i64 17)
+  %a.gep.1 = getelementptr i8, ptr %a, i32 1
+  store i8 0, ptr %a.gep.1
+  %a.gep.17 = getelementptr i8, ptr %a, i32 17
+  store i8 1, ptr %a.gep.17
+  call void @memset_pattern16(ptr %a, ptr %pattern, i64 17)
   ret void
 }
 
-define void @test_memset_pattern16_variable_size(i8* noalias %a, i8* noalias %pattern, i64 %n) {
+define void @test_memset_pattern16_variable_size(ptr noalias %a, ptr noalias %pattern, i64 %n) {
 ; CHECK-LABEL: @test_memset_pattern16_variable_size(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A_GEP_1:%.*]] = getelementptr i8, i8* [[A:%.*]], i32 1
-; CHECK-NEXT:    store i8 0, i8* [[A_GEP_1]], align 1
-; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, i8* [[A]], i32 17
-; CHECK-NEXT:    store i8 1, i8* [[A_GEP_17]], align 1
-; CHECK-NEXT:    call void @memset_pattern16(i8* [[A]], i8* [[PATTERN:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[A_GEP_1:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 1
+; CHECK-NEXT:    store i8 0, ptr [[A_GEP_1]], align 1
+; CHECK-NEXT:    [[A_GEP_17:%.*]] = getelementptr i8, ptr [[A]], i32 17
+; CHECK-NEXT:    store i8 1, ptr [[A_GEP_17]], align 1
+; CHECK-NEXT:    call void @memset_pattern16(ptr [[A]], ptr [[PATTERN:%.*]], i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.gep.1 = getelementptr i8, i8* %a, i32 1
-  store i8 0, i8* %a.gep.1
-  %a.gep.17 = getelementptr i8, i8* %a, i32 17
-  store i8 1, i8* %a.gep.17
-  call void @memset_pattern16(i8* %a, i8* %pattern, i64 %n)
+  %a.gep.1 = getelementptr i8, ptr %a, i32 1
+  store i8 0, ptr %a.gep.1
+  %a.gep.17 = getelementptr i8, ptr %a, i32 17
+  store i8 1, ptr %a.gep.17
+  call void @memset_pattern16(ptr %a, ptr %pattern, i64 %n)
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
index 619287748d3e3..e675f260dc9b7 100644
--- a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
@@ -4,616 +4,578 @@
 
 target triple = "x86_64-unknown-linux-gnu"
 
-declare i8* @strcpy(i8* %dest, i8* %src) nounwind
-define void @test1(i8* %src) {
+declare ptr @strcpy(ptr %dest, ptr %src) nounwind
+define void @test1(ptr %src) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    ret void
 ;
   %B = alloca [16 x i8]
-  %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
-  %call = call i8* @strcpy(i8* %dest, i8* %src)
+  %call = call ptr @strcpy(ptr %B, ptr %src)
   ret void
 }
 
-define void @strcpy_reads_after(i8* noalias %dest, i8* %src) {
+define void @strcpy_reads_after(ptr noalias %dest, ptr %src) {
 ; CHECK-LABEL: @strcpy_reads_after(
-; CHECK-NEXT:    [[SRC_2:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 1
-; CHECK-NEXT:    store i8 99, i8* [[SRC_2]], align 1
-; CHECK-NEXT:    [[SRC_1:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 1
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @strcpy(i8* [[DEST:%.*]], i8* [[SRC_1]])
-; CHECK-NEXT:    store i8 2, i8* [[SRC]], align 1
-; CHECK-NEXT:    store i8 2, i8* [[SRC_2]], align 1
+; CHECK-NEXT:    [[SRC_2:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 1
+; CHECK-NEXT:    store i8 99, ptr [[SRC_2]], align 1
+; CHECK-NEXT:    [[SRC_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @strcpy(ptr [[DEST:%.*]], ptr [[SRC_1]])
+; CHECK-NEXT:    store i8 2, ptr [[SRC]], align 1
+; CHECK-NEXT:    store i8 2, ptr [[SRC_2]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %src.2 = getelementptr inbounds i8, i8* %src, i64 1
-  store i8 1, i8* %src
-  store i8 99, i8* %src.2
-  %src.1 = getelementptr inbounds i8, i8* %src, i64 1
-  %call = call i8* @strcpy(i8* %dest, i8* %src.1)
-  store i8 2, i8* %src
-  store i8 2, i8* %src.2
+  %src.2 = getelementptr inbounds i8, ptr %src, i64 1
+  store i8 1, ptr %src
+  store i8 99, ptr %src.2
+  %src.1 = getelementptr inbounds i8, ptr %src, i64 1
+  %call = call ptr @strcpy(ptr %dest, ptr %src.1)
+  store i8 2, ptr %src
+  store i8 2, ptr %src.2
   ret void
 }
 
-declare i8* @strncpy(i8* %dest, i8* %src, i64 %n) nounwind
-define void @test2(i8* %src) {
+declare ptr @strncpy(ptr %dest, ptr %src, i64 %n) nounwind
+define void @test2(ptr %src) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    ret void
 ;
   %B = alloca [16 x i8]
-  %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
-  %call = call i8* @strncpy(i8* %dest, i8* %src, i64 12)
+  %call = call ptr @strncpy(ptr %B, ptr %src, i64 12)
   ret void
 }
 
-declare i8* @strcat(i8* %dest, i8* %src) nounwind
-define void @test3(i8* %src) {
+declare ptr @strcat(ptr %dest, ptr %src) nounwind
+define void @test3(ptr %src) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:    ret void
 ;
   %B = alloca [16 x i8]
-  %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
-  %call = call i8* @strcat(i8* %dest, i8* %src)
+  %call = call ptr @strcat(ptr %B, ptr %src)
   ret void
 }
 
-define void @test_strcat_with_lifetime(i8* %src) {
+define void @test_strcat_with_lifetime(ptr %src) {
 ; CHECK-LABEL: @test_strcat_with_lifetime(
 ; CHECK-NEXT:    [[B:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT:    [[B_CAST:%.*]] = bitcast [16 x i8]* [[B]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull [[B_CAST]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[B_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[B]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[B]])
 ; CHECK-NEXT:    ret void
 ;
   %B = alloca [16 x i8]
-  %B.cast = bitcast [16 x i8]* %B to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %B.cast)
-  %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
-  %call = call i8* @strcat(i8* %dest, i8* %src)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %B.cast)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %B)
+  %call = call ptr @strcat(ptr %B, ptr %src)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %B)
   ret void
 }
 
-define void @test_strcat_with_lifetime_nonlocal(i8* %dest, i8* %src) {
+define void @test_strcat_with_lifetime_nonlocal(ptr %dest, ptr %src) {
 ; CHECK-LABEL: @test_strcat_with_lifetime_nonlocal(
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull [[DEST:%.*]])
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @strcat(i8* [[DEST]], i8* [[SRC:%.*]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[DEST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[DEST:%.*]])
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @strcat(ptr [[DEST]], ptr [[SRC:%.*]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[DEST]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %dest)
-  %call = call i8* @strcat(i8* %dest, i8* %src)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %dest)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %dest)
+  %call = call ptr @strcat(ptr %dest, ptr %src)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %dest)
   ret void
 }
 
-declare i8* @strncat(i8* %dest, i8* %src, i64 %n) nounwind
-define void @test4(i8* %src) {
+declare ptr @strncat(ptr %dest, ptr %src, i64 %n) nounwind
+define void @test4(ptr %src) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:    ret void
 ;
   %B = alloca [16 x i8]
-  %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
-  %call = call i8* @strncat(i8* %dest, i8* %src, i64 12)
+  %call = call ptr @strncat(ptr %B, ptr %src, i64 12)
   ret void
 }
 
-define void @test5(i8* nocapture %src) {
+define void @test5(ptr nocapture %src) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    ret void
 ;
   %dest = alloca [100 x i8], align 16
-  %arraydecay = getelementptr inbounds [100 x i8], [100 x i8]* %dest, i64 0, i64 0
-  %call = call i8* @strcpy(i8* %arraydecay, i8* %src)
-  %arrayidx = getelementptr inbounds i8, i8* %call, i64 10
-  store i8 97, i8* %arrayidx, align 1
+  %call = call ptr @strcpy(ptr %dest, ptr %src)
+  %arrayidx = getelementptr inbounds i8, ptr %call, i64 10
+  store i8 97, ptr %arrayidx, align 1
   ret void
 }
 
-declare void @user(i8* %p)
-define void @test6(i8* %src) {
+declare void @user(ptr %p)
+define void @test6(ptr %src) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    [[B:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT:    [[DEST:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[B]], i64 0, i64 0
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @strcpy(i8* [[DEST]], i8* [[SRC:%.*]])
-; CHECK-NEXT:    call void @user(i8* [[DEST]])
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @strcpy(ptr [[B]], ptr [[SRC:%.*]])
+; CHECK-NEXT:    call void @user(ptr [[B]])
 ; CHECK-NEXT:    ret void
 ;
   %B = alloca [16 x i8]
-  %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
-  %call = call i8* @strcpy(i8* %dest, i8* %src)
-  call void @user(i8* %dest)
+  %call = call ptr @strcpy(ptr %B, ptr %src)
+  call void @user(ptr %B)
   ret void
 }
 
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
 
-define i32 @test_memcmp_const_size(i8* noalias %foo) {
+define i32 @test_memcmp_const_size(ptr noalias %foo) {
 ; CHECK-LABEL: @test_memcmp_const_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[RES:%.*]] = call i32 @memcmp(i8* nonnull dereferenceable(2) [[FOO:%.*]], i8* nonnull dereferenceable(2) [[STACK_PTR]], i64 2)
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = call i32 @memcmp(ptr nonnull dereferenceable(2) [[FOO:%.*]], ptr nonnull dereferenceable(2) [[STACK]], i64 2)
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %res = call i32 @memcmp(i8* nonnull dereferenceable(2) %foo, i8* nonnull dereferenceable(2) %stack.ptr, i64 2)
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %res = call i32 @memcmp(ptr nonnull dereferenceable(2) %foo, ptr nonnull dereferenceable(2) %stack, i64 2)
   ret i32 %res
 }
 
-define i32 @test_memcmp_variable_size(i8* noalias %foo, i64 %n) {
+define i32 @test_memcmp_variable_size(ptr noalias %foo, i64 %n) {
 ; CHECK-LABEL: @test_memcmp_variable_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 2
-; CHECK-NEXT:    store i8 51, i8* [[GEP_2]], align 1
-; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 3
-; CHECK-NEXT:    store i8 52, i8* [[GEP_3]], align 1
-; CHECK-NEXT:    [[RES:%.*]] = call i32 @memcmp(i8* nonnull [[FOO:%.*]], i8* nonnull [[STACK_PTR]], i64 [[N:%.*]])
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, ptr [[STACK]], i64 2
+; CHECK-NEXT:    store i8 51, ptr [[GEP_2]], align 1
+; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, ptr [[STACK]], i64 3
+; CHECK-NEXT:    store i8 52, ptr [[GEP_3]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = call i32 @memcmp(ptr nonnull [[FOO:%.*]], ptr nonnull [[STACK]], i64 [[N:%.*]])
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %res = call i32 @memcmp(i8* nonnull %foo, i8* nonnull %stack.ptr, i64 %n)
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %res = call i32 @memcmp(ptr nonnull %foo, ptr nonnull %stack, i64 %n)
   ret i32 %res
 }
 
-declare i32 @bcmp(i8*, i8*, i64)
+declare i32 @bcmp(ptr, ptr, i64)
 
-define i1 @test_bcmp_const_size(i8* noalias %foo) {
+define i1 @test_bcmp_const_size(ptr noalias %foo) {
 ; CHECK-LABEL: @test_bcmp_const_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[CALL:%.*]] = call i32 @bcmp(i8* nonnull dereferenceable(2) [[FOO:%.*]], i8* nonnull dereferenceable(2) [[STACK_PTR]], i64 2)
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @bcmp(ptr nonnull dereferenceable(2) [[FOO:%.*]], ptr nonnull dereferenceable(2) [[STACK]], i64 2)
 ; CHECK-NEXT:    [[RES:%.*]] = icmp eq i32 [[CALL]], 0
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %call = call i32 @bcmp(i8* nonnull dereferenceable(2) %foo, i8* nonnull dereferenceable(2) %stack.ptr, i64 2)
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %call = call i32 @bcmp(ptr nonnull dereferenceable(2) %foo, ptr nonnull dereferenceable(2) %stack, i64 2)
   %res = icmp eq i32 %call, 0
   ret i1 %res
 }
 
-define i1 @test_bcmp_variable_size(i8* noalias %foo, i64 %n) {
+define i1 @test_bcmp_variable_size(ptr noalias %foo, i64 %n) {
 ; CHECK-LABEL: @test_bcmp_variable_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 2
-; CHECK-NEXT:    store i8 51, i8* [[GEP_2]], align 1
-; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 3
-; CHECK-NEXT:    store i8 52, i8* [[GEP_3]], align 1
-; CHECK-NEXT:    [[CALL:%.*]] = call i32 @bcmp(i8* nonnull [[FOO:%.*]], i8* nonnull [[STACK_PTR]], i64 [[N:%.*]])
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, ptr [[STACK]], i64 2
+; CHECK-NEXT:    store i8 51, ptr [[GEP_2]], align 1
+; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, ptr [[STACK]], i64 3
+; CHECK-NEXT:    store i8 52, ptr [[GEP_3]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @bcmp(ptr nonnull [[FOO:%.*]], ptr nonnull [[STACK]], i64 [[N:%.*]])
 ; CHECK-NEXT:    [[RES:%.*]] = icmp eq i32 [[CALL]], 0
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %call = call i32 @bcmp(i8* nonnull %foo, i8* nonnull %stack.ptr, i64 %n)
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %call = call i32 @bcmp(ptr nonnull %foo, ptr nonnull %stack, i64 %n)
   %res = icmp eq i32 %call, 0
   ret i1 %res
 }
 
-declare i8* @memchr(i8*, i32, i64)
+declare ptr @memchr(ptr, i32, i64)
 
-define i8* @test_memchr_const_size() {
+define ptr @test_memchr_const_size() {
 ; CHECK-LABEL: @test_memchr_const_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @memchr(i8* [[STACK_PTR]], i32 42, i64 2)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @memchr(ptr [[STACK]], i32 42, i64 2)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %call = call i8* @memchr(i8* %stack.ptr, i32 42, i64 2)
-  ret i8* %call
-}
-
-define i8* @test_memchr_variable_size(i64 %n) {
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %call = call ptr @memchr(ptr %stack, i32 42, i64 2)
+  ret ptr %call
+}
+
+define ptr @test_memchr_variable_size(i64 %n) {
 ; CHECK-LABEL: @test_memchr_variable_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 2
-; CHECK-NEXT:    store i8 51, i8* [[GEP_2]], align 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 4
-; CHECK-NEXT:    store i8 52, i8* [[GEP]], align 1
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @memchr(i8* [[STACK_PTR]], i32 42, i64 [[N:%.*]])
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, ptr [[STACK]], i64 2
+; CHECK-NEXT:    store i8 51, ptr [[GEP_2]], align 1
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[STACK]], i64 4
+; CHECK-NEXT:    store i8 52, ptr [[GEP]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @memchr(ptr [[STACK]], i32 42, i64 [[N:%.*]])
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep = getelementptr i8, i8* %stack.ptr, i64 4
-  store i8 52, i8* %gep, align 1
-  %call = call i8* @memchr(i8* %stack.ptr, i32 42, i64 %n)
-  ret i8* %call
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep = getelementptr i8, ptr %stack, i64 4
+  store i8 52, ptr %gep, align 1
+  %call = call ptr @memchr(ptr %stack, i32 42, i64 %n)
+  ret ptr %call
 }
 
-declare i8* @memccpy(i8*, i8*, i32, i64)
+declare ptr @memccpy(ptr, ptr, i32, i64)
 
-define i8* @test_memccpy_const_size(i8* %foo) {
+define ptr @test_memccpy_const_size(ptr %foo) {
 ; CHECK-LABEL: @test_memccpy_const_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[RES:%.*]] = call i8* @memccpy(i8* [[FOO:%.*]], i8* [[STACK_PTR]], i32 42, i64 2)
-; CHECK-NEXT:    ret i8* [[RES]]
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = call ptr @memccpy(ptr [[FOO:%.*]], ptr [[STACK]], i32 42, i64 2)
+; CHECK-NEXT:    ret ptr [[RES]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %res = call i8* @memccpy(i8* %foo, i8* %stack.ptr, i32 42, i64 2)
-  ret i8* %res
-}
-
-define i8* @test_memccpy_variable_size(i8* %foo, i64 %n) {
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %res = call ptr @memccpy(ptr %foo, ptr %stack, i32 42, i64 2)
+  ret ptr %res
+}
+
+define ptr @test_memccpy_variable_size(ptr %foo, i64 %n) {
 ; CHECK-LABEL: @test_memccpy_variable_size(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[STACK:%.*]] = alloca [10 x i8], align 1
-; CHECK-NEXT:    [[STACK_PTR:%.*]] = bitcast [10 x i8]* [[STACK]] to i8*
-; CHECK-NEXT:    store i8 49, i8* [[STACK_PTR]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 2
-; CHECK-NEXT:    store i8 51, i8* [[GEP_2]], align 1
-; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, i8* [[STACK_PTR]], i64 3
-; CHECK-NEXT:    store i8 52, i8* [[GEP_3]], align 1
-; CHECK-NEXT:    [[RES:%.*]] = call i8* @memccpy(i8* [[FOO:%.*]], i8* [[STACK_PTR]], i32 42, i64 [[N:%.*]])
-; CHECK-NEXT:    ret i8* [[RES]]
+; CHECK-NEXT:    store i8 49, ptr [[STACK]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[STACK]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, ptr [[STACK]], i64 2
+; CHECK-NEXT:    store i8 51, ptr [[GEP_2]], align 1
+; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, ptr [[STACK]], i64 3
+; CHECK-NEXT:    store i8 52, ptr [[GEP_3]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = call ptr @memccpy(ptr [[FOO:%.*]], ptr [[STACK]], i32 42, i64 [[N:%.*]])
+; CHECK-NEXT:    ret ptr [[RES]]
 ;
 entry:
   %stack = alloca [10 x i8]
-  %stack.ptr = bitcast [10 x i8]* %stack to i8*
-  store i8 49, i8* %stack.ptr, align 1
-  %gep.1 = getelementptr i8, i8* %stack.ptr, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %stack.ptr, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %stack.ptr, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %res = call i8* @memccpy(i8* %foo, i8* %stack.ptr, i32 42, i64 %n)
-  ret i8* %res
+  store i8 49, ptr %stack, align 1
+  %gep.1 = getelementptr i8, ptr %stack, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %stack, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %stack, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %res = call ptr @memccpy(ptr %foo, ptr %stack, i32 42, i64 %n)
+  ret ptr %res
 }
 
 ; Make sure memccpy does not kill any stores, because it is not known how many
 ; bytes are written.
-define i8* @test_memccpy_const_size_does_not_kill_stores(i8* noalias %dest, i8* noalias %foo) {
+define ptr @test_memccpy_const_size_does_not_kill_stores(ptr noalias %dest, ptr noalias %foo) {
 ; CHECK-LABEL: @test_memccpy_const_size_does_not_kill_stores(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i8 49, i8* [[DEST:%.*]], align 1
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, i8* [[DEST]], i64 1
-; CHECK-NEXT:    store i8 50, i8* [[GEP_1]], align 1
-; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, i8* [[DEST]], i64 2
-; CHECK-NEXT:    store i8 51, i8* [[GEP_2]], align 1
-; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, i8* [[DEST]], i64 3
-; CHECK-NEXT:    store i8 52, i8* [[GEP_3]], align 1
-; CHECK-NEXT:    [[RES:%.*]] = call i8* @memccpy(i8* [[DEST]], i8* [[FOO:%.*]], i32 42, i64 2)
-; CHECK-NEXT:    ret i8* [[RES]]
+; CHECK-NEXT:    store i8 49, ptr [[DEST:%.*]], align 1
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr i8, ptr [[DEST]], i64 1
+; CHECK-NEXT:    store i8 50, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr i8, ptr [[DEST]], i64 2
+; CHECK-NEXT:    store i8 51, ptr [[GEP_2]], align 1
+; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr i8, ptr [[DEST]], i64 3
+; CHECK-NEXT:    store i8 52, ptr [[GEP_3]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = call ptr @memccpy(ptr [[DEST]], ptr [[FOO:%.*]], i32 42, i64 2)
+; CHECK-NEXT:    ret ptr [[RES]]
 ;
 entry:
-  store i8 49, i8* %dest, align 1
-  %gep.1 = getelementptr i8, i8* %dest, i64 1
-  store i8 50, i8* %gep.1, align 1
-  %gep.2 = getelementptr i8, i8* %dest, i64 2
-  store i8 51, i8* %gep.2, align 1
-  %gep.3 = getelementptr i8, i8* %dest, i64 3
-  store i8 52, i8* %gep.3, align 1
-  %res = call i8* @memccpy(i8* %dest, i8* %foo, i32 42, i64 2)
-  ret i8* %res
-}
-
-define void @dse_strcpy(i8* nocapture readonly %src) {
+  store i8 49, ptr %dest, align 1
+  %gep.1 = getelementptr i8, ptr %dest, i64 1
+  store i8 50, ptr %gep.1, align 1
+  %gep.2 = getelementptr i8, ptr %dest, i64 2
+  store i8 51, ptr %gep.2, align 1
+  %gep.3 = getelementptr i8, ptr %dest, i64 3
+  store i8 52, ptr %gep.3, align 1
+  %res = call ptr @memccpy(ptr %dest, ptr %foo, i32 42, i64 2)
+  ret ptr %res
+}
+
+define void @dse_strcpy(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strcpy(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT:    [[BUF:%.*]] = getelementptr inbounds [256 x i8], [256 x i8]* [[A]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull [[BUF]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull [[BUF]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [256 x i8], align 16
-  %buf = getelementptr inbounds [256 x i8], [256 x i8]* %a, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull %buf)
-  call i8* @strcpy(i8* nonnull %buf, i8* nonnull dereferenceable(1) %src)
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull %buf)
+  call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+  call ptr @strcpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
+  call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
   ret void
 }
 
-define void @dse_strncpy(i8* nocapture readonly %src) {
+define void @dse_strncpy(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strncpy(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT:    [[BUF:%.*]] = getelementptr inbounds [256 x i8], [256 x i8]* [[A]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull [[BUF]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull [[BUF]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [256 x i8], align 16
-  %buf = getelementptr inbounds [256 x i8], [256 x i8]* %a, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull %buf)
-  call i8* @strncpy(i8* nonnull %buf, i8* nonnull dereferenceable(1) %src, i64 6)
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull %buf)
+  call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+  call ptr @strncpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
+  call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
   ret void
 }
 
-define void @dse_strcat(i8* nocapture readonly %src) {
+define void @dse_strcat(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strcat(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT:    [[BUF:%.*]] = getelementptr inbounds [256 x i8], [256 x i8]* [[A]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull [[BUF]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull [[BUF]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [256 x i8], align 16
-  %buf = getelementptr inbounds [256 x i8], [256 x i8]* %a, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull %buf)
-  call i8* @strcat(i8* nonnull %buf, i8* nonnull dereferenceable(1) %src)
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull %buf)
+  call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+  call ptr @strcat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
+  call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
   ret void
 }
 
-define void @dse_strncat(i8* nocapture readonly %src) {
+define void @dse_strncat(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strncat(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT:    [[BUF:%.*]] = getelementptr inbounds [256 x i8], [256 x i8]* [[A]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull [[BUF]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull [[BUF]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [256 x i8], align 16
-  %buf = getelementptr inbounds [256 x i8], [256 x i8]* %a, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* nonnull %buf)
-  call i8* @strncat(i8* nonnull %buf, i8* nonnull dereferenceable(1) %src, i64 6)
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* nonnull %buf)
+  call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+  call ptr @strncat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
+  call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 ; Test that strncpy/memset overwriting each other is optimized out
 
 ; strncpy -> memset, full overwrite
-define void @dse_strncpy_test1(i8* noalias %out, i8* noalias %in) {
+define void @dse_strncpy_test1(ptr noalias %out, ptr noalias %in) {
 ; CHECK-LABEL: @dse_strncpy_test1(
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[OUT:%.*]], i8 42, i64 100, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr [[OUT:%.*]], i8 42, i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @strncpy(i8* %out, i8* %in, i64 100)
-  tail call void @llvm.memset.p0i8.i64(i8* %out, i8 42, i64 100, i1 false)
+  %call = tail call ptr @strncpy(ptr %out, ptr %in, i64 100)
+  tail call void @llvm.memset.p0.i64(ptr %out, i8 42, i64 100, i1 false)
   ret void
 }
 
-declare i8* @__memset_chk(i8* writeonly, i32, i64, i64) argmemonly
+declare ptr @__memset_chk(ptr writeonly, i32, i64, i64) argmemonly
 
 ; strncpy -> __memset_chk, full overwrite
-define void @dse_strncpy_memset_chk_test1(i8* noalias %out, i8* noalias %in, i64 %n) {
+define void @dse_strncpy_memset_chk_test1(ptr noalias %out, ptr noalias %in, i64 %n) {
 ; CHECK-LABEL: @dse_strncpy_memset_chk_test1(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT:%.*]], i8* [[IN:%.*]], i64 100)
-; CHECK-NEXT:    [[CALL_2:%.*]] = tail call i8* @__memset_chk(i8* [[OUT]], i32 42, i64 100, i64 [[N:%.*]])
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT:%.*]], ptr [[IN:%.*]], i64 100)
+; CHECK-NEXT:    [[CALL_2:%.*]] = tail call ptr @__memset_chk(ptr [[OUT]], i32 42, i64 100, i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @strncpy(i8* %out, i8* %in, i64 100)
-  %call.2 = tail call i8* @__memset_chk(i8* %out, i32 42, i64 100, i64 %n)
+  %call = tail call ptr @strncpy(ptr %out, ptr %in, i64 100)
+  %call.2 = tail call ptr @__memset_chk(ptr %out, i32 42, i64 100, i64 %n)
   ret void
 }
 
-declare void @use(i8*)
+declare void @use(ptr)
 
-define void @dse_memset_chk_cannot_eliminates_store(i8* %out, i64 %n) {
+define void @dse_memset_chk_cannot_eliminates_store(ptr %out, i64 %n) {
 ; CHECK-LABEL: @dse_memset_chk_cannot_eliminates_store(
-; CHECK-NEXT:    store i8 10, i8* [[OUT:%.*]], align 1
-; CHECK-NEXT:    [[CALL_2:%.*]] = tail call i8* @__memset_chk(i8* [[OUT]], i32 42, i64 100, i64 [[N:%.*]])
+; CHECK-NEXT:    store i8 10, ptr [[OUT:%.*]], align 1
+; CHECK-NEXT:    [[CALL_2:%.*]] = tail call ptr @__memset_chk(ptr [[OUT]], i32 42, i64 100, i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  store i8 10, i8* %out
-  %call.2 = tail call i8* @__memset_chk(i8* %out, i32 42, i64 100, i64 %n)
+  store i8 10, ptr %out
+  %call.2 = tail call ptr @__memset_chk(ptr %out, i32 42, i64 100, i64 %n)
   ret void
 }
 
 define void @dse_memset_chk_eliminates_store_local_object_escapes_after(i64 %n) {
 ; CHECK-LABEL: @dse_memset_chk_eliminates_store_local_object_escapes_after(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [200 x i8], align 1
-; CHECK-NEXT:    [[OUT:%.*]] = bitcast [200 x i8]* [[A]] to i8*
-; CHECK-NEXT:    store i8 10, i8* [[OUT]], align 1
-; CHECK-NEXT:    [[OUT_100:%.*]] = getelementptr i8, i8* [[OUT]], i64 100
-; CHECK-NEXT:    store i8 10, i8* [[OUT_100]], align 1
-; CHECK-NEXT:    [[CALL_2:%.*]] = tail call i8* @__memset_chk(i8* [[OUT]], i32 42, i64 100, i64 [[N:%.*]])
-; CHECK-NEXT:    call void @use(i8* [[OUT]])
+; CHECK-NEXT:    store i8 10, ptr [[A]], align 1
+; CHECK-NEXT:    [[OUT_100:%.*]] = getelementptr i8, ptr [[A]], i64 100
+; CHECK-NEXT:    store i8 10, ptr [[OUT_100]], align 1
+; CHECK-NEXT:    [[CALL_2:%.*]] = tail call ptr @__memset_chk(ptr [[A]], i32 42, i64 100, i64 [[N:%.*]])
+; CHECK-NEXT:    call void @use(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [200 x i8]
-  %out = bitcast [200 x i8]* %a to i8*
-  store i8 10, i8* %out
-  %out.100 = getelementptr i8, i8* %out, i64 100
-  store i8 10, i8* %out.100
-  %call.2 = tail call i8* @__memset_chk(i8* %out, i32 42, i64 100, i64 %n)
-  call void @use(i8* %out)
+  store i8 10, ptr %a
+  %out.100 = getelementptr i8, ptr %a, i64 100
+  store i8 10, ptr %out.100
+  %call.2 = tail call ptr @__memset_chk(ptr %a, i32 42, i64 100, i64 %n)
+  call void @use(ptr %a)
   ret void
 }
 
 define void @dse_memset_chk_eliminates_store_local_object_escapes_before(i64 %n) {
 ; CHECK-LABEL: @dse_memset_chk_eliminates_store_local_object_escapes_before(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [200 x i8], align 1
-; CHECK-NEXT:    [[OUT:%.*]] = bitcast [200 x i8]* [[A]] to i8*
-; CHECK-NEXT:    call void @use(i8* [[OUT]])
-; CHECK-NEXT:    store i8 10, i8* [[OUT]], align 1
-; CHECK-NEXT:    [[OUT_100:%.*]] = getelementptr i8, i8* [[OUT]], i64 100
-; CHECK-NEXT:    store i8 0, i8* [[OUT_100]], align 1
-; CHECK-NEXT:    [[CALL_2:%.*]] = tail call i8* @__memset_chk(i8* [[OUT]], i32 42, i64 100, i64 [[N:%.*]])
-; CHECK-NEXT:    call void @use(i8* [[OUT]])
+; CHECK-NEXT:    call void @use(ptr [[A]])
+; CHECK-NEXT:    store i8 10, ptr [[A]], align 1
+; CHECK-NEXT:    [[OUT_100:%.*]] = getelementptr i8, ptr [[A]], i64 100
+; CHECK-NEXT:    store i8 0, ptr [[OUT_100]], align 1
+; CHECK-NEXT:    [[CALL_2:%.*]] = tail call ptr @__memset_chk(ptr [[A]], i32 42, i64 100, i64 [[N:%.*]])
+; CHECK-NEXT:    call void @use(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [200 x i8]
-  %out = bitcast [200 x i8]* %a to i8*
-  call void @use(i8* %out)
-  store i8 10, i8* %out
-  %out.100 = getelementptr i8, i8* %out, i64 100
-  store i8 0, i8* %out.100
-  %call.2 = tail call i8* @__memset_chk(i8* %out, i32 42, i64 100, i64 %n)
-  call void @use(i8* %out)
+  call void @use(ptr %a)
+  store i8 10, ptr %a
+  %out.100 = getelementptr i8, ptr %a, i64 100
+  store i8 0, ptr %out.100
+  %call.2 = tail call ptr @__memset_chk(ptr %a, i32 42, i64 100, i64 %n)
+  call void @use(ptr %a)
   ret void
 }
 
 ; strncpy -> memset, partial overwrite
-define void @dse_strncpy_test2(i8* noalias %out, i8* noalias %in) {
+define void @dse_strncpy_test2(ptr noalias %out, ptr noalias %in) {
 ; CHECK-LABEL: @dse_strncpy_test2(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT:%.*]], i8* [[IN:%.*]], i64 100)
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[OUT]], i8 42, i64 99, i1 false)
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT:%.*]], ptr [[IN:%.*]], i64 100)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr [[OUT]], i8 42, i64 99, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @strncpy(i8* %out, i8* %in, i64 100)
-  tail call void @llvm.memset.p0i8.i64(i8* %out, i8 42, i64 99, i1 false)
+  %call = tail call ptr @strncpy(ptr %out, ptr %in, i64 100)
+  tail call void @llvm.memset.p0.i64(ptr %out, i8 42, i64 99, i1 false)
   ret void
 }
 
 ; strncpy -> memset_chk, partial overwrite
-define void @dse_strncpy_memset_chk_test2(i8* noalias %out, i8* noalias %in, i64 %n) {
+define void @dse_strncpy_memset_chk_test2(ptr noalias %out, ptr noalias %in, i64 %n) {
 ; CHECK-LABEL: @dse_strncpy_memset_chk_test2(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT:%.*]], i8* [[IN:%.*]], i64 100)
-; CHECK-NEXT:    [[CALL_2:%.*]] = tail call i8* @__memset_chk(i8* [[OUT]], i32 42, i64 99, i64 [[N:%.*]])
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT:%.*]], ptr [[IN:%.*]], i64 100)
+; CHECK-NEXT:    [[CALL_2:%.*]] = tail call ptr @__memset_chk(ptr [[OUT]], i32 42, i64 99, i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @strncpy(i8* %out, i8* %in, i64 100)
-  %call.2 = tail call i8* @__memset_chk(i8* %out, i32 42, i64 99, i64 %n)
+  %call = tail call ptr @strncpy(ptr %out, ptr %in, i64 100)
+  %call.2 = tail call ptr @__memset_chk(ptr %out, i32 42, i64 99, i64 %n)
   ret void
 }
 
 ; strncpy -> memset, different destination
-define void @dse_strncpy_test3(i8* noalias %out1, i8* noalias %out2, i8* noalias %in) {
+define void @dse_strncpy_test3(ptr noalias %out1, ptr noalias %out2, ptr noalias %in) {
 ; CHECK-LABEL: @dse_strncpy_test3(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT1:%.*]], i8* [[IN:%.*]], i64 100)
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[OUT2:%.*]], i8 42, i64 100, i1 false)
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT1:%.*]], ptr [[IN:%.*]], i64 100)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr [[OUT2:%.*]], i8 42, i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @strncpy(i8* %out1, i8* %in, i64 100)
-  tail call void @llvm.memset.p0i8.i64(i8* %out2, i8 42, i64 100, i1 false)
+  %call = tail call ptr @strncpy(ptr %out1, ptr %in, i64 100)
+  tail call void @llvm.memset.p0.i64(ptr %out2, i8 42, i64 100, i1 false)
   ret void
 }
 
 ; strncpy -> memset_chk, different destination
-define void @dse_strncpy_chk_test3(i8* noalias %out1, i8* noalias %out2, i8* noalias %in, i64 %n) {
+define void @dse_strncpy_chk_test3(ptr noalias %out1, ptr noalias %out2, ptr noalias %in, i64 %n) {
 ; CHECK-LABEL: @dse_strncpy_chk_test3(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT1:%.*]], i8* [[IN:%.*]], i64 100)
-; CHECK-NEXT:    [[CALL_2:%.*]] = tail call i8* @__memset_chk(i8* [[OUT2:%.*]], i32 42, i64 100, i64 [[N:%.*]])
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT1:%.*]], ptr [[IN:%.*]], i64 100)
+; CHECK-NEXT:    [[CALL_2:%.*]] = tail call ptr @__memset_chk(ptr [[OUT2:%.*]], i32 42, i64 100, i64 [[N:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @strncpy(i8* %out1, i8* %in, i64 100)
-  %call.2 = tail call i8* @__memset_chk(i8* %out2, i32 42, i64 100, i64 %n)
+  %call = tail call ptr @strncpy(ptr %out1, ptr %in, i64 100)
+  %call.2 = tail call ptr @__memset_chk(ptr %out2, i32 42, i64 100, i64 %n)
   ret void
 }
 
 ; memset -> strncpy, full overwrite
-define void @dse_strncpy_test4(i8* noalias %out, i8* noalias %in) {
+define void @dse_strncpy_test4(ptr noalias %out, ptr noalias %in) {
 ; CHECK-LABEL: @dse_strncpy_test4(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT:%.*]], i8* [[IN:%.*]], i64 100)
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT:%.*]], ptr [[IN:%.*]], i64 100)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %out, i8 42, i64 100, i1 false)
-  %call = tail call i8* @strncpy(i8* %out, i8* %in, i64 100)
+  tail call void @llvm.memset.p0.i64(ptr %out, i8 42, i64 100, i1 false)
+  %call = tail call ptr @strncpy(ptr %out, ptr %in, i64 100)
   ret void
 }
 
 ; memset -> strncpy, partial overwrite
-define void @dse_strncpy_test5(i8* noalias %out, i8* noalias %in) {
+define void @dse_strncpy_test5(ptr noalias %out, ptr noalias %in) {
 ; CHECK-LABEL: @dse_strncpy_test5(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[OUT:%.*]], i64 99
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP1]], i8 42, i64 1, i1 false)
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT]], i8* [[IN:%.*]], i64 99)
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[OUT:%.*]], i64 99
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr align 1 [[TMP1]], i8 42, i64 1, i1 false)
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT]], ptr [[IN:%.*]], i64 99)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %out, i8 42, i64 100, i1 false)
-  %call = tail call i8* @strncpy(i8* %out, i8* %in, i64 99)
+  tail call void @llvm.memset.p0.i64(ptr %out, i8 42, i64 100, i1 false)
+  %call = tail call ptr @strncpy(ptr %out, ptr %in, i64 99)
   ret void
 }
 
 ; memset -> strncpy, different destination
-define void @dse_strncpy_test6(i8* noalias %out1, i8* noalias %out2, i8* noalias %in) {
+define void @dse_strncpy_test6(ptr noalias %out1, ptr noalias %out2, ptr noalias %in) {
 ; CHECK-LABEL: @dse_strncpy_test6(
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[OUT1:%.*]], i8 42, i64 100, i1 false)
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @strncpy(i8* [[OUT2:%.*]], i8* [[IN:%.*]], i64 100)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr [[OUT1:%.*]], i8 42, i64 100, i1 false)
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @strncpy(ptr [[OUT2:%.*]], ptr [[IN:%.*]], i64 100)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %out1, i8 42, i64 100, i1 false)
-  %call = tail call i8* @strncpy(i8* %out2, i8* %in, i64 100)
+  tail call void @llvm.memset.p0.i64(ptr %out1, i8 42, i64 100, i1 false)
+  %call = tail call ptr @strncpy(ptr %out2, ptr %in, i64 100)
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
index c54f1593a93df..92bd2e580caa0 100644
--- a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
@@ -3,88 +3,83 @@
 
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.memset.p0i8.i8(i8* nocapture, i8, i8, i1) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.memset.p0.i8(ptr nocapture, i8, i8, i1) nounwind
 
 define void @test1() {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 1, i8* [[A]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %A = alloca i8
 
-  store i8 0, i8* %A  ;; Written to by memset
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %A)
+  store i8 0, ptr %A  ;; Written to by memset
+  call void @llvm.lifetime.end.p0(i64 1, ptr %A)
 
-  call void @llvm.memset.p0i8.i8(i8* %A, i8 0, i8 -1, i1 false)
+  call void @llvm.memset.p0.i8(ptr %A, i8 0, i8 -1, i1 false)
 
   ret void
 }
 
-define void @test2(i32* %P) {
+define void @test2(ptr %P) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[Q:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 1
-; CHECK-NEXT:    [[R:%.*]] = bitcast i32* [[Q]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[R]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* [[R]])
+; CHECK-NEXT:    [[Q:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[Q]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[Q]])
 ; CHECK-NEXT:    ret void
 ;
-  %Q = getelementptr i32, i32* %P, i32 1
-  %R = bitcast i32* %Q to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %R)
-  store i32 0, i32* %Q  ;; This store is dead.
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %R)
+  %Q = getelementptr i32, ptr %P, i32 1
+  call void @llvm.lifetime.start.p0(i64 4, ptr %Q)
+  store i32 0, ptr %Q  ;; This store is dead.
+  call void @llvm.lifetime.end.p0(i64 4, ptr %Q)
   ret void
 }
 
 ; lifetime.end only marks the first two bytes of %A as dead. Make sure
-; `store i8 20, i8* %A.2 is not removed.
+; `store i8 20, ptr %A.2 is not removed.
 define void @test3_lifetime_end_partial() {
 ; CHECK-LABEL: @test3_lifetime_end_partial(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A_0:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 2, i8* [[A_0]])
-; CHECK-NEXT:    [[A_1:%.*]] = getelementptr i8, i8* [[A_0]], i64 1
-; CHECK-NEXT:    [[A_2:%.*]] = getelementptr i8, i8* [[A_0]], i64 2
-; CHECK-NEXT:    store i8 20, i8* [[A_2]], align 1
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 2, i8* [[A_0]])
-; CHECK-NEXT:    call void @use(i8* [[A_1]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 2, ptr [[A]])
+; CHECK-NEXT:    [[A_1:%.*]] = getelementptr i8, ptr [[A]], i64 1
+; CHECK-NEXT:    [[A_2:%.*]] = getelementptr i8, ptr [[A]], i64 2
+; CHECK-NEXT:    store i8 20, ptr [[A_2]], align 1
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 2, ptr [[A]])
+; CHECK-NEXT:    call void @use(ptr [[A_1]])
 ; CHECK-NEXT:    ret void
 ;
   %A = alloca i32
 
-  %A.0 = bitcast i32 * %A to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* %A.0)
-  %A.1 = getelementptr i8, i8* %A.0, i64 1
-  %A.2 = getelementptr i8, i8* %A.0, i64 2
+  call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+  %A.1 = getelementptr i8, ptr %A, i64 1
+  %A.2 = getelementptr i8, ptr %A, i64 2
 
-  store i8 0, i8* %A.0
-  store i8 10, i8* %A.1
-  store i8 20, i8* %A.2
+  store i8 0, ptr %A
+  store i8 10, ptr %A.1
+  store i8 20, ptr %A.2
 
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* %A.0)
-  call void @use(i8* %A.1)
+  call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+  call void @use(ptr %A.1)
   ret void
 }
 
 ; lifetime.end only marks the first two bytes of %A as dead. Make sure
-; `store i8 20, i8* %A.2 is not removed.
+; `store i8 20, ptr %A.2 is not removed.
 define void @test4_lifetime_end_partial_loop() {
 ; CHECK-LABEL: @test4_lifetime_end_partial_loop(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A_0:%.*]] = bitcast i32* [[A]] to i8*
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 2, i8* [[A_0]])
-; CHECK-NEXT:    [[A_1:%.*]] = getelementptr i8, i8* [[A_0]], i64 1
-; CHECK-NEXT:    [[A_2:%.*]] = getelementptr i8, i8* [[A_0]], i64 2
-; CHECK-NEXT:    call void @use(i8* [[A_1]])
-; CHECK-NEXT:    store i8 20, i8* [[A_2]], align 1
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 2, i8* [[A_0]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 2, ptr [[A]])
+; CHECK-NEXT:    [[A_1:%.*]] = getelementptr i8, ptr [[A]], i64 1
+; CHECK-NEXT:    [[A_2:%.*]] = getelementptr i8, ptr [[A]], i64 2
+; CHECK-NEXT:    call void @use(ptr [[A_1]])
+; CHECK-NEXT:    store i8 20, ptr [[A_2]], align 1
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 2, ptr [[A]])
 ; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 10
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i8 [[IV_NEXT]], 10
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[LOOP]]
@@ -94,21 +89,20 @@ define void @test4_lifetime_end_partial_loop() {
 entry:
   %A = alloca i32
 
-  %A.0 = bitcast i32 * %A to i8*
   br label %loop
 
 loop:
   %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* %A.0)
-  %A.1 = getelementptr i8, i8* %A.0, i64 1
-  %A.2 = getelementptr i8, i8* %A.0, i64 2
+  call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+  %A.1 = getelementptr i8, ptr %A, i64 1
+  %A.2 = getelementptr i8, ptr %A, i64 2
 
-  call void @use(i8* %A.1)
+  call void @use(ptr %A.1)
 
-  store i8 20, i8* %A.2
-  store i8 10, i8* %A.1
-  store i8 0, i8* %A.0
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* %A.0)
+  store i8 20, ptr %A.2
+  store i8 10, ptr %A.1
+  store i8 0, ptr %A
+  call void @llvm.lifetime.end.p0(i64 2, ptr %A)
 
   %iv.next = add i8 %iv, 10
   %exitcond = icmp eq i8 %iv.next, 10
@@ -119,36 +113,34 @@ exit:
 }
 
 ; lifetime.end only marks the first two bytes of %A as dead. Make sure
-; `store i8 20, i8* %A.2 is not removed.
-define void @test5_lifetime_end_partial(i32* %A) {
+; `store i8 20, ptr %A.2 is not removed.
+define void @test5_lifetime_end_partial(ptr %A) {
 ; CHECK-LABEL: @test5_lifetime_end_partial(
-; CHECK-NEXT:    [[A_0:%.*]] = bitcast i32* [[A:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 2, i8* [[A_0]])
-; CHECK-NEXT:    [[A_1:%.*]] = getelementptr i8, i8* [[A_0]], i64 1
-; CHECK-NEXT:    [[A_2:%.*]] = getelementptr i8, i8* [[A_0]], i64 2
-; CHECK-NEXT:    store i8 20, i8* [[A_2]], align 1
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 2, i8* [[A_0]])
-; CHECK-NEXT:    call void @use(i8* [[A_1]])
-; CHECK-NEXT:    store i8 30, i8* [[A_1]], align 1
-; CHECK-NEXT:    store i8 40, i8* [[A_2]], align 1
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 2, ptr [[A:%.*]])
+; CHECK-NEXT:    [[A_1:%.*]] = getelementptr i8, ptr [[A]], i64 1
+; CHECK-NEXT:    [[A_2:%.*]] = getelementptr i8, ptr [[A]], i64 2
+; CHECK-NEXT:    store i8 20, ptr [[A_2]], align 1
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 2, ptr [[A]])
+; CHECK-NEXT:    call void @use(ptr [[A_1]])
+; CHECK-NEXT:    store i8 30, ptr [[A_1]], align 1
+; CHECK-NEXT:    store i8 40, ptr [[A_2]], align 1
 ; CHECK-NEXT:    ret void
 ;
 
-  %A.0 = bitcast i32 * %A to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* %A.0)
-  %A.1 = getelementptr i8, i8* %A.0, i64 1
-  %A.2 = getelementptr i8, i8* %A.0, i64 2
+  call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+  %A.1 = getelementptr i8, ptr %A, i64 1
+  %A.2 = getelementptr i8, ptr %A, i64 2
 
-  store i8 0, i8* %A.0
-  store i8 10, i8* %A.1
-  store i8 20, i8* %A.2
+  store i8 0, ptr %A
+  store i8 10, ptr %A.1
+  store i8 20, ptr %A.2
 
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* %A.0)
+  call void @llvm.lifetime.end.p0(i64 2, ptr %A)
 
-  call void @use(i8* %A.1)
-  store i8 30, i8* %A.1
-  store i8 40, i8* %A.2
+  call void @use(ptr %A.1)
+  store i8 30, ptr %A.1
+  store i8 40, ptr %A.2
   ret void
 }
 
-declare void @use(i8*) readonly
+declare void @use(ptr) readonly

diff  --git a/llvm/test/Transforms/DeadStoreElimination/loop-invariant-entry-block.ll b/llvm/test/Transforms/DeadStoreElimination/loop-invariant-entry-block.ll
index cd639e25bed07..4809ab21b323f 100644
--- a/llvm/test/Transforms/DeadStoreElimination/loop-invariant-entry-block.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/loop-invariant-entry-block.ll
@@ -3,7 +3,7 @@
 
 @BUFFER = external dso_local local_unnamed_addr global [0 x i8], align 1
 
-define void @MissedDSEOpportunity(i64 %idx, i1* noalias %cc) {
+define void @MissedDSEOpportunity(i64 %idx, ptr noalias %cc) {
 ;
 ; The DSE pass will try to kill the store in the loop exit block using the
 ; store in the function exit block. The loop invariant check on the pointer
@@ -13,37 +13,35 @@ define void @MissedDSEOpportunity(i64 %idx, i1* noalias %cc) {
 ;
 ; CHECK-LABEL: @MissedDSEOpportunity(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[GEP]] to i64*
-; CHECK-NEXT:    [[CC0:%.*]] = load volatile i1, i1* [[CC:%.*]], align 1
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[IDX:%.*]]
+; CHECK-NEXT:    [[CC0:%.*]] = load volatile i1, ptr [[CC:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[CC0]], label [[HEAD:%.*]], label [[EXIT:%.*]]
 ; CHECK:       head:
-; CHECK-NEXT:    [[CC1:%.*]] = load volatile i1, i1* [[CC]], align 1
+; CHECK-NEXT:    [[CC1:%.*]] = load volatile i1, ptr [[CC]], align 1
 ; CHECK-NEXT:    br i1 [[CC1]], label [[HEAD]], label [[EXIT_LOOPEXIT:%.*]]
 ; CHECK:       exit.loopexit:
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i64 0, i64* [[BC]], align 4
+; CHECK-NEXT:    store i64 0, ptr [[GEP]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   ; The entry block cannot have predecessors or be part of a loop
-  %gep = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 %idx
-  %bc = bitcast i8* %gep to i64*
-  %cc0 = load volatile i1, i1* %cc, align 1
+  %gep = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %idx
+  %cc0 = load volatile i1, ptr %cc, align 1
   br i1 %cc0, label %head, label %exit
 
 head:                                             ; preds = %entry, %head
-  %cc1 = load volatile i1, i1* %cc, align 1
+  %cc1 = load volatile i1, ptr %cc, align 1
   br i1 %cc1, label %head, label %exit.loopexit
 
 exit.loopexit:                                    ; preds = %head
   ; Dead store
-  store i64 2, i64* %bc, align 4
+  store i64 2, ptr %gep, align 4
   br label %exit
 
 exit:                                             ; preds = %exit.loopexit, %entry
   ; Killer store
-  store i64 0, i64* %bc, align 4
+  store i64 0, ptr %gep, align 4
   ret void
 }
\ No newline at end of file

diff  --git a/llvm/test/Transforms/DeadStoreElimination/masked-dead-store-inseltpoison.ll b/llvm/test/Transforms/DeadStoreElimination/masked-dead-store-inseltpoison.ll
index b72e51a29d559..90157b16e5776 100644
--- a/llvm/test/Transforms/DeadStoreElimination/masked-dead-store-inseltpoison.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/masked-dead-store-inseltpoison.ll
@@ -2,63 +2,57 @@
 ; RUN: opt -tbaa -dse -S < %s | FileCheck %s
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 
-define dllexport i32 @f0(i8** %a0, i8** %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) #0 {
+define dllexport i32 @f0(ptr %a0, ptr %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) #0 {
 ; CHECK-LABEL: @f0(
 ; CHECK-NEXT:  b0:
-; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds i8*, i8** [[A0:%.*]], i32 [[A2:%.*]]
-; CHECK-NEXT:    [[V1:%.*]] = load i8*, i8** [[V0]], align 4, [[TBAA0:!tbaa !.*]]
-; CHECK-NEXT:    [[V2:%.*]] = getelementptr i8, i8* [[V1]], i32 [[A3:%.*]]
-; CHECK-NEXT:    [[V3:%.*]] = bitcast i8* [[V2]] to <128 x i8>*
-; CHECK-NEXT:    [[V6:%.*]] = getelementptr inbounds i8*, i8** [[A1:%.*]], i32 [[A4:%.*]]
-; CHECK-NEXT:    [[V7:%.*]] = load i8*, i8** [[V6]], align 4, [[TBAA3:!tbaa !.*]]
-; CHECK-NEXT:    [[V8:%.*]] = getelementptr i8, i8* [[V7]], i32 [[A5:%.*]]
-; CHECK-NEXT:    [[V9:%.*]] = bitcast i8* [[V8]] to <128 x i8>*
-; CHECK-NEXT:    [[V10:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[V9]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), [[TBAA5:!tbaa !.*]]
+; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds ptr, ptr [[A0:%.*]], i32 [[A2:%.*]]
+; CHECK-NEXT:    [[V1:%.*]] = load ptr, ptr [[V0]], align 4, [[TBAA0:!tbaa !.*]]
+; CHECK-NEXT:    [[V2:%.*]] = getelementptr i8, ptr [[V1]], i32 [[A3:%.*]]
+; CHECK-NEXT:    [[V6:%.*]] = getelementptr inbounds ptr, ptr [[A1:%.*]], i32 [[A4:%.*]]
+; CHECK-NEXT:    [[V7:%.*]] = load ptr, ptr [[V6]], align 4, [[TBAA3:!tbaa !.*]]
+; CHECK-NEXT:    [[V8:%.*]] = getelementptr i8, ptr [[V7]], i32 [[A5:%.*]]
+; CHECK-NEXT:    [[V10:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[V8]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), [[TBAA5:!tbaa !.*]]
 ; CHECK-NEXT:    [[V11:%.*]] = shufflevector <128 x i8> [[V10]], <128 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; CHECK-NEXT:    [[V14:%.*]] = shufflevector <32 x i8> [[V11]], <32 x i8> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[V16:%.*]] = shufflevector <128 x i8> [[V14]], <128 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT:    [[V17:%.*]] = getelementptr inbounds i8*, i8** [[A1]], i32 [[A6:%.*]]
-; CHECK-NEXT:    [[V18:%.*]] = load i8*, i8** [[V17]], align 4, [[TBAA3]]
-; CHECK-NEXT:    [[V19:%.*]] = getelementptr i8, i8* [[V18]], i32 [[A7:%.*]]
-; CHECK-NEXT:    [[V20:%.*]] = bitcast i8* [[V19]] to <128 x i8>*
-; CHECK-NEXT:    [[V21:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[V20]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), [[TBAA5]]
+; CHECK-NEXT:    [[V17:%.*]] = getelementptr inbounds ptr, ptr [[A1]], i32 [[A6:%.*]]
+; CHECK-NEXT:    [[V18:%.*]] = load ptr, ptr [[V17]], align 4, [[TBAA3]]
+; CHECK-NEXT:    [[V19:%.*]] = getelementptr i8, ptr [[V18]], i32 [[A7:%.*]]
+; CHECK-NEXT:    [[V21:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[V19]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), [[TBAA5]]
 ; CHECK-NEXT:    [[V22:%.*]] = shufflevector <128 x i8> [[V21]], <128 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; CHECK-NEXT:    [[V23:%.*]] = icmp ugt <32 x i8> [[V16]], [[V22]]
 ; CHECK-NEXT:    [[V24:%.*]] = select <32 x i1> [[V23]], <32 x i8> [[V16]], <32 x i8> [[V22]]
 ; CHECK-NEXT:    [[V25:%.*]] = shufflevector <32 x i8> [[V24]], <32 x i8> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[V25]], <128 x i8>* [[V3]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), [[TBAA8:!tbaa !.*]]
+; CHECK-NEXT:    tail call void @llvm.masked.store.v128i8.p0(<128 x i8> [[V25]], ptr [[V2]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), [[TBAA8:!tbaa !.*]]
 ; CHECK-NEXT:    ret i32 0
 ;
 b0:
-  %v0 = getelementptr inbounds i8*, i8** %a0, i32 %a2
-  %v1 = load i8*, i8** %v0, align 4, !tbaa !0
-  %v2 = getelementptr i8, i8* %v1, i32 %a3
-  %v3 = bitcast i8* %v2 to <128 x i8>*
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <128 x i8>* %v3, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 
false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
-  %v6 = getelementptr inbounds i8*, i8** %a1, i32 %a4
-  %v7 = load i8*, i8** %v6, align 4, !tbaa !6
-  %v8 = getelementptr i8, i8* %v7, i32 %a5
-  %v9 = bitcast i8* %v8 to <128 x i8>*
-  %v10 = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %v9, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
+  %v0 = getelementptr inbounds ptr, ptr %a0, i32 %a2
+  %v1 = load ptr, ptr %v0, align 4, !tbaa !0
+  %v2 = getelementptr i8, ptr %v1, i32 %a3
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, ptr %v2, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, 
i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
+  %v6 = getelementptr inbounds ptr, ptr %a1, i32 %a4
+  %v7 = load ptr, ptr %v6, align 4, !tbaa !6
+  %v8 = getelementptr i8, ptr %v7, i32 %a5
+  %v10 = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %v8, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
   %v11 = shufflevector <128 x i8> %v10, <128 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v14 = shufflevector <32 x i8> %v11, <32 x i8> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v14, <128 x i8>* %v3, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> %v14, ptr %v2, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
   %v16 = shufflevector <128 x i8> %v14, <128 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  %v17 = getelementptr inbounds i8*, i8** %a1, i32 %a6
-  %v18 = load i8*, i8** %v17, align 4, !tbaa !6
-  %v19 = getelementptr i8, i8* %v18, i32 %a7
-  %v20 = bitcast i8* %v19 to <128 x i8>*
-  %v21 = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %v20, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
+  %v17 = getelementptr inbounds ptr, ptr %a1, i32 %a6
+  %v18 = load ptr, ptr %v17, align 4, !tbaa !6
+  %v19 = getelementptr i8, ptr %v18, i32 %a7
+  %v21 = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %v19, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
   %v22 = shufflevector <128 x i8> %v21, <128 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v23 = icmp ugt <32 x i8> %v16, %v22
   %v24 = select <32 x i1> %v23, <32 x i8> %v16, <32 x i8> %v22
   %v25 = shufflevector <32 x i8> %v24, <32 x i8> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v25, <128 x i8>* %v3, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> %v25, ptr %v2, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
   ret i32 0
 }
 
-declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32 immarg, <128 x i1>) #1
-declare <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>*, i32 immarg, <128 x i1>, <128 x i8>) #2
+declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32 immarg, <128 x i1>) #1
+declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32 immarg, <128 x i1>, <128 x i8>) #2
 
 attributes #0 = { nounwind willreturn }
 attributes #1 = { argmemonly nounwind willreturn }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/masked-dead-store.ll b/llvm/test/Transforms/DeadStoreElimination/masked-dead-store.ll
index 5830d45d01b94..cf35daca08e6b 100644
--- a/llvm/test/Transforms/DeadStoreElimination/masked-dead-store.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/masked-dead-store.ll
@@ -2,92 +2,82 @@
 ; RUN: opt -tbaa -dse -S < %s | FileCheck %s
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 
-define dllexport i32 @f0(i8** %a0, i8** %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) #0 {
+define dllexport i32 @f0(ptr %a0, ptr %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) #0 {
 ; CHECK-LABEL: @f0(
 ; CHECK-NEXT:  b0:
-; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds i8*, i8** [[A0:%.*]], i32 [[A2:%.*]]
-; CHECK-NEXT:    [[V1:%.*]] = load i8*, i8** [[V0]], align 4, !tbaa [[TBAA0:![0-9]+]]
-; CHECK-NEXT:    [[V2:%.*]] = getelementptr i8, i8* [[V1]], i32 [[A3:%.*]]
-; CHECK-NEXT:    [[V3:%.*]] = bitcast i8* [[V2]] to <128 x i8>*
-; CHECK-NEXT:    [[V6:%.*]] = getelementptr inbounds i8*, i8** [[A1:%.*]], i32 [[A4:%.*]]
-; CHECK-NEXT:    [[V7:%.*]] = load i8*, i8** [[V6]], align 4, !tbaa [[TBAA3:![0-9]+]]
-; CHECK-NEXT:    [[V8:%.*]] = getelementptr i8, i8* [[V7]], i32 [[A5:%.*]]
-; CHECK-NEXT:    [[V9:%.*]] = bitcast i8* [[V8]] to <128 x i8>*
-; CHECK-NEXT:    [[V10:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[V9]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds ptr, ptr [[A0:%.*]], i32 [[A2:%.*]]
+; CHECK-NEXT:    [[V1:%.*]] = load ptr, ptr [[V0]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT:    [[V2:%.*]] = getelementptr i8, ptr [[V1]], i32 [[A3:%.*]]
+; CHECK-NEXT:    [[V6:%.*]] = getelementptr inbounds ptr, ptr [[A1:%.*]], i32 [[A4:%.*]]
+; CHECK-NEXT:    [[V7:%.*]] = load ptr, ptr [[V6]], align 4, !tbaa [[TBAA3:![0-9]+]]
+; CHECK-NEXT:    [[V8:%.*]] = getelementptr i8, ptr [[V7]], i32 [[A5:%.*]]
+; CHECK-NEXT:    [[V10:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[V8]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa [[TBAA5:![0-9]+]]
 ; CHECK-NEXT:    [[V11:%.*]] = shufflevector <128 x i8> [[V10]], <128 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; CHECK-NEXT:    [[V14:%.*]] = shufflevector <32 x i8> [[V11]], <32 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[V16:%.*]] = shufflevector <128 x i8> [[V14]], <128 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT:    [[V17:%.*]] = getelementptr inbounds i8*, i8** [[A1]], i32 [[A6:%.*]]
-; CHECK-NEXT:    [[V18:%.*]] = load i8*, i8** [[V17]], align 4, !tbaa [[TBAA3]]
-; CHECK-NEXT:    [[V19:%.*]] = getelementptr i8, i8* [[V18]], i32 [[A7:%.*]]
-; CHECK-NEXT:    [[V20:%.*]] = bitcast i8* [[V19]] to <128 x i8>*
-; CHECK-NEXT:    [[V21:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[V20]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa [[TBAA5]]
+; CHECK-NEXT:    [[V17:%.*]] = getelementptr inbounds ptr, ptr [[A1]], i32 [[A6:%.*]]
+; CHECK-NEXT:    [[V18:%.*]] = load ptr, ptr [[V17]], align 4, !tbaa [[TBAA3]]
+; CHECK-NEXT:    [[V19:%.*]] = getelementptr i8, ptr [[V18]], i32 [[A7:%.*]]
+; CHECK-NEXT:    [[V21:%.*]] = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[V19]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa [[TBAA5]]
 ; CHECK-NEXT:    [[V22:%.*]] = shufflevector <128 x i8> [[V21]], <128 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; CHECK-NEXT:    [[V23:%.*]] = icmp ugt <32 x i8> [[V16]], [[V22]]
 ; CHECK-NEXT:    [[V24:%.*]] = select <32 x i1> [[V23]], <32 x i8> [[V16]], <32 x i8> [[V22]]
 ; CHECK-NEXT:    [[V25:%.*]] = shufflevector <32 x i8> [[V24]], <32 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[V25]], <128 x i8>* [[V3]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa [[TBAA8:![0-9]+]]
+; CHECK-NEXT:    tail call void @llvm.masked.store.v128i8.p0(<128 x i8> [[V25]], ptr [[V2]], i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa [[TBAA8:![0-9]+]]
 ; CHECK-NEXT:    ret i32 0
 ;
 b0:
-  %v0 = getelementptr inbounds i8*, i8** %a0, i32 %a2
-  %v1 = load i8*, i8** %v0, align 4, !tbaa !0
-  %v2 = getelementptr i8, i8* %v1, i32 %a3
-  %v3 = bitcast i8* %v2 to <128 x i8>*
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <128 x i8>* %v3, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 
false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
-  %v6 = getelementptr inbounds i8*, i8** %a1, i32 %a4
-  %v7 = load i8*, i8** %v6, align 4, !tbaa !6
-  %v8 = getelementptr i8, i8* %v7, i32 %a5
-  %v9 = bitcast i8* %v8 to <128 x i8>*
-  %v10 = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %v9, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
+  %v0 = getelementptr inbounds ptr, ptr %a0, i32 %a2
+  %v1 = load ptr, ptr %v0, align 4, !tbaa !0
+  %v2 = getelementptr i8, ptr %v1, i32 %a3
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, ptr %v2, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, 
i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
+  %v6 = getelementptr inbounds ptr, ptr %a1, i32 %a4
+  %v7 = load ptr, ptr %v6, align 4, !tbaa !6
+  %v8 = getelementptr i8, ptr %v7, i32 %a5
+  %v10 = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %v8, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
   %v11 = shufflevector <128 x i8> %v10, <128 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v14 = shufflevector <32 x i8> %v11, <32 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v14, <128 x i8>* %v3, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> %v14, ptr %v2, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
   %v16 = shufflevector <128 x i8> %v14, <128 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  %v17 = getelementptr inbounds i8*, i8** %a1, i32 %a6
-  %v18 = load i8*, i8** %v17, align 4, !tbaa !6
-  %v19 = getelementptr i8, i8* %v18, i32 %a7
-  %v20 = bitcast i8* %v19 to <128 x i8>*
-  %v21 = tail call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %v20, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
+  %v17 = getelementptr inbounds ptr, ptr %a1, i32 %a6
+  %v18 = load ptr, ptr %v17, align 4, !tbaa !6
+  %v19 = getelementptr i8, ptr %v18, i32 %a7
+  %v21 = tail call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %v19, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <128 x i8> undef), !tbaa !8
   %v22 = shufflevector <128 x i8> %v21, <128 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v23 = icmp ugt <32 x i8> %v16, %v22
   %v24 = select <32 x i1> %v23, <32 x i8> %v16, <32 x i8> %v22
   %v25 = shufflevector <32 x i8> %v24, <32 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v25, <128 x i8>* %v3, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> %v25, ptr %v2, i32 32, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>), !tbaa !3
   ret i32 0
 }
 
-define dllexport i32 @f1(<4 x i32>* %a, <4 x i8> %v1, <4 x i32> %v2) {
+define dllexport i32 @f1(ptr %a, <4 x i8> %v1, <4 x i32> %v2) {
 ; CHECK-LABEL: @f1(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V2:%.*]], <4 x i32>* [[A:%.*]], i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
-; CHECK-NEXT:    [[PTR:%.*]] = bitcast <4 x i32>* [[A]] to <4 x i8>*
-; CHECK-NEXT:    call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> [[V1:%.*]], <4 x i8>* [[PTR]], i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V2:%.*]], ptr [[A:%.*]], i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i8.p0(<4 x i8> [[V1:%.*]], ptr [[A]], i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
 ; CHECK-NEXT:    ret i32 0
 ;
-  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %v2, <4 x i32>* %a, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
-  %ptr = bitcast <4 x i32>* %a to <4 x i8>*
-  tail call void @llvm.masked.store.v4i8.p0(<4 x i8> %v1, <4 x i8>* %ptr, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %v2, ptr %a, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  tail call void @llvm.masked.store.v4i8.p0(<4 x i8> %v1, ptr %a, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret i32 0
 }
 
-define dllexport i32 @f2(<4 x i32>* %a, <4 x i8> %v1, <4 x i32> %v2, <4 x i1> %mask) {
+define dllexport i32 @f2(ptr %a, <4 x i8> %v1, <4 x i32> %v2, <4 x i1> %mask) {
 ; CHECK-LABEL: @f2(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V2:%.*]], <4 x i32>* [[A:%.*]], i32 1, <4 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    [[PTR:%.*]] = bitcast <4 x i32>* [[A]] to <4 x i8>*
-; CHECK-NEXT:    call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> [[V1:%.*]], <4 x i8>* [[PTR]], i32 1, <4 x i1> [[MASK]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V2:%.*]], ptr [[A:%.*]], i32 1, <4 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i8.p0(<4 x i8> [[V1:%.*]], ptr [[A]], i32 1, <4 x i1> [[MASK]])
 ; CHECK-NEXT:    ret i32 0
 ;
-  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %v2, <4 x i32>* %a, i32 1, <4 x i1> %mask)
-  %ptr = bitcast <4 x i32>* %a to <4 x i8>*
-  tail call void @llvm.masked.store.v4i8.p0(<4 x i8> %v1, <4 x i8>* %ptr, i32 1, <4 x i1> %mask)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %v2, ptr %a, i32 1, <4 x i1> %mask)
+  tail call void @llvm.masked.store.v4i8.p0(<4 x i8> %v1, ptr %a, i32 1, <4 x i1> %mask)
   ret i32 0
 }
 
-declare void @llvm.masked.store.v4i8.p0(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4i32.p0(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
 
-declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32 immarg, <128 x i1>) #1
-declare <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>*, i32 immarg, <128 x i1>, <128 x i8>) #2
+declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32 immarg, <128 x i1>) #1
+declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32 immarg, <128 x i1>, <128 x i8>) #2
 
 attributes #0 = { nounwind willreturn }
 attributes #1 = { argmemonly nounwind willreturn }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll b/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
index 79211609a5400..9bb3bdbdb2855 100644
--- a/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
@@ -17,18 +17,17 @@ target triple = "x86_64-unknown-linux-gnu"
 define void @foo() #0 !dbg !14 {
 entry:
   %i = alloca i8, align 1
-  store i8 1, i8* %i, align 1, !dbg !19
+  store i8 1, ptr %i, align 1, !dbg !19
   call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
-  %0 = bitcast [1 x i8]* @g to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %i, i8* %0, i64 1, i1 false), !dbg !20
+  call void @llvm.memcpy.p0.p0.i64(ptr %i, ptr @g, i64 1, i1 false), !dbg !20
   br label %bb2
 
-bb2:                                              ; preds = %0
+bb2:                                              ; preds = %entry
   ret void, !dbg !21
 }
 
@@ -36,7 +35,7 @@ bb2:                                              ; preds = %0
 declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #2
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #2
 
 attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind readnone speculatable }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memcpy-complete-overwrite.ll b/llvm/test/Transforms/DeadStoreElimination/memcpy-complete-overwrite.ll
index 9b1624a931bc3..95be7b870e637 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memcpy-complete-overwrite.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memcpy-complete-overwrite.ll
@@ -5,156 +5,156 @@
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
 
 ; PR8701
 
 ;; Fully dead overwrite of memcpy.
-define void @test15(i8* %P, i8* %Q) nounwind ssp {
+define void @test15(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
 ;; Fully dead overwrite of memcpy.
-define void @test15_atomic(i8* %P, i8* %Q) nounwind ssp {
+define void @test15_atomic(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test15_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Fully dead overwrite of memcpy.
-define void @test15_atomic_weaker(i8* %P, i8* %Q) nounwind ssp {
+define void @test15_atomic_weaker(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test15_atomic_weaker(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Fully dead overwrite of memcpy.
-define void @test15_atomic_weaker_2(i8* %P, i8* %Q) nounwind ssp {
+define void @test15_atomic_weaker_2(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test15_atomic_weaker_2(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i1 false)
   ret void
 }
 
 ;; Full overwrite of smaller memcpy.
-define void @test16(i8* %P, i8* %Q) nounwind ssp {
+define void @test16(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test16(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 8, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
 ;; Full overwrite of smaller memcpy.
-define void @test16_atomic(i8* %P, i8* %Q) nounwind ssp {
+define void @test16_atomic(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test16_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 8, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 8, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Full overwrite of smaller memory where overwrite has stronger atomicity
-define void @test16_atomic_weaker(i8* %P, i8* %Q) nounwind ssp {
+define void @test16_atomic_weaker(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test16_atomic_weaker(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 8, i1 false)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 8, i1 false)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Full overwrite of smaller memory where overwrite has weaker atomicity.
-define void @test16_atomic_weaker_2(i8* %P, i8* %Q) nounwind ssp {
+define void @test16_atomic_weaker_2(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test16_atomic_weaker_2(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 8, i32 1)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 8, i32 1)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i1 false)
   ret void
 }
 
 ;; Overwrite of memset by memcpy.
-define void @test17(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %P, i8 42, i64 8, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
 ;; Overwrite of memset by memcpy.
-define void @test17_atomic(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17_atomic(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %P, i8 42, i64 8, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Overwrite of memset by memcpy. Overwrite is stronger atomicity. We can
 ;; remove the memset.
-define void @test17_atomic_weaker(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17_atomic_weaker(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17_atomic_weaker(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i1 false)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memset.p0.i64(ptr align 1 %P, i8 42, i64 8, i1 false)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Overwrite of memset by memcpy. Overwrite is weaker atomicity. We can remove
 ;; the memset.
-define void @test17_atomic_weaker_2(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17_atomic_weaker_2(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17_atomic_weaker_2(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %P, i8 42, i64 8, i32 1)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i1 false)
   ret void
 }
 
 ; Should not delete the volatile memset.
-define void @test17v(i8* %P, i8* %Q) nounwind ssp {
+define void @test17v(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test17v(
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[P:%.*]], i8 42, i64 8, i1 true)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr [[P:%.*]], i8 42, i64 8, i1 true)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 true)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %P, i8 42, i64 8, i1 true)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
@@ -165,25 +165,25 @@ define void @test17v(i8* %P, i8* %Q) nounwind ssp {
 ;
 ; NB! See PR11763 - currently LLVM allows memcpy's source and destination to be
 ; equal (but not inequal and overlapping).
-define void @test18(i8* %P, i8* %Q, i8* %R) nounwind ssp {
+define void @test18(ptr %P, ptr %Q, ptr %R) nounwind ssp {
 ; CHECK-LABEL: @test18(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %R, i64 12, i1 false)
   ret void
 }
 
-define void @test18_atomic(i8* %P, i8* %Q, i8* %R) nounwind ssp {
+define void @test18_atomic(ptr %P, ptr %Q, ptr %R) nounwind ssp {
 ; CHECK-LABEL: @test18_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 12, i32 1)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
index 62b3eff0bdd67..2368c6e66deba 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
@@ -4,61 +4,55 @@
 
 target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 
-%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i64 }
-%struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
-%struct.Patient = type { i32, i32, i32, %struct.Village* }
+%struct.Village = type { [4 x ptr], ptr, %struct.List, %struct.Hosp, i32, i64 }
+%struct.List = type { ptr, ptr, ptr }
+%struct.Patient = type { i32, i32, i32, ptr }
 %struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
 
-declare %struct.Village* @alloc(%struct.Village*)
+declare ptr @alloc(ptr)
 
-define i8* @alloc_tree() {
+define ptr @alloc_tree() {
 ; CHECK-LABEL: @alloc_tree(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FVAL:%.*]] = alloca [4 x %struct.Village*], align 16
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [4 x %struct.Village*]* [[FVAL]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull [[TMP0]])
-; CHECK-NEXT:    [[CALL:%.*]] = tail call dereferenceable_or_null(192) i8* @malloc(i64 192)
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[CALL]] to %struct.Village*
-; CHECK-NEXT:    [[CALL3:%.*]] = tail call %struct.Village* @alloc(%struct.Village* [[TMP1]])
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* [[FVAL]], i64 0, i64 3
-; CHECK-NEXT:    store %struct.Village* [[CALL3]], %struct.Village** [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[CALL3_1:%.*]] = tail call %struct.Village* @alloc(%struct.Village* [[TMP1]])
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* [[FVAL]], i64 0, i64 2
-; CHECK-NEXT:    store %struct.Village* [[CALL3_1]], %struct.Village** [[ARRAYIDX_1]], align 16
-; CHECK-NEXT:    [[CALL3_2:%.*]] = tail call %struct.Village* @alloc(%struct.Village* [[TMP1]])
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* [[FVAL]], i64 0, i64 1
-; CHECK-NEXT:    store %struct.Village* [[CALL3_2]], %struct.Village** [[ARRAYIDX_2]], align 8
-; CHECK-NEXT:    [[CALL3_3:%.*]] = tail call %struct.Village* @alloc(%struct.Village* [[TMP1]])
-; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* [[FVAL]], i64 0, i64 0
-; CHECK-NEXT:    store %struct.Village* [[CALL3_3]], %struct.Village** [[ARRAYIDX_3]], align 16
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 8 dereferenceable(32) [[CALL]], i8* nonnull align 16 dereferenceable(32) [[TMP0]], i64 32, i1 false)
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull [[TMP0]])
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[FVAL:%.*]] = alloca [4 x ptr], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT:    [[CALL:%.*]] = tail call dereferenceable_or_null(192) ptr @malloc(i64 192)
+; CHECK-NEXT:    [[CALL3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x ptr], ptr [[FVAL]], i64 0, i64 3
+; CHECK-NEXT:    store ptr [[CALL3]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[CALL3_1:%.*]] = tail call ptr @alloc(ptr [[CALL]])
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x ptr], ptr [[FVAL]], i64 0, i64 2
+; CHECK-NEXT:    store ptr [[CALL3_1]], ptr [[ARRAYIDX_1]], align 16
+; CHECK-NEXT:    [[CALL3_2:%.*]] = tail call ptr @alloc(ptr [[CALL]])
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x ptr], ptr [[FVAL]], i64 0, i64 1
+; CHECK-NEXT:    store ptr [[CALL3_2]], ptr [[ARRAYIDX_2]], align 8
+; CHECK-NEXT:    [[CALL3_3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
+; CHECK-NEXT:    store ptr [[CALL3_3]], ptr [[FVAL]], align 16
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) [[CALL]], ptr nonnull align 16 dereferenceable(32) [[FVAL]], i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
 entry:
-  %fval = alloca [4 x %struct.Village*], align 16
-  %0 = bitcast [4 x %struct.Village*]* %fval to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %0) #7
-  %call = tail call dereferenceable_or_null(192) i8* @malloc(i64 192) #8
-  %1 = bitcast i8* %call to %struct.Village*
-  %call3 = tail call %struct.Village* @alloc(%struct.Village* %1)
-  %arrayidx = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* %fval, i64 0, i64 3
-  store %struct.Village* %call3, %struct.Village** %arrayidx, align 8
-  %call3.1 = tail call %struct.Village* @alloc(%struct.Village* %1)
-  %arrayidx.1 = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* %fval, i64 0, i64 2
-  store %struct.Village* %call3.1, %struct.Village** %arrayidx.1, align 16
-  %call3.2 = tail call %struct.Village* @alloc(%struct.Village* %1)
-  %arrayidx.2 = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* %fval, i64 0, i64 1
-  store %struct.Village* %call3.2, %struct.Village** %arrayidx.2, align 8
-  %call3.3 = tail call %struct.Village* @alloc(%struct.Village* %1)
-  %arrayidx.3 = getelementptr inbounds [4 x %struct.Village*], [4 x %struct.Village*]* %fval, i64 0, i64 0
-  store %struct.Village* %call3.3, %struct.Village** %arrayidx.3, align 16
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 8 dereferenceable(32) %call, i8* nonnull align 16 dereferenceable(32) %0, i64 32, i1 false)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %0) #7
-  ret i8* %call
+  %fval = alloca [4 x ptr], align 16
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %fval) #7
+  %call = tail call dereferenceable_or_null(192) ptr @malloc(i64 192) #8
+  %call3 = tail call ptr @alloc(ptr %call)
+  %arrayidx = getelementptr inbounds [4 x ptr], ptr %fval, i64 0, i64 3
+  store ptr %call3, ptr %arrayidx, align 8
+  %call3.1 = tail call ptr @alloc(ptr %call)
+  %arrayidx.1 = getelementptr inbounds [4 x ptr], ptr %fval, i64 0, i64 2
+  store ptr %call3.1, ptr %arrayidx.1, align 16
+  %call3.2 = tail call ptr @alloc(ptr %call)
+  %arrayidx.2 = getelementptr inbounds [4 x ptr], ptr %fval, i64 0, i64 1
+  store ptr %call3.2, ptr %arrayidx.2, align 8
+  %call3.3 = tail call ptr @alloc(ptr %call)
+  store ptr %call3.3, ptr %fval, align 16
+  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) %call, ptr nonnull align 16 dereferenceable(32) %fval, i64 32, i1 false)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %fval) #7
+  ret ptr %call
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare noalias i8* @malloc(i64)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare noalias ptr @malloc(i64)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memintrinsics.ll b/llvm/test/Transforms/DeadStoreElimination/memintrinsics.ll
index 02f555a061ede..ec62df732e372 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memintrinsics.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memintrinsics.ll
@@ -1,63 +1,63 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -dse < %s | FileCheck %s
 
-declare void @llvm.memcpy.p0i8.p0i8.i8(i8* nocapture, i8* nocapture, i8, i1) nounwind
-declare void @llvm.memcpy.inline.p0i8.p0i8.i8(i8* nocapture, i8* nocapture, i8, i1) nounwind
-declare void @llvm.memmove.p0i8.p0i8.i8(i8* nocapture, i8* nocapture, i8, i1) nounwind
-declare void @llvm.memset.p0i8.i8(i8* nocapture, i8, i8, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i8(ptr nocapture, ptr nocapture, i8, i1) nounwind
+declare void @llvm.memcpy.inline.p0.p0.i8(ptr nocapture, ptr nocapture, i8, i1) nounwind
+declare void @llvm.memmove.p0.p0.i8(ptr nocapture, ptr nocapture, i8, i1) nounwind
+declare void @llvm.memset.p0.i8(ptr nocapture, i8, i8, i1) nounwind
 
-define void @test1(i8* noalias %A, i8* noalias %B) {
+define void @test1(ptr noalias %A, ptr noalias %B) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i8(i8* [[A:%.*]], i8* [[B:%.*]], i8 12, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i8(ptr [[A:%.*]], ptr [[B:%.*]], i8 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %A  ;; Written to by memcpy
+  store i8 0, ptr %A  ;; Written to by memcpy
 
-  call void @llvm.memcpy.p0i8.p0i8.i8(i8* %A, i8* %B, i8 12, i1 false)
+  call void @llvm.memcpy.p0.p0.i8(ptr %A, ptr %B, i8 12, i1 false)
 
   ret void
 }
 
-define void @test2(i8* noalias %A, i8* noalias %B) {
+define void @test2(ptr noalias %A, ptr noalias %B) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i8(i8* [[A]], i8* [[B:%.*]], i8 12, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i8(ptr [[A]], ptr [[B:%.*]], i8 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %A  ;; Written to by memmove
+  store i8 0, ptr %A  ;; Written to by memmove
 
-  call void @llvm.memmove.p0i8.p0i8.i8(i8* %A, i8* %B, i8 12, i1 false)
+  call void @llvm.memmove.p0.p0.i8(ptr %A, ptr %B, i8 12, i1 false)
 
   ret void
 }
 
-define void @test2a(i8* %A, i8* %B) {
+define void @test2a(ptr %A, ptr %B) {
 ; CHECK-LABEL: @test2a(
-; CHECK-NEXT:    store i8 0, i8* [[A:%.*]], align 1
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i8(i8* [[A]], i8* [[B:%.*]], i8 12, i1 false)
+; CHECK-NEXT:    store i8 0, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i8(ptr [[A]], ptr [[B:%.*]], i8 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %A  ;; Written to by memmove
+  store i8 0, ptr %A  ;; Written to by memmove
 
-  call void @llvm.memmove.p0i8.p0i8.i8(i8* %A, i8* %B, i8 12, i1 false)
+  call void @llvm.memmove.p0.p0.i8(ptr %A, ptr %B, i8 12, i1 false)
 
   ret void
 }
 
-define void @test3(i8* noalias %A) {
+define void @test3(ptr noalias %A) {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i8(i8* [[A:%.*]], i8 0, i8 12, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i8(ptr [[A:%.*]], i8 0, i8 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %A  ;; Written to by memset
+  store i8 0, ptr %A  ;; Written to by memset
 
-  call void @llvm.memset.p0i8.i8(i8* %A, i8 0, i8 12, i1 false)
+  call void @llvm.memset.p0.i8(ptr %A, i8 0, i8 12, i1 false)
 
   ret void
 }
 
-declare void @llvm.memcpy.element.unordered.atomic.p0i16.p0i16.i16(i16* nocapture, i16* nocapture, i16, i32) nounwind
-declare void @llvm.memmove.element.unordered.atomic.p0i16.p0i16.i16(i16* nocapture, i16* nocapture, i16, i32) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i16.i16(i16* nocapture, i8, i16, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i16(ptr nocapture, ptr nocapture, i16, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i16(ptr nocapture, ptr nocapture, i16, i32) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i16(ptr nocapture, i8, i16, i32) nounwind
 
 
 define void @test4() {
@@ -67,10 +67,10 @@ define void @test4() {
   %A = alloca i16, i16 1024, align 2
   %B = alloca i16, i16 1024, align 2
 
-  store atomic i16 0, i16* %A unordered, align 2 ;; Written to by memcpy
-  store atomic i16 0, i16* %B unordered, align 2 ;; Read by memcpy
+  store atomic i16 0, ptr %A unordered, align 2 ;; Written to by memcpy
+  store atomic i16 0, ptr %B unordered, align 2 ;; Read by memcpy
 
-  call void @llvm.memcpy.element.unordered.atomic.p0i16.p0i16.i16(i16* align 2 %A, i16* align 2 %B, i16 1024, i32 2)
+  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i16(ptr align 2 %A, ptr align 2 %B, i16 1024, i32 2)
 
   ret void
 }
@@ -82,10 +82,10 @@ define void @test5() {
   %A = alloca i16, i16 1024, align 2
   %B = alloca i16, i16 1024, align 2
 
-  store atomic i16 0, i16* %A unordered, align 2 ;; Written to by memmove
-  store atomic i16 0, i16* %B unordered, align 2 ;; Read by memmove
+  store atomic i16 0, ptr %A unordered, align 2 ;; Written to by memmove
+  store atomic i16 0, ptr %B unordered, align 2 ;; Read by memmove
 
-  call void @llvm.memmove.element.unordered.atomic.p0i16.p0i16.i16(i16* align 2 %A, i16* align 2 %B, i16 1024, i32 2)
+  call void @llvm.memmove.element.unordered.atomic.p0.p0.i16(ptr align 2 %A, ptr align 2 %B, i16 1024, i32 2)
 
   ret void
 }
@@ -98,21 +98,21 @@ define void @test6() {
   %A = alloca i16, i16 1024, align 2
   %B = alloca i16, i16 1024, align 2
 
-  store atomic i16 0, i16* %A unordered, align 2 ;; Written to by memset
+  store atomic i16 0, ptr %A unordered, align 2 ;; Written to by memset
 
-  call void @llvm.memset.element.unordered.atomic.p0i16.i16(i16* align 2 %A, i8 0, i16 1024, i32 2)
+  call void @llvm.memset.element.unordered.atomic.p0.i16(ptr align 2 %A, i8 0, i16 1024, i32 2)
 
   ret void
 }
 
-define void @test7(i8* noalias %A, i8* noalias %B) {
+define void @test7(ptr noalias %A, ptr noalias %B) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    call void @llvm.memcpy.inline.p0i8.p0i8.i8(i8* [[A:%.*]], i8* [[B:%.*]], i8 12, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.inline.p0.p0.i8(ptr [[A:%.*]], ptr [[B:%.*]], i8 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %A  ;; Written to by memcpy
+  store i8 0, ptr %A  ;; Written to by memcpy
 
-  call void @llvm.memcpy.inline.p0i8.p0i8.i8(i8* %A, i8* %B, i8 12, i1 false)
+  call void @llvm.memcpy.inline.p0.p0.i8(ptr %A, ptr %B, i8 12, i1 false)
 
   ret void
 }

diff --git a/llvm/test/Transforms/DeadStoreElimination/memory-intrinsics-sizes.ll b/llvm/test/Transforms/DeadStoreElimination/memory-intrinsics-sizes.ll
index 944a08d56ae35..aa3ba2fd004fa 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memory-intrinsics-sizes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memory-intrinsics-sizes.ll
@@ -1,353 +1,349 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -dse -S %s | FileCheck %s
 
-define void @memset_equal_size_values(i8* %ptr, i64 %len) {
+define void @memset_equal_size_values(ptr %ptr, i64 %len) {
 ; CHECK-LABEL: @memset_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_different_size_values_1(i8* %ptr, i64 %len.1, i64 %len.2) {
+define void @memset_different_size_values_1(ptr %ptr, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memset_different_size_values_1(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR]], i8 0, i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR]], i8 0, i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len.1, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len.2, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memset_different_size_values_2(i8* %ptr, i64 %len) {
+define void @memset_different_size_values_2(ptr %ptr, i64 %len) {
 ; CHECK-LABEL: @memset_different_size_values_2(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR]], i8 0, i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR]], i8 0, i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 100, i1 false)
   ret void
 }
 
-define void @memset_different_size_values_3(i8* %ptr, i64 %len) {
+define void @memset_different_size_values_3(ptr %ptr, i64 %len) {
 ; CHECK-LABEL: @memset_different_size_values_3(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR:%.*]], i8 0, i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR:%.*]], i8 0, i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR]], i8 0, i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 100, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_and_store_1(i8* %ptr, i64 %len) {
+define void @memset_and_store_1(ptr %ptr, i64 %len) {
 ; CHECK-LABEL: @memset_and_store_1(
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[PTR:%.*]] to i64*
-; CHECK-NEXT:    store i64 123, i64* [[BC]], align 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    store i64 123, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR]], i8 0, i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  %bc = bitcast i8* %ptr to i64*
-  store i64 123, i64* %bc
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len, i1 false)
+  store i64 123, ptr %ptr
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_and_store_2(i8* %ptr, i64 %len) {
+define void @memset_and_store_2(ptr %ptr, i64 %len) {
 ; CHECK-LABEL: @memset_and_store_2(
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[PTR:%.*]] to i64*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR]], i8 0, i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT:    store i64 123, i64* [[BC]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    store i64 123, ptr [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %bc = bitcast i8* %ptr to i64*
-  call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 0, i64 %len, i1 false)
-  store i64 123, i64* %bc
+  call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 0, i64 %len, i1 false)
+  store i64 123, ptr %ptr
   ret void
 }
 
-define void @memcpy_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memcpy_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memcpy_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memcpy_different_size_values_1(i8* noalias %src, i8* noalias %dst, i64 %len.1, i64 %len.2) {
+define void @memcpy_different_size_values_1(ptr noalias %src, ptr noalias %dst, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memcpy_different_size_values_1(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.2, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memcpy_different_size_values_2(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memcpy_different_size_values_2(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memcpy_different_size_values_2(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
   ret void
 }
 
-define void @memcpy_different_size_values_3(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memcpy_different_size_values_3(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memcpy_different_size_values_3(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_and_memcpy_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memset_and_memcpy_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memset_and_memcpy_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_and_memcpy_different_size_values_1(i8* noalias %src, i8* noalias %dst, i64 %len.1, i64 %len.2) {
+define void @memset_and_memcpy_different_size_values_1(ptr noalias %src, ptr noalias %dst, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memset_and_memcpy_different_size_values_1(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC:%.*]], i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC:%.*]], i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.1, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.2, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memset_and_memcpy_different_size_values_2(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memset_and_memcpy_different_size_values_2(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memset_and_memcpy_different_size_values_2(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC:%.*]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC:%.*]], i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.1, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
   ret void
 }
 
-define void @memset_and_memcpy_different_size_values_3(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memset_and_memcpy_different_size_values_3(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memset_and_memcpy_different_size_values_3(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 100, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
   ret void
 }
 
-define void @memcpy_and_memset_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memcpy_and_memset_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memcpy_and_memset_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len, i1 false)
   ret void
 }
 
-define void @memcpy_and_memset_different_size_values_1(i8* noalias %src, i8* noalias %dst, i64 %len.1, i64 %len.2) {
+define void @memcpy_and_memset_different_size_values_1(ptr noalias %src, ptr noalias %dst, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memcpy_and_memset_different_size_values_1(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST]], i8 0, i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.2, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memcpy_and_memset_different_size_values_2(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memcpy_and_memset_different_size_values_2(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memcpy_and_memset_different_size_values_2(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST]], i8 0, i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 100, i1 false)
   ret void
 }
 
-define void @memcpy_and_memset_different_size_values_3(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memcpy_and_memset_different_size_values_3(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memcpy_and_memset_different_size_values_3(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 [[LEN_1:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.1, i1 false)
   ret void
 }
 
-define void @memmove_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memmove_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memmove_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memmove_different_size_values_1(i8* noalias %src, i8* noalias %dst, i64 %len.1, i64 %len.2) {
+define void @memmove_different_size_values_1(ptr noalias %src, ptr noalias %dst, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memmove_different_size_values_1(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.2, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memmove_different_size_values_2(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memmove_different_size_values_2(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memmove_different_size_values_2(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
   ret void
 }
 
-define void @memmove_different_size_values_3(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memmove_different_size_values_3(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memmove_different_size_values_3(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_and_memmove_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memset_and_memmove_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memset_and_memmove_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memset_and_memmove_different_size_values_1(i8* noalias %src, i8* noalias %dst, i64 %len.1, i64 %len.2) {
+define void @memset_and_memmove_different_size_values_1(ptr noalias %src, ptr noalias %dst, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memset_and_memmove_different_size_values_1(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC:%.*]], i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST]], ptr [[SRC:%.*]], i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.1, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.2, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memset_and_memmove_different_size_values_2(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memset_and_memmove_different_size_values_2(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memset_and_memmove_different_size_values_2(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC:%.*]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST]], ptr [[SRC:%.*]], i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.1, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
   ret void
 }
 
-define void @memset_and_memmove_different_size_values_3(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memset_and_memmove_different_size_values_3(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memset_and_memmove_different_size_values_3(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 100, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 100, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
   ret void
 }
 
-define void @memmove_and_memset_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memmove_and_memset_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memmove_and_memset_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len, i1 false)
   ret void
 }
 
-define void @memmove_and_memset_
diff erent_size_values_1(i8* noalias %src, i8* noalias %dst, i64 %len.1, i64 %len.2) {
+define void @memmove_and_memset_
diff erent_size_values_1(ptr noalias %src, ptr noalias %dst, i64 %len.1, i64 %len.2) {
 ; CHECK-LABEL: @memmove_and_memset_
diff erent_size_values_1(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST]], i8 0, i64 [[LEN_2:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 [[LEN_2:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.2, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.2, i1 false)
   ret void
 }
 
-define void @memmove_and_memset_
diff erent_size_values_2(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memmove_and_memset_
diff erent_size_values_2(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memmove_and_memset_
diff erent_size_values_2(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST]], i8 0, i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 100, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len.1, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 100, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len.1, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 100, i1 false)
   ret void
 }
 
-define void @memmove_and_memset_
diff erent_size_values_3(i8* noalias %src, i8* noalias %dst, i64 %len.1) {
+define void @memmove_and_memset_
diff erent_size_values_3(ptr noalias %src, ptr noalias %dst, i64 %len.1) {
 ; CHECK-LABEL: @memmove_and_memset_
diff erent_size_values_3(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 100, i1 false)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[DST]], i8 0, i64 [[LEN_1:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 100, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 [[LEN_1:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 100, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %len.1, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %len.1, i1 false)
   ret void
 }
 
-define void @memmove_and_memcpy_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memmove_and_memcpy_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memmove_and_memcpy_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-define void @memcpy_and_memmove_equal_size_values(i8* noalias %src, i8* noalias %dst, i64 %len) {
+define void @memcpy_and_memmove_equal_size_values(ptr noalias %src, ptr noalias %dst, i64 %len) {
 ; CHECK-LABEL: @memcpy_and_memmove_equal_size_values(
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memoryssa-scan-limit.ll b/llvm/test/Transforms/DeadStoreElimination/memoryssa-scan-limit.ll
index 3a8b772b062e0..e4f733c5c1317 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memoryssa-scan-limit.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memoryssa-scan-limit.ll
@@ -8,7 +8,7 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
 
-define void @test2(i32* noalias %P, i32* noalias %Q, i32* noalias %R) {
+define void @test2(ptr noalias %P, ptr noalias %Q, ptr noalias %R) {
 ; NO-LIMIT-LABEL: @test2(
 ; NO-LIMIT-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; NO-LIMIT:       bb1:
@@ -16,35 +16,35 @@ define void @test2(i32* noalias %P, i32* noalias %Q, i32* noalias %R) {
 ; NO-LIMIT:       bb2:
 ; NO-LIMIT-NEXT:    br label [[BB3]]
 ; NO-LIMIT:       bb3:
-; NO-LIMIT-NEXT:    store i32 0, i32* [[Q:%.*]], align 4
-; NO-LIMIT-NEXT:    store i32 0, i32* [[R:%.*]], align 4
-; NO-LIMIT-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; NO-LIMIT-NEXT:    store i32 0, ptr [[Q:%.*]], align 4
+; NO-LIMIT-NEXT:    store i32 0, ptr [[R:%.*]], align 4
+; NO-LIMIT-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; NO-LIMIT-NEXT:    ret void
 ;
 ; LIMIT-0-LABEL: @test2(
-; LIMIT-0-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; LIMIT-0-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; LIMIT-0-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; LIMIT-0:       bb1:
 ; LIMIT-0-NEXT:    br label [[BB3:%.*]]
 ; LIMIT-0:       bb2:
 ; LIMIT-0-NEXT:    br label [[BB3]]
 ; LIMIT-0:       bb3:
-; LIMIT-0-NEXT:    store i32 0, i32* [[Q:%.*]], align 4
-; LIMIT-0-NEXT:    store i32 0, i32* [[R:%.*]], align 4
-; LIMIT-0-NEXT:    store i32 0, i32* [[P]], align 4
+; LIMIT-0-NEXT:    store i32 0, ptr [[Q:%.*]], align 4
+; LIMIT-0-NEXT:    store i32 0, ptr [[R:%.*]], align 4
+; LIMIT-0-NEXT:    store i32 0, ptr [[P]], align 4
 ; LIMIT-0-NEXT:    ret void
 ;
 ; LIMIT-2-LABEL: @test2(
-; LIMIT-2-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; LIMIT-2-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; LIMIT-2-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; LIMIT-2:       bb1:
 ; LIMIT-2-NEXT:    br label [[BB3:%.*]]
 ; LIMIT-2:       bb2:
 ; LIMIT-2-NEXT:    br label [[BB3]]
 ; LIMIT-2:       bb3:
-; LIMIT-2-NEXT:    store i32 0, i32* [[Q:%.*]], align 4
-; LIMIT-2-NEXT:    store i32 0, i32* [[R:%.*]], align 4
-; LIMIT-2-NEXT:    store i32 0, i32* [[P]], align 4
+; LIMIT-2-NEXT:    store i32 0, ptr [[Q:%.*]], align 4
+; LIMIT-2-NEXT:    store i32 0, ptr [[R:%.*]], align 4
+; LIMIT-2-NEXT:    store i32 0, ptr [[P]], align 4
 ; LIMIT-2-NEXT:    ret void
 ;
 ; LIMIT-3-LABEL: @test2(
@@ -54,20 +54,20 @@ define void @test2(i32* noalias %P, i32* noalias %Q, i32* noalias %R) {
 ; LIMIT-3:       bb2:
 ; LIMIT-3-NEXT:    br label [[BB3]]
 ; LIMIT-3:       bb3:
-; LIMIT-3-NEXT:    store i32 0, i32* [[Q:%.*]], align 4
-; LIMIT-3-NEXT:    store i32 0, i32* [[R:%.*]], align 4
-; LIMIT-3-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; LIMIT-3-NEXT:    store i32 0, ptr [[Q:%.*]], align 4
+; LIMIT-3-NEXT:    store i32 0, ptr [[R:%.*]], align 4
+; LIMIT-3-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; LIMIT-3-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 0, i32* %Q
-  store i32 0, i32* %R
-  store i32 0, i32* %P
+  store i32 0, ptr %Q
+  store i32 0, ptr %R
+  store i32 0, ptr %P
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memset-and-memcpy.ll b/llvm/test/Transforms/DeadStoreElimination/memset-and-memcpy.ll
index 266f686905a21..c660b5ff6ad98 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memset-and-memcpy.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memset-and-memcpy.ll
@@ -3,119 +3,119 @@
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
-declare void @llvm.init.trampoline(i8*, i8*, i8*)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr)
 
 
 ;; Overwrite of memset by memcpy.
-define void @test17(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %P, i8 42, i64 8, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
 ;; Overwrite of memset by memcpy.
-define void @test17_atomic(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17_atomic(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %P, i8 42, i64 8, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Overwrite of memset by memcpy. Overwrite is stronger atomicity. We can
 ;; remove the memset.
-define void @test17_atomic_weaker(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17_atomic_weaker(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17_atomic_weaker(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i1 false)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memset.p0.i64(ptr align 1 %P, i8 42, i64 8, i1 false)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
 ;; Overwrite of memset by memcpy. Overwrite is weaker atomicity. We can remove
 ;; the memset.
-define void @test17_atomic_weaker_2(i8* %P, i8* noalias %Q) nounwind ssp {
+define void @test17_atomic_weaker_2(ptr %P, ptr noalias %Q) nounwind ssp {
 ; CHECK-LABEL: @test17_atomic_weaker_2(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
+  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %P, i8 42, i64 8, i32 1)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i1 false)
   ret void
 }
 
 ; Should not delete the volatile memset.
-define void @test17v(i8* %P, i8* %Q) nounwind ssp {
+define void @test17v(ptr %P, ptr %Q) nounwind ssp {
 ; CHECK-LABEL: @test17v(
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[P:%.*]], i8 42, i64 8, i1 true)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr [[P:%.*]], i8 42, i64 8, i1 true)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 true)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %P, i8 42, i64 8, i1 true)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
 ; See PR11763 - LLVM allows memcpy's source and destination to be equal (but not
 ; inequal and overlapping).
-define void @test18(i8* %P, i8* %Q, i8* %R) nounwind ssp {
+define void @test18(ptr %P, ptr %Q, ptr %R) nounwind ssp {
 ; CHECK-LABEL: @test18(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %R, i64 12, i1 false)
   ret void
 }
 
-define void @test18_atomic(i8* %P, i8* %Q, i8* %R) nounwind ssp {
+define void @test18_atomic(ptr %P, ptr %Q, ptr %R) nounwind ssp {
 ; CHECK-LABEL: @test18_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 12, i32 1)
   ret void
 }
 
-define void @test_memset_memcpy_inline(i8* noalias %P, i8* noalias %Q) {
+define void @test_memset_memcpy_inline(ptr noalias %P, ptr noalias %Q) {
 ; CHECK-LABEL: @test_memset_memcpy_inline(
-; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
-  tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %P, i8 42, i64 8, i1 false)
+  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i1 false)
   ret void
 }
 
-define void @test_store_memcpy_inline(i8* noalias %P, i8* noalias %Q) {
+define void @test_store_memcpy_inline(ptr noalias %P, ptr noalias %Q) {
 ; CHECK-LABEL: @test_store_memcpy_inline(
-; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 4
-; CHECK-NEXT:    store i8 4, i8* [[P_4]], align 1
-; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[Q:%.*]], i64 4, i1 false)
+; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 4
+; CHECK-NEXT:    store i8 4, ptr [[P_4]], align 1
+; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[Q:%.*]], i64 4, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %P
-  %P.1 = getelementptr i8, i8* %P, i64 1
-  store i8 1, i8* %P.1
-  %P.4 = getelementptr i8, i8* %P, i64 4
-  store i8 4, i8* %P.4
-  tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 4, i1 false)
+  store i8 0, ptr %P
+  %P.1 = getelementptr i8, ptr %P, i64 1
+  store i8 1, ptr %P.1
+  %P.4 = getelementptr i8, ptr %P, i64 4
+  store i8 4, ptr %P.4
+  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 4, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memset-missing-debugloc.ll b/llvm/test/Transforms/DeadStoreElimination/memset-missing-debugloc.ll
index 9229157a9b6ed..c7dcf54ae5f22 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memset-missing-debugloc.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memset-missing-debugloc.ll
@@ -3,9 +3,8 @@
 ; a memset can be shortened has the debugloc carried over from the memset.
 
 ; RUN: opt -S -march=native -dse < %s| FileCheck %s
-; CHECK: bitcast [5 x i64]* %{{[a-zA-Z_][a-zA-Z0-9_]*}} to i8*, !dbg
-; CHECK-NEXT: %{{[0-9]+}} = getelementptr inbounds i8, i8* %0, i64 32, !dbg ![[DBG:[0-9]+]]
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 %1, i8 0, i64 8, i1 false), !dbg ![[DBG:[0-9]+]]
+; CHECK: %{{[0-9]+}} = getelementptr inbounds i8, ptr %b, i64 32, !dbg ![[DBG:[0-9]+]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 %0, i8 0, i64 8, i1 false), !dbg ![[DBG:[0-9]+]]
 ; CHECK: ![[DBG]] = !DILocation(line: 2,
 
 ; The test IR is generated by running:
@@ -23,25 +22,22 @@
 ; }
 
 
-declare void @use([5 x i64]*)
+declare void @use(ptr)
 
 define dso_local i32 @_Z1av() !dbg !7 {
 entry:
   %retval = alloca i32, align 4
   %b = alloca [5 x i64], align 16
-  call void @llvm.dbg.declare(metadata [5 x i64]* %b, metadata !11, metadata !DIExpression()), !dbg !16
-  %0 = bitcast [5 x i64]* %b to i8*, !dbg !16
-  call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 0, i64 40, i1 false), !dbg !16
-  %1 = bitcast i8* %0 to [5 x i64]*, !dbg !16
-  %2 = getelementptr inbounds [5 x i64], [5 x i64]* %1, i32 0, i32 0, !dbg !16
-  store i64 2, i64* %2, align 16, !dbg !16
-  %3 = getelementptr inbounds [5 x i64], [5 x i64]* %1, i32 0, i32 1, !dbg !16
-  store i64 2, i64* %3, align 8, !dbg !16
-  %4 = getelementptr inbounds [5 x i64], [5 x i64]* %1, i32 0, i32 2, !dbg !16
-  store i64 2, i64* %4, align 16, !dbg !16
-  %5 = getelementptr inbounds [5 x i64], [5 x i64]* %1, i32 0, i32 3, !dbg !16
-  store i64 2, i64* %5, align 8, !dbg !16
-  call void @use([5 x i64]* %b)
+  call void @llvm.dbg.declare(metadata ptr %b, metadata !11, metadata !DIExpression()), !dbg !16
+  call void @llvm.memset.p0.i64(ptr align 16 %b, i8 0, i64 40, i1 false), !dbg !16
+  store i64 2, ptr %b, align 16, !dbg !16
+  %0 = getelementptr inbounds [5 x i64], ptr %b, i32 0, i32 1, !dbg !16
+  store i64 2, ptr %0, align 8, !dbg !16
+  %1 = getelementptr inbounds [5 x i64], ptr %b, i32 0, i32 2, !dbg !16
+  store i64 2, ptr %1, align 16, !dbg !16
+  %2 = getelementptr inbounds [5 x i64], ptr %b, i32 0, i32 3, !dbg !16
+  store i64 2, ptr %2, align 8, !dbg !16
+  call void @use(ptr %b)
   %call = call i32 @_Z1av(), !dbg !17
   %tobool = icmp ne i32 %call, 0, !dbg !17
   br i1 %tobool, label %if.then, label %if.end, !dbg !19
@@ -54,15 +50,15 @@ if.end:                                           ; preds = %if.then, %entry
   unreachable, !dbg !20
 
 return:                                           ; No predecessors!
-  %6 = load i32, i32* %retval, align 4, !dbg !21
-  ret i32 %6, !dbg !21
+  %3 = load i32, ptr %retval, align 4, !dbg !21
+  ret i32 %3, !dbg !21
 }
 
 ; Function Attrs: nounwind readnone speculatable
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
 ; Function Attrs: cold noreturn nounwind
 declare void @llvm.trap()

diff  --git a/llvm/test/Transforms/DeadStoreElimination/memset-unknown-sizes.ll b/llvm/test/Transforms/DeadStoreElimination/memset-unknown-sizes.ll
index 65bfe3879763a..1dd5d77843969 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memset-unknown-sizes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memset-unknown-sizes.ll
@@ -1,68 +1,64 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -dse -S %s | FileCheck %s
 
-declare i8* @_Znwm() local_unnamed_addr #0
+declare ptr @_Znwm() local_unnamed_addr #0
 
 ; Function Attrs: argmemonly nounwind willreturn writeonly
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
 
-define i8* @test1(i1 %c, i64 %N) {
+define ptr @test1(i1 %c, i64 %N) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[COND_TRUE_I_I_I:%.*]], label [[COND_END_I_I_I:%.*]]
 ; CHECK:       cond.true.i.i.i:
-; CHECK-NEXT:    ret i8* null
+; CHECK-NEXT:    ret ptr null
 ; CHECK:       cond.end.i.i.i:
-; CHECK-NEXT:    [[ALLOC:%.*]] = tail call noalias nonnull i8* @_Znam() #[[ATTR2:[0-9]+]]
-; CHECK-NEXT:    [[ALLOC_BC:%.*]] = bitcast i8* [[ALLOC]] to i64*
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* nonnull align 8 [[ALLOC]], i8 0, i64 [[N:%.*]], i1 false) #[[ATTR3:[0-9]+]]
-; CHECK-NEXT:    store i64 0, i64* [[ALLOC_BC]], align 8
-; CHECK-NEXT:    ret i8* [[ALLOC]]
+; CHECK-NEXT:    [[ALLOC:%.*]] = tail call noalias nonnull ptr @_Znam() #[[ATTR2:[0-9]+]]
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr nonnull align 8 [[ALLOC]], i8 0, i64 [[N:%.*]], i1 false) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT:    store i64 0, ptr [[ALLOC]], align 8
+; CHECK-NEXT:    ret ptr [[ALLOC]]
 ;
 entry:
   br i1 %c, label %cond.true.i.i.i, label %cond.end.i.i.i
 
 cond.true.i.i.i:                                  ; preds = %entry
-  ret i8* null
+  ret ptr null
 
 cond.end.i.i.i:                                   ; preds = %entry
-  %alloc = tail call noalias nonnull i8* @_Znam() #2
-  %alloc.bc = bitcast i8* %alloc to i64*
-  tail call void @llvm.memset.p0i8.i64(i8* nonnull align 8 %alloc, i8 0, i64 %N, i1 false) #3
-  store i64 0, i64* %alloc.bc, align 8
-  ret i8* %alloc
+  %alloc = tail call noalias nonnull ptr @_Znam() #2
+  tail call void @llvm.memset.p0.i64(ptr nonnull align 8 %alloc, i8 0, i64 %N, i1 false) #3
+  store i64 0, ptr %alloc, align 8
+  ret ptr %alloc
 }
 
-declare i8* @_Znam()
+declare ptr @_Znam()
 
 
-define i8* @test2(i1 %c, i64 %N) {
+define ptr @test2(i1 %c, i64 %N) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[CLEANUP_CONT104:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nuw nsw i64 [[N:%.*]], 3
-; CHECK-NEXT:    [[ALLOC:%.*]] = call noalias nonnull i8* @_Znwm() #[[ATTR2]]
-; CHECK-NEXT:    [[ALLOC_BC:%.*]] = bitcast i8* [[ALLOC]] to i64*
-; CHECK-NEXT:    store i64 0, i64* [[ALLOC_BC]], align 8
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* nonnull align 8 [[ALLOC]], i8 0, i64 [[MUL]], i1 false) #[[ATTR3]]
-; CHECK-NEXT:    ret i8* [[ALLOC]]
+; CHECK-NEXT:    [[ALLOC:%.*]] = call noalias nonnull ptr @_Znwm() #[[ATTR2]]
+; CHECK-NEXT:    store i64 0, ptr [[ALLOC]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr nonnull align 8 [[ALLOC]], i8 0, i64 [[MUL]], i1 false) #[[ATTR3]]
+; CHECK-NEXT:    ret ptr [[ALLOC]]
 ; CHECK:       cleanup.cont104:
-; CHECK-NEXT:    ret i8* null
+; CHECK-NEXT:    ret ptr null
 ;
 entry:
   br i1 %c, label %cleanup.cont104, label %if.then
 
 if.then:                                          ; preds = %entry
   %mul = shl nuw nsw i64 %N, 3
-  %alloc = call noalias nonnull i8* @_Znwm() #2
-  %alloc.bc = bitcast i8* %alloc to i64*
-  store i64 0, i64* %alloc.bc, align 8
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 8 %alloc, i8 0, i64 %mul, i1 false) #3
-  ret i8* %alloc
+  %alloc = call noalias nonnull ptr @_Znwm() #2
+  store i64 0, ptr %alloc, align 8
+  call void @llvm.memset.p0.i64(ptr nonnull align 8 %alloc, i8 0, i64 %mul, i1 false) #3
+  ret ptr %alloc
 
 cleanup.cont104:                                  ; preds = %entry
-  ret i8* null
+  ret ptr null
 }
 
 attributes #0 = { "use-soft-float"="false" }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/merge-stores-big-endian.ll b/llvm/test/Transforms/DeadStoreElimination/merge-stores-big-endian.ll
index 77784ac0c4047..3d957c0864b36 100644
--- a/llvm/test/Transforms/DeadStoreElimination/merge-stores-big-endian.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/merge-stores-big-endian.ll
@@ -2,153 +2,136 @@
 ; RUN: opt -dse -enable-dse-partial-store-merging -S < %s | FileCheck %s
 target datalayout = "E-m:e-i64:64-i128:128-n32:64-S128"
 
-define void @byte_by_byte_replacement(i32 *%ptr) {
+define void @byte_by_byte_replacement(ptr %ptr) {
 ; CHECK-LABEL: @byte_by_byte_replacement(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 151653132, i32* [[PTR:%.*]]
+; CHECK-NEXT:    store i32 151653132, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
   ;; This store's value should be modified as it should be better to use one
   ;; larger store than several smaller ones.
   ;; store will turn into 0x090A0B0C == 151653132
-  store i32 305419896, i32* %ptr  ; 0x12345678
-  %bptr = bitcast i32* %ptr to i8*
-  %bptr1 = getelementptr inbounds i8, i8* %bptr, i64 1
-  %bptr2 = getelementptr inbounds i8, i8* %bptr, i64 2
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
+  store i32 305419896, ptr %ptr  ; 0x12345678
+  %bptr1 = getelementptr inbounds i8, ptr %ptr, i64 1
+  %bptr2 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
   ;; We should be able to merge these four stores with the i32 above
   ; value (and bytes) stored before  ; 0x12345678
-  store i8 9, i8* %bptr              ;   09
-  store i8 10, i8* %bptr1            ;     0A
-  store i8 11, i8* %bptr2            ;       0B
-  store i8 12, i8* %bptr3            ;         0C
+  store i8 9, ptr %ptr              ;   09
+  store i8 10, ptr %bptr1            ;     0A
+  store i8 11, ptr %bptr2            ;       0B
+  store i8 12, ptr %bptr3            ;         0C
   ;                                    0x090A0B0C
 
   ret void
 }
 
-define void @word_replacement(i64 *%ptr) {
+define void @word_replacement(ptr %ptr) {
 ; CHECK-LABEL: @word_replacement(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 72638273700655232, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 72638273700655232, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 72623859790382856, i64* %ptr  ; 0x0102030405060708
+  store i64 72623859790382856, ptr %ptr  ; 0x0102030405060708
 
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr1 = getelementptr inbounds i16, i16* %wptr, i64 1
-  %wptr3 = getelementptr inbounds i16, i16* %wptr, i64 3
+  %wptr1 = getelementptr inbounds i16, ptr %ptr, i64 1
+  %wptr3 = getelementptr inbounds i16, ptr %ptr, i64 3
 
   ;; We should be able to merge these two stores with the i64 one above
   ; value (and bytes) stored before  ; 0x0102030405060708
-  store i16  4128, i16* %wptr1       ;       1020
-  store i16 28800, i16* %wptr3       ;               7080
+  store i16  4128, ptr %wptr1       ;       1020
+  store i16 28800, ptr %wptr3       ;               7080
   ;                                    0x0102102005067080
 
   ret void
 }
 
 
-define void @
diff erently_sized_replacements(i64 *%ptr) {
+define void @
diff erently_sized_replacements(ptr %ptr) {
 ; CHECK-LABEL: @
diff erently_sized_replacements(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 289077004501059343, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 289077004501059343, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 579005069656919567, i64* %ptr  ; 0x08090a0b0c0d0e0f
+  store i64 579005069656919567, ptr %ptr  ; 0x08090a0b0c0d0e0f
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptr6 = getelementptr inbounds i8, i8* %bptr, i64 6
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr2 = getelementptr inbounds i16, i16* %wptr, i64 2
-  %dptr = bitcast i64* %ptr to i32*
+  %bptr6 = getelementptr inbounds i8, ptr %ptr, i64 6
+  %wptr2 = getelementptr inbounds i16, ptr %ptr, i64 2
 
   ;; We should be able to merge all these stores with the i64 one above
   ; value (and bytes) stored before  ; 0x08090a0b0c0d0e0f
-  store i8         7, i8*  %bptr6    ;               07
-  store i16     1541, i16* %wptr2    ;           0605
-  store i32 67305985, i32* %dptr     ;   04030201
+  store i8         7, ptr  %bptr6    ;               07
+  store i16     1541, ptr %wptr2    ;           0605
+  store i32 67305985, ptr %ptr     ;   04030201
   ;                                    0x040302010605070f
   ret void
 }
 
 
-define void @multiple_replacements_to_same_byte(i64 *%ptr) {
+define void @multiple_replacements_to_same_byte(ptr %ptr) {
 ; CHECK-LABEL: @multiple_replacements_to_same_byte(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 289077004602248719, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 289077004602248719, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 579005069656919567, i64* %ptr  ; 0x08090a0b0c0d0e0f
+  store i64 579005069656919567, ptr %ptr  ; 0x08090a0b0c0d0e0f
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr1 = getelementptr inbounds i16, i16* %wptr, i64 1
-  %dptr = bitcast i64* %ptr to i32*
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %wptr1 = getelementptr inbounds i16, ptr %ptr, i64 1
 
   ;; We should be able to merge all these stores with the i64 one above
   ; value (and bytes) stored before  ; 0x08090a0b0c0d0e0f
-  store i8         7, i8*  %bptr3    ;         07
-  store i16     1541, i16* %wptr1    ;       0605
-  store i32 67305985, i32* %dptr     ;   04030201
+  store i8         7, ptr  %bptr3    ;         07
+  store i16     1541, ptr %wptr1    ;       0605
+  store i32 67305985, ptr %ptr     ;   04030201
   ;                                    0x040302010c0d0e0f
   ret void
 }
 
-define void @merged_merges(i64 *%ptr) {
+define void @merged_merges(ptr %ptr) {
 ; CHECK-LABEL: @merged_merges(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 289081428418563599, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 289081428418563599, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 579005069656919567, i64* %ptr  ; 0x08090a0b0c0d0e0f
+  store i64 579005069656919567, ptr %ptr  ; 0x08090a0b0c0d0e0f
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr1 = getelementptr inbounds i16, i16* %wptr, i64 1
-  %dptr = bitcast i64* %ptr to i32*
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %wptr1 = getelementptr inbounds i16, ptr %ptr, i64 1
 
   ;; We should be able to merge all these stores with the i64 one above
   ; value (not bytes) stored before  ; 0x08090a0b0c0d0e0f
-  store i32 67305985, i32* %dptr     ;   04030201
-  store i16     1541, i16* %wptr1    ;       0605
-  store i8         7, i8*  %bptr3    ;         07
+  store i32 67305985, ptr %ptr     ;   04030201
+  store i16     1541, ptr %wptr1    ;       0605
+  store i8         7, ptr  %bptr3    ;         07
   ;                                    0x040306070c0d0e0f
   ret void
 }
 
-define signext i8 @shouldnt_merge_since_theres_a_full_overlap(i64 *%ptr) {
+define signext i8 @shouldnt_merge_since_theres_a_full_overlap(ptr %ptr) {
 ; CHECK-LABEL: @shouldnt_merge_since_theres_a_full_overlap(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i64* [[PTR:%.*]] to i8*
-; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 -1
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
-; CHECK-NEXT:    [[DPTR:%.*]] = bitcast i8* [[BPTRM1]] to i32*
-; CHECK-NEXT:    [[QPTR:%.*]] = bitcast i8* [[BPTR3]] to i64*
-; CHECK-NEXT:    store i32 1234, i32* [[DPTR]], align 1
-; CHECK-NEXT:    store i64 5678, i64* [[QPTR]], align 1
+; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 -1
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    store i32 1234, ptr [[BPTRM1]], align 1
+; CHECK-NEXT:    store i64 5678, ptr [[BPTR3]], align 1
 ; CHECK-NEXT:    ret i8 0
 ;
 entry:
 
-  store i64 0, i64* %ptr
+  store i64 0, ptr %ptr
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptrm1 = getelementptr inbounds i8, i8* %bptr, i64 -1
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  %dptr = bitcast i8* %bptrm1 to i32*
-  %qptr = bitcast i8* %bptr3 to i64*
+  %bptrm1 = getelementptr inbounds i8, ptr %ptr, i64 -1
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
-  store i32 1234, i32* %dptr, align 1
-  store i64 5678, i64* %qptr, align 1
+  store i32 1234, ptr %bptrm1, align 1
+  store i64 5678, ptr %bptr3, align 1
 
   ret i8 0
 }
@@ -156,17 +139,14 @@ entry:
 ;; Test case from PR31777
 %union.U = type { i64 }
 
-define void @foo(%union.U* nocapture %u) {
+define void @foo(ptr nocapture %u) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[I:%.*]] = getelementptr inbounds [[UNION_U:%.*]], %union.U* [[U:%.*]], i64 0, i32 0
-; CHECK-NEXT:    store i64 11821949021847552, i64* [[I]], align 8
+; CHECK-NEXT:    store i64 11821949021847552, ptr [[U:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %i = getelementptr inbounds %union.U, %union.U* %u, i64 0, i32 0
-  store i64 0, i64* %i, align 8
-  %s = bitcast %union.U* %u to i16*
-  store i16 42, i16* %s, align 8
+  store i64 0, ptr %u, align 8
+  store i16 42, ptr %u, align 8
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/merge-stores.ll b/llvm/test/Transforms/DeadStoreElimination/merge-stores.ll
index 11a9269b51485..ccc23d1044dbd 100644
--- a/llvm/test/Transforms/DeadStoreElimination/merge-stores.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/merge-stores.ll
@@ -2,152 +2,135 @@
 ; RUN: opt -dse -enable-dse-partial-store-merging -S < %s | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
 
-define void @byte_by_byte_replacement(i32 *%ptr) {
+define void @byte_by_byte_replacement(ptr %ptr) {
 ; CHECK-LABEL: @byte_by_byte_replacement(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 202050057, i32* [[PTR:%.*]]
+; CHECK-NEXT:    store i32 202050057, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
   ;; This store's value should be modified as it should be better to use one
   ;; larger store than several smaller ones.
   ;; store will turn into 0x0C0B0A09 == 202050057
-  store i32 305419896, i32* %ptr  ; 0x12345678
-  %bptr = bitcast i32* %ptr to i8*
-  %bptr1 = getelementptr inbounds i8, i8* %bptr, i64 1
-  %bptr2 = getelementptr inbounds i8, i8* %bptr, i64 2
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
+  store i32 305419896, ptr %ptr  ; 0x12345678
+  %bptr1 = getelementptr inbounds i8, ptr %ptr, i64 1
+  %bptr2 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
   ;; We should be able to merge these four stores with the i32 above
   ; value (and bytes) stored before  ; 0x12345678
-  store i8 9, i8* %bptr              ;         09
-  store i8 10, i8* %bptr1            ;       0A
-  store i8 11, i8* %bptr2            ;     0B
-  store i8 12, i8* %bptr3            ;   0C
+  store i8 9, ptr %ptr              ;         09
+  store i8 10, ptr %bptr1            ;       0A
+  store i8 11, ptr %bptr2            ;     0B
+  store i8 12, ptr %bptr3            ;   0C
   ;                                    0x0C0B0A09
   ret void
 }
 
-define void @word_replacement(i64 *%ptr) {
+define void @word_replacement(ptr %ptr) {
 ; CHECK-LABEL: @word_replacement(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 8106482645252179720, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 8106482645252179720, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 72623859790382856, i64* %ptr  ; 0x0102030405060708
+  store i64 72623859790382856, ptr %ptr  ; 0x0102030405060708
 
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr1 = getelementptr inbounds i16, i16* %wptr, i64 1
-  %wptr3 = getelementptr inbounds i16, i16* %wptr, i64 3
+  %wptr1 = getelementptr inbounds i16, ptr %ptr, i64 1
+  %wptr3 = getelementptr inbounds i16, ptr %ptr, i64 3
 
   ;; We should be able to merge these two stores with the i64 one above
   ; value (not bytes) stored before  ; 0x0102030405060708
-  store i16  4128, i16* %wptr1       ;           1020
-  store i16 28800, i16* %wptr3       ;   7080
+  store i16  4128, ptr %wptr1       ;           1020
+  store i16 28800, ptr %wptr3       ;   7080
   ;                                    0x7080030410200708
   ret void
 }
 
 
-define void @
diff erently_sized_replacements(i64 *%ptr) {
+define void @
diff erently_sized_replacements(ptr %ptr) {
 ; CHECK-LABEL: @
diff erently_sized_replacements(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 578437695752307201, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 578437695752307201, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 579005069656919567, i64* %ptr  ; 0x08090a0b0c0d0e0f
+  store i64 579005069656919567, ptr %ptr  ; 0x08090a0b0c0d0e0f
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptr6 = getelementptr inbounds i8, i8* %bptr, i64 6
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr2 = getelementptr inbounds i16, i16* %wptr, i64 2
-  %dptr = bitcast i64* %ptr to i32*
+  %bptr6 = getelementptr inbounds i8, ptr %ptr, i64 6
+  %wptr2 = getelementptr inbounds i16, ptr %ptr, i64 2
 
   ;; We should be able to merge all these stores with the i64 one above
   ; value (not bytes) stored before  ; 0x08090a0b0c0d0e0f
-  store i8         7, i8*  %bptr6    ;     07
-  store i16     1541, i16* %wptr2    ;       0605
-  store i32 67305985, i32* %dptr     ;           04030201
+  store i8         7, ptr  %bptr6    ;     07
+  store i16     1541, ptr %wptr2    ;       0605
+  store i32 67305985, ptr %ptr     ;           04030201
   ;                                    0x0807060504030201
   ret void
 }
 
 
-define void @multiple_replacements_to_same_byte(i64 *%ptr) {
+define void @multiple_replacements_to_same_byte(ptr %ptr) {
 ; CHECK-LABEL: @multiple_replacements_to_same_byte(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 579005069522043393, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 579005069522043393, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 579005069656919567, i64* %ptr  ; 0x08090a0b0c0d0e0f
+  store i64 579005069656919567, ptr %ptr  ; 0x08090a0b0c0d0e0f
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr1 = getelementptr inbounds i16, i16* %wptr, i64 1
-  %dptr = bitcast i64* %ptr to i32*
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %wptr1 = getelementptr inbounds i16, ptr %ptr, i64 1
 
   ;; We should be able to merge all these stores with the i64 one above
   ; value (not bytes) stored before  ; 0x08090a0b0c0d0e0f
-  store i8         7, i8*  %bptr3    ;           07
-  store i16     1541, i16* %wptr1    ;           0605
-  store i32 67305985, i32* %dptr     ;           04030201
+  store i8         7, ptr  %bptr3    ;           07
+  store i16     1541, ptr %wptr1    ;           0605
+  store i32 67305985, ptr %ptr     ;           04030201
   ;                                    0x08090a0b04030201
   ret void
 }
 
-define void @merged_merges(i64 *%ptr) {
+define void @merged_merges(ptr %ptr) {
 ; CHECK-LABEL: @merged_merges(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 579005069572506113, i64* [[PTR:%.*]]
+; CHECK-NEXT:    store i64 579005069572506113, ptr [[PTR:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 579005069656919567, i64* %ptr  ; 0x08090a0b0c0d0e0f
+  store i64 579005069656919567, ptr %ptr  ; 0x08090a0b0c0d0e0f
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  %wptr = bitcast i64* %ptr to i16*
-  %wptr1 = getelementptr inbounds i16, i16* %wptr, i64 1
-  %dptr = bitcast i64* %ptr to i32*
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %wptr1 = getelementptr inbounds i16, ptr %ptr, i64 1
 
   ;; We should be able to merge all these stores with the i64 one above
   ; value (not bytes) stored before  ; 0x08090a0b0c0d0e0f
-  store i32 67305985, i32* %dptr     ;           04030201
-  store i16     1541, i16* %wptr1    ;           0605
-  store i8         7, i8*  %bptr3    ;           07
+  store i32 67305985, ptr %ptr     ;           04030201
+  store i16     1541, ptr %wptr1    ;           0605
+  store i8         7, ptr  %bptr3    ;           07
   ;                                    0x08090a0b07050201
   ret void
 }
 
-define signext i8 @shouldnt_merge_since_theres_a_full_overlap(i64 *%ptr) {
+define signext i8 @shouldnt_merge_since_theres_a_full_overlap(ptr %ptr) {
 ; CHECK-LABEL: @shouldnt_merge_since_theres_a_full_overlap(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[BPTR:%.*]] = bitcast i64* [[PTR:%.*]] to i8*
-; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 -1
-; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
-; CHECK-NEXT:    [[DPTR:%.*]] = bitcast i8* [[BPTRM1]] to i32*
-; CHECK-NEXT:    [[QPTR:%.*]] = bitcast i8* [[BPTR3]] to i64*
-; CHECK-NEXT:    store i32 1234, i32* [[DPTR]], align 1
-; CHECK-NEXT:    store i64 5678, i64* [[QPTR]], align 1
+; CHECK-NEXT:    [[BPTRM1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 -1
+; CHECK-NEXT:    [[BPTR3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    store i32 1234, ptr [[BPTRM1]], align 1
+; CHECK-NEXT:    store i64 5678, ptr [[BPTR3]], align 1
 ; CHECK-NEXT:    ret i8 0
 ;
 entry:
 
   ; Also check that alias.scope metadata doesn't get dropped
-  store i64 0, i64* %ptr, !alias.scope !32
+  store i64 0, ptr %ptr, !alias.scope !32
 
-  %bptr = bitcast i64* %ptr to i8*
-  %bptrm1 = getelementptr inbounds i8, i8* %bptr, i64 -1
-  %bptr3 = getelementptr inbounds i8, i8* %bptr, i64 3
-  %dptr = bitcast i8* %bptrm1 to i32*
-  %qptr = bitcast i8* %bptr3 to i64*
+  %bptrm1 = getelementptr inbounds i8, ptr %ptr, i64 -1
+  %bptr3 = getelementptr inbounds i8, ptr %ptr, i64 3
 
-  store i32 1234, i32* %dptr, align 1
-  store i64 5678, i64* %qptr, align 1
+  store i32 1234, ptr %bptrm1, align 1
+  store i64 5678, ptr %bptr3, align 1
 
   ret i8 0
 }
@@ -155,50 +138,44 @@ entry:
 ;; Test case from PR31777
 %union.U = type { i64 }
 
-define void @foo(%union.U* nocapture %u) {
+define void @foo(ptr nocapture %u) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[I:%.*]] = getelementptr inbounds [[UNION_U:%.*]], %union.U* [[U:%.*]], i64 0, i32 0
-; CHECK-NEXT:    store i64 42, i64* [[I]], align 8
+; CHECK-NEXT:    store i64 42, ptr [[U:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %i = getelementptr inbounds %union.U, %union.U* %u, i64 0, i32 0
-  store i64 0, i64* %i, align 8, !dbg !22, !tbaa !26, !noalias !32, !nontemporal !29
-  %s = bitcast %union.U* %u to i16*
-  store i16 42, i16* %s, align 8
+  store i64 0, ptr %u, align 8, !dbg !22, !tbaa !26, !noalias !32, !nontemporal !29
+  store i16 42, ptr %u, align 8
   ret void
 }
 
 ; Don't crash by operating on stale data if we merge (kill) the last 2 stores.
 
-define void @PR34074(i32* %x, i64* %y) {
+define void @PR34074(ptr %x, ptr %y) {
 ; CHECK-LABEL: @PR34074(
-; CHECK-NEXT:    store i64 42, i64* %y
-; CHECK-NEXT:    store i32 4, i32* %x
+; CHECK-NEXT:    store i64 42, ptr %y
+; CHECK-NEXT:    store i32 4, ptr %x
 ; CHECK-NEXT:    ret void
 ;
-  store i64 42, i64* %y          ; independent store
-  %xbc = bitcast i32* %x to i8*
-  store i32 0, i32* %x           ; big store of constant
-  store i8 4, i8* %xbc           ; small store with mergeable constant
+  store i64 42, ptr %y          ; independent store
+  store i32 0, ptr %x           ; big store of constant
+  store i8 4, ptr %x           ; small store with mergeable constant
   ret void
 }
 
 ; We can't eliminate the last store because P and Q may alias.
 
-define void @PR36129(i32* %P, i32* %Q) {
+define void @PR36129(ptr %P, ptr %Q) {
 ; CHECK-LABEL: @PR36129(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT:    store i32 2, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store i8 3, i8* [[P2]], align 1
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store i8 3, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P, align 4
-  %P2 = bitcast i32* %P to i8*
-  store i32 2, i32* %Q, align 4
-  store i8 3, i8* %P2, align 1
+  store i32 1, ptr %P, align 4
+  store i32 2, ptr %Q, align 4
+  store i8 3, ptr %P, align 1
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-captures.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-captures.ll
index 45f3e2c429754..544b0ad3d9228 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-captures.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-captures.ll
@@ -3,214 +3,212 @@
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
-declare noalias i8* @malloc(i64)
+declare noalias ptr @malloc(i64)
 
 declare void @foo()
-declare void @capture(i8*)
+declare void @capture(ptr)
 
 ; Check that we do not remove the second store, as %m is returned.
-define i8* @test_return_captures_1() {
+define ptr @test_return_captures_1() {
 ; CHECK-LABEL: @test_return_captures_1(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    ret ptr [[M]]
 ;
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
-  store i8 1, i8* %m
-  ret i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
+  store i8 1, ptr %m
+  ret ptr %m
 }
 
 ; Same as @test_return_captures_1, but across BBs.
-define i8* @test_return_captures_2() {
+define ptr @test_return_captures_2() {
 ; CHECK-LABEL: @test_return_captures_2(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    ret ptr [[M]]
 ;
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  ret i8* %m
+  store i8 1, ptr %m
+  ret ptr %m
 }
 
 
-%S1 = type { i8 * }
+%S1 = type { ptr }
 
 ; We cannot remove the last store to %m, because it escapes by storing it to %E.
-define void @test_malloc_capture_1(%S1* %E) {
+define void @test_malloc_capture_1(ptr %E) {
 ; CHECK-LABEL: @test_malloc_capture_1(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[F_PTR:%.*]] = getelementptr [[S1:%.*]], %S1* [[E:%.*]], i32 0, i32 0
-; CHECK-NEXT:    store i8* [[M]], i8** [[F_PTR]], align 4
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
+; CHECK-NEXT:    store ptr [[M]], ptr [[E:%.*]], align 4
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %m = call i8* @malloc(i64 24)
+  %m = call ptr @malloc(i64 24)
   br label %exit
 
 exit:
-  %f.ptr = getelementptr %S1, %S1* %E, i32 0, i32 0
-  store i8* %m, i8** %f.ptr
-  store i8 1, i8* %m
+  store ptr %m, ptr %E
+  store i8 1, ptr %m
   ret void
 }
 
 ; Check we do not eliminate either store. The first one cannot be eliminated,
 ; due to the call of @capture. The second one because %m escapes.
-define i8* @test_malloc_capture_2() {
+define ptr @test_malloc_capture_2() {
 ; CHECK-LABEL: @test_malloc_capture_2(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    store i8 0, i8* [[M]], align 1
-; CHECK-NEXT:    call void @capture(i8* [[M]])
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    store i8 0, ptr [[M]], align 1
+; CHECK-NEXT:    call void @capture(ptr [[M]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    ret ptr [[M]]
 ;
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
-  call void @capture(i8* %m)
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
+  call void @capture(ptr %m)
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  ret i8* %m
+  store i8 1, ptr %m
+  ret ptr %m
 }
 
-; We can remove the first store store i8 0, i8* %m because there are no throwing
+; We can remove the first store store i8 0, ptr %m because there are no throwing
 ; instructions between the 2 stores and also %m escapes after the killing store.
-define i8* @test_malloc_capture_3() {
+define ptr @test_malloc_capture_3() {
 ; CHECK-LABEL: @test_malloc_capture_3(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    call void @capture(i8* [[M]])
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    call void @capture(ptr [[M]])
+; CHECK-NEXT:    ret ptr [[M]]
 ;
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  call void @capture(i8* %m)
-  ret i8* %m
+  store i8 1, ptr %m
+  call void @capture(ptr %m)
+  ret ptr %m
 }
 
-; TODO: We could remove the first store store i8 0, i8* %m because %m escapes
+; TODO: We could remove the first store store i8 0, ptr %m because %m escapes
 ; after the killing store.
-define i8* @test_malloc_capture_4() {
+define ptr @test_malloc_capture_4() {
 ; CHECK-LABEL: @test_malloc_capture_4(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    store i8 0, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    store i8 0, ptr [[M]], align 1
 ; CHECK-NEXT:    call void @may_throw_readnone()
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    call void @capture(i8* [[M]])
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    call void @capture(ptr [[M]])
+; CHECK-NEXT:    ret ptr [[M]]
 ;
 
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
   call void @may_throw_readnone()
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  call void @capture(i8* %m)
-  ret i8* %m
+  store i8 1, ptr %m
+  call void @capture(ptr %m)
+  ret ptr %m
 }
 
 
-; We cannot remove the first store store i8 0, i8* %m because %m escapes
+; We cannot remove the first store store i8 0, ptr %m because %m escapes
 ; before the killing store and we may throw in between.
-define i8* @test_malloc_capture_5() {
+define ptr @test_malloc_capture_5() {
 ; CHECK-LABEL: @test_malloc_capture_5(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    call void @capture(i8* [[M]])
-; CHECK-NEXT:    store i8 0, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    call void @capture(ptr [[M]])
+; CHECK-NEXT:    store i8 0, ptr [[M]], align 1
 ; CHECK-NEXT:    call void @may_throw_readnone()
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    ret ptr [[M]]
 ;
 
-  %m = call i8* @malloc(i64 24)
-  call void @capture(i8* %m)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  call void @capture(ptr %m)
+  store i8 0, ptr %m
   call void @may_throw_readnone()
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  ret i8* %m
+  store i8 1, ptr %m
+  ret ptr %m
 }
 
 
-; TODO: We could remove the first store 'store i8 0, i8* %m' even though there
+; TODO: We could remove the first store 'store i8 0, ptr %m' even though there
 ; is a throwing instruction between them, because %m escapes after the killing
 ; store.
-define i8* @test_malloc_capture_6() {
+define ptr @test_malloc_capture_6() {
 ; CHECK-LABEL: @test_malloc_capture_6(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    store i8 0, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    store i8 0, ptr [[M]], align 1
 ; CHECK-NEXT:    call void @may_throw_readnone()
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    call void @capture(i8* [[M]])
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    call void @capture(ptr [[M]])
+; CHECK-NEXT:    ret ptr [[M]]
 ;
 
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
   call void @may_throw_readnone()
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  call void @capture(i8* %m)
-  ret i8* %m
+  store i8 1, ptr %m
+  call void @capture(ptr %m)
+  ret ptr %m
 }
 
-; We *could* remove the first store 'store i8 0, i8* %m' even though there is a
+; We *could* remove the first store 'store i8 0, ptr %m' even though there is a
 ; throwing instruction between them, because %m escapes after the killing store.
 ; But this would require using PointerMayBeCapturedBefore in
 ; isInvisibleToCallerBeforeRet, which we currently do not do to limit
 ; compile-time, as this appears to hardly ever lead to more stores eliminated
 ; in practice.
-define i8* @test_malloc_capture_7() {
+define ptr @test_malloc_capture_7() {
 ; CHECK-LABEL: @test_malloc_capture_7(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @malloc(i64 24)
-; CHECK-NEXT:    store i8 0, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call ptr @malloc(i64 24)
+; CHECK-NEXT:    store i8 0, ptr [[M]], align 1
 ; CHECK-NEXT:    call void @may_throw()
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
-; CHECK-NEXT:    call void @capture(i8* [[M]])
-; CHECK-NEXT:    ret i8* [[M]]
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
+; CHECK-NEXT:    call void @capture(ptr [[M]])
+; CHECK-NEXT:    ret ptr [[M]]
 ;
 
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
   call void @may_throw()
   br label %exit
 
 exit:
-  store i8 1, i8* %m
-  call void @capture(i8* %m)
-  ret i8* %m
+  store i8 1, ptr %m
+  call void @capture(ptr %m)
+  ret ptr %m
 }
 
 ; Stores to stack objects can be eliminated if they are not captured inside the function.
@@ -222,100 +220,98 @@ define void @test_alloca_nocapture_1() {
 ; CHECK-NEXT:    ret void
 ;
   %m = alloca i8
-  store i8 0, i8* %m
+  store i8 0, ptr %m
   call void @foo()
   br label %exit
 
 exit:
-  store i8 1, i8* %m
+  store i8 1, ptr %m
   ret void
 }
 
-; Cannot remove first store i8 0, i8* %m, as the call to @capture captures the object.
+; Cannot remove first store i8 0, ptr %m, as the call to @capture captures the object.
 define void @test_alloca_capture_1() {
 ; CHECK-LABEL: @test_alloca_capture_1(
 ; CHECK-NEXT:    [[M:%.*]] = alloca i8, align 1
-; CHECK-NEXT:    store i8 0, i8* [[M]], align 1
-; CHECK-NEXT:    call void @capture(i8* [[M]])
+; CHECK-NEXT:    store i8 0, ptr [[M]], align 1
+; CHECK-NEXT:    call void @capture(ptr [[M]])
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
 ;
   %m = alloca i8
-  store i8 0, i8* %m
-  call void @capture(i8* %m)
+  store i8 0, ptr %m
+  call void @capture(ptr %m)
   br label %exit
 
 exit:
-  store i8 1, i8* %m
+  store i8 1, ptr %m
   ret void
 }
 
 ; We can remove the last store to %m, even though it escapes because the alloca
 ; becomes invalid after the function returns.
-define void @test_alloca_capture_2(%S1* %E) {
+define void @test_alloca_capture_2(ptr %E) {
 ; CHECK-LABEL: @test_alloca_capture_2(
 ; CHECK-NEXT:    [[M:%.*]] = alloca i8, align 1
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[F_PTR:%.*]] = getelementptr [[S1:%.*]], %S1* [[E:%.*]], i32 0, i32 0
-; CHECK-NEXT:    store i8* [[M]], i8** [[F_PTR]], align 4
+; CHECK-NEXT:    store ptr [[M]], ptr [[E:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %m = alloca i8
   br label %exit
 
 exit:
-  %f.ptr = getelementptr %S1, %S1* %E, i32 0, i32 0
-  store i8* %m, i8** %f.ptr
-  store i8 1, i8* %m
+  store ptr %m, ptr %E
+  store i8 1, ptr %m
   ret void
 }
 
 ; Readnone functions are not modeled in MemorySSA, but could throw.
-; Make sure we do not eliminate the first store 'store i8 2, i8* %call'
+; Make sure we do not eliminate the first store 'store i8 2, ptr %call'
 define void @malloc_capture_throw_1() {
 ; CHECK-LABEL: @malloc_capture_throw_1(
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i64 1)
-; CHECK-NEXT:    call void @may_capture(i8* [[CALL]])
-; CHECK-NEXT:    store i8 2, i8* [[CALL]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @malloc(i64 1)
+; CHECK-NEXT:    call void @may_capture(ptr [[CALL]])
+; CHECK-NEXT:    store i8 2, ptr [[CALL]], align 1
 ; CHECK-NEXT:    call void @may_throw_readnone()
-; CHECK-NEXT:    store i8 3, i8* [[CALL]], align 1
+; CHECK-NEXT:    store i8 3, ptr [[CALL]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %call = call i8* @malloc(i64 1)
-  call void @may_capture(i8* %call)
-  store i8 2, i8* %call, align 1
+  %call = call ptr @malloc(i64 1)
+  call void @may_capture(ptr %call)
+  store i8 2, ptr %call, align 1
   call void @may_throw_readnone()
-  store i8 3, i8* %call, align 1
+  store i8 3, ptr %call, align 1
   ret void
 }
 
 ; Readnone functions are not modeled in MemorySSA, but could throw.
-; Make sure we do not eliminate the first store 'store i8 2, i8* %call'
+; Make sure we do not eliminate the first store 'store i8 2, ptr %call'
 define void @malloc_capture_throw_2() {
 ; CHECK-LABEL: @malloc_capture_throw_2(
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i64 1)
-; CHECK-NEXT:    call void @may_capture(i8* [[CALL]])
-; CHECK-NEXT:    store i8 2, i8* [[CALL]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @malloc(i64 1)
+; CHECK-NEXT:    call void @may_capture(ptr [[CALL]])
+; CHECK-NEXT:    store i8 2, ptr [[CALL]], align 1
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb:
 ; CHECK-NEXT:    call void @may_throw_readnone()
-; CHECK-NEXT:    store i8 3, i8* [[CALL]], align 1
+; CHECK-NEXT:    store i8 3, ptr [[CALL]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %call = call i8* @malloc(i64 1)
-  call void @may_capture(i8* %call)
-  store i8 2, i8* %call, align 1
+  %call = call ptr @malloc(i64 1)
+  call void @may_capture(ptr %call)
+  store i8 2, ptr %call, align 1
   br label %bb
 
 bb:
   call void @may_throw_readnone()
-  store i8 3, i8* %call, align 1
+  store i8 3, ptr %call, align 1
   ret void
 }
 
 
-declare void @may_capture(i8*)
+declare void @may_capture(ptr)
 declare void @may_throw_readnone() readnone
 declare void @may_throw()

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-exceptions.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-exceptions.ll
index 08a15565e18ff..7754b1058e0d8 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-exceptions.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-exceptions.ll
@@ -6,15 +6,15 @@ declare void @f()
 declare i32 @__CxxFrameHandler3(...)
 
 
-; Make sure we do not eliminate `store i32 20, i32* %sv`. Even though it is a store
+; Make sure we do not eliminate `store i32 20, ptr %sv`. Even though it is a store
 ; to a stack object, we can read it in the landing/catchpad.
-define void @test12(i32* %p) personality i32 (...)* @__CxxFrameHandler3 {
+define void @test12(ptr %p) personality ptr @__CxxFrameHandler3 {
 ; CHECK-LABEL: @test12(
 ; CHECK-NEXT:  block1:
 ; CHECK-NEXT:    [[SV:%.*]] = alloca i32
 ; CHECK-NEXT:    br label [[BLOCK2:%.*]]
 ; CHECK:       block2:
-; CHECK-NEXT:    store i32 20, i32* [[SV]]
+; CHECK-NEXT:    store i32 20, ptr [[SV]]
 ; CHECK-NEXT:    invoke void @f()
 ; CHECK-NEXT:    to label [[BLOCK3:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
 ; CHECK:       block3:
@@ -23,7 +23,7 @@ define void @test12(i32* %p) personality i32 (...)* @__CxxFrameHandler3 {
 ; CHECK-NEXT:    [[CS1:%.*]] = catchswitch within none [label %catch] unwind label [[CLEANUP:%.*]]
 ; CHECK:       catch:
 ; CHECK-NEXT:    [[C:%.*]] = catchpad within [[CS1]] []
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[SV]]
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[SV]]
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       cleanup:
 ; CHECK-NEXT:    [[C1:%.*]] = cleanuppad within none []
@@ -36,12 +36,12 @@ block1:
   br label %block2
 
 block2:
-  store i32 20, i32* %sv
+  store i32 20, ptr %sv
   invoke void @f()
   to label %block3 unwind label %catch.dispatch
 
 block3:
-  store i32 30, i32* %sv
+  store i32 30, ptr %sv
   br label %exit
 
 catch.dispatch:
@@ -49,7 +49,7 @@ catch.dispatch:
 
 catch:
   %c = catchpad within %cs1 []
-  %lv = load i32, i32* %sv
+  %lv = load i32, ptr %sv
   br label %exit
 
 cleanup:
@@ -57,6 +57,6 @@ cleanup:
   br label %exit
 
 exit:
-  store i32 40, i32* %sv
+  store i32 40, ptr %sv
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
index 3f2b007905014..4a2c2666bc4a5 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
@@ -16,8 +16,8 @@ define void @test.1() {
 ; CHECK-NEXT:    br label [[LOOP_1:%.*]]
 ; CHECK:       loop.1:
 ; CHECK-NEXT:    [[IV_1:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IV_1_NEXT:%.*]], [[LOOP_1]] ]
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[IV_1]]
-; CHECK-NEXT:    store i32 0, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[IV_1]]
+; CHECK-NEXT:    store i32 0, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    [[IV_1_NEXT]] = add nsw i64 [[IV_1]], 1
 ; CHECK-NEXT:    [[C_1:%.*]] = icmp slt i64 [[IV_1_NEXT]], 100
 ; CHECK-NEXT:    br i1 [[C_1]], label [[LOOP_1]], label [[LOOP_2_PH:%.*]]
@@ -25,13 +25,13 @@ define void @test.1() {
 ; CHECK-NEXT:    br label [[LOOP_2:%.*]]
 ; CHECK:       loop.2:
 ; CHECK-NEXT:    [[IV_2:%.*]] = phi i64 [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_2_PH]] ]
-; CHECK-NEXT:    [[PTR_IV_2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[IV_2]]
-; CHECK-NEXT:    [[L_0:%.*]] = load i32, i32* [[PTR_IV_2]], align 4
+; CHECK-NEXT:    [[PTR_IV_2:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[IV_2]]
+; CHECK-NEXT:    [[L_0:%.*]] = load i32, ptr [[PTR_IV_2]], align 4
 ; CHECK-NEXT:    call void @use(i32 [[L_0]])
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[IV_2]], 1
-; CHECK-NEXT:    [[PTR_IV_2_ADD_1:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[ADD]]
-; CHECK-NEXT:    store i32 10, i32* [[PTR_IV_2_ADD_1]], align 4
-; CHECK-NEXT:    [[L_1:%.*]] = load i32, i32* [[PTR_IV_2]], align 4
+; CHECK-NEXT:    [[PTR_IV_2_ADD_1:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[ADD]]
+; CHECK-NEXT:    store i32 10, ptr [[PTR_IV_2_ADD_1]], align 4
+; CHECK-NEXT:    [[L_1:%.*]] = load i32, ptr [[PTR_IV_2]], align 4
 ; CHECK-NEXT:    call void @use(i32 [[L_1]])
 ; CHECK-NEXT:    [[IV_2_NEXT]] = add nsw i64 [[IV_2]], 1
 ; CHECK-NEXT:    [[C_2:%.*]] = icmp slt i64 [[IV_2_NEXT]], 100
@@ -45,8 +45,8 @@ entry:
 
 loop.1:
   %iv.1 = phi i64 [ 1, %entry ], [ %iv.1.next, %loop.1 ]
-  %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %iv.1
-  store i32 0, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %iv.1
+  store i32 0, ptr %arrayidx1, align 4
   %iv.1.next = add nsw i64 %iv.1, 1
   %c.1 = icmp slt i64 %iv.1.next, 100
   br i1 %c.1, label %loop.1, label %loop.2.ph
@@ -56,13 +56,13 @@ loop.2.ph:
 
 loop.2:
   %iv.2 = phi i64 [ %iv.2.next, %loop.2 ], [ 0, %loop.2.ph ]
-  %ptr.iv.2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %iv.2
-  %l.0 = load i32, i32* %ptr.iv.2, align 4
+  %ptr.iv.2 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %iv.2
+  %l.0 = load i32, ptr %ptr.iv.2, align 4
   call void @use(i32 %l.0)
   %add = add nsw i64 %iv.2, 1
-  %ptr.iv.2.add.1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %add
-  store i32 10, i32* %ptr.iv.2.add.1, align 4
-  %l.1 = load i32, i32* %ptr.iv.2, align 4
+  %ptr.iv.2.add.1 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %add
+  store i32 10, ptr %ptr.iv.2.add.1, align 4
+  %l.1 = load i32, ptr %ptr.iv.2, align 4
   call void @use(i32 %l.1)
   %iv.2.next = add nsw i64 %iv.2, 1
   %c.2 = icmp slt i64 %iv.2.next, 100
@@ -79,12 +79,11 @@ define void @test.2() {
 ; CHECK-LABEL: @test.2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca [100 x i32], align 4
-; CHECK-NEXT:    [[A_CAST:%.*]] = bitcast [100 x i32]* [[A]] to i8*
 ; CHECK-NEXT:    br label [[LOOP_1:%.*]]
 ; CHECK:       loop.1:
 ; CHECK-NEXT:    [[IV_1:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IV_1_NEXT:%.*]], [[LOOP_1]] ]
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[IV_1]]
-; CHECK-NEXT:    store i32 0, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[IV_1]]
+; CHECK-NEXT:    store i32 0, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    [[IV_1_NEXT]] = add nsw i64 [[IV_1]], 1
 ; CHECK-NEXT:    [[C_1:%.*]] = icmp slt i64 [[IV_1_NEXT]], 100
 ; CHECK-NEXT:    br i1 [[C_1]], label [[LOOP_1]], label [[LOOP_2_PH:%.*]]
@@ -92,30 +91,29 @@ define void @test.2() {
 ; CHECK-NEXT:    br label [[LOOP_2:%.*]]
 ; CHECK:       loop.2:
 ; CHECK-NEXT:    [[IV_2:%.*]] = phi i64 [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_2_PH]] ]
-; CHECK-NEXT:    [[PTR_IV_2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[IV_2]]
-; CHECK-NEXT:    [[L_0:%.*]] = load i32, i32* [[PTR_IV_2]], align 4
+; CHECK-NEXT:    [[PTR_IV_2:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[IV_2]]
+; CHECK-NEXT:    [[L_0:%.*]] = load i32, ptr [[PTR_IV_2]], align 4
 ; CHECK-NEXT:    call void @use(i32 [[L_0]])
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[IV_2]], 1
-; CHECK-NEXT:    [[PTR_IV_2_ADD_1:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[ADD]]
-; CHECK-NEXT:    store i32 10, i32* [[PTR_IV_2_ADD_1]], align 4
-; CHECK-NEXT:    [[L_1:%.*]] = load i32, i32* [[PTR_IV_2]], align 4
+; CHECK-NEXT:    [[PTR_IV_2_ADD_1:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[ADD]]
+; CHECK-NEXT:    store i32 10, ptr [[PTR_IV_2_ADD_1]], align 4
+; CHECK-NEXT:    [[L_1:%.*]] = load i32, ptr [[PTR_IV_2]], align 4
 ; CHECK-NEXT:    call void @use(i32 [[L_1]])
 ; CHECK-NEXT:    [[IV_2_NEXT]] = add nsw i64 [[IV_2]], 1
 ; CHECK-NEXT:    [[C_2:%.*]] = icmp slt i64 [[IV_2_NEXT]], 100
 ; CHECK-NEXT:    br i1 [[C_2]], label [[LOOP_2]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull [[A_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 400, ptr nonnull [[A]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %A = alloca [100 x i32], align 4
-  %A.cast = bitcast [100 x i32]* %A to i8*
   br label %loop.1
 
 loop.1:
   %iv.1 = phi i64 [ 1, %entry ], [ %iv.1.next, %loop.1 ]
-  %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %iv.1
-  store i32 0, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %iv.1
+  store i32 0, ptr %arrayidx1, align 4
   %iv.1.next = add nsw i64 %iv.1, 1
   %c.1 = icmp slt i64 %iv.1.next, 100
   br i1 %c.1, label %loop.1, label %loop.2.ph
@@ -125,33 +123,32 @@ loop.2.ph:
 
 loop.2:
   %iv.2 = phi i64 [ %iv.2.next, %loop.2 ], [ 0, %loop.2.ph ]
-  %ptr.iv.2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %iv.2
-  %l.0 = load i32, i32* %ptr.iv.2, align 4
+  %ptr.iv.2 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %iv.2
+  %l.0 = load i32, ptr %ptr.iv.2, align 4
   call void @use(i32 %l.0)
   %add = add nsw i64 %iv.2, 1
-  %ptr.iv.2.add.1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %add
-  store i32 10, i32* %ptr.iv.2.add.1, align 4
-  %l.1 = load i32, i32* %ptr.iv.2, align 4
+  %ptr.iv.2.add.1 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %add
+  store i32 10, ptr %ptr.iv.2.add.1, align 4
+  %l.1 = load i32, ptr %ptr.iv.2, align 4
   call void @use(i32 %l.1)
   %iv.2.next = add nsw i64 %iv.2, 1
   %c.2 = icmp slt i64 %iv.2.next, 100
   br i1 %c.2, label %loop.2, label %exit
 
 exit:
-  call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %A.cast) #5
+  call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %A) #5
   ret void
 }
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
-; Make sure `store i32 10, i32* %ptr.2` in %cond.store is not removed. The
-; stored value may be read by `%use = load i32, i32* %ptr.1` in a future
+; Make sure `store i32 10, ptr %ptr.2` in %cond.store is not removed. The
+; stored value may be read by `%use = load i32, ptr %ptr.1` in a future
 ; iteration.
 define void @test.3() {
 ; CHECK-LABEL: @test.3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[NODESTACK:%.*]] = alloca [12 x i32], align 4
-; CHECK-NEXT:    [[NODESTACK_CAST:%.*]] = bitcast [12 x i32]* [[NODESTACK]] to i8*
 ; CHECK-NEXT:    [[C_1:%.*]] = call i1 @cond(i32 1)
 ; CHECK-NEXT:    br i1 [[C_1]], label [[CLEANUP:%.*]], label [[LOOP_HEADER:%.*]]
 ; CHECK:       loop.header:
@@ -160,13 +157,13 @@ define void @test.3() {
 ; CHECK-NEXT:    br i1 [[CMP]], label [[COND_READ:%.*]], label [[COND_STORE:%.*]]
 ; CHECK:       cond.read:
 ; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[DEPTH_1]], -3
-; CHECK-NEXT:    [[PTR_1:%.*]] = getelementptr inbounds [12 x i32], [12 x i32]* [[NODESTACK]], i32 0, i32 [[SUB]]
-; CHECK-NEXT:    [[USE:%.*]] = load i32, i32* [[PTR_1]], align 4
+; CHECK-NEXT:    [[PTR_1:%.*]] = getelementptr inbounds [12 x i32], ptr [[NODESTACK]], i32 0, i32 [[SUB]]
+; CHECK-NEXT:    [[USE:%.*]] = load i32, ptr [[PTR_1]], align 4
 ; CHECK-NEXT:    [[C_2:%.*]] = call i1 @cond(i32 [[USE]])
 ; CHECK-NEXT:    br i1 [[C_2]], label [[LOOP_LATCH]], label [[COND_STORE]]
 ; CHECK:       cond.store:
-; CHECK-NEXT:    [[PTR_2:%.*]] = getelementptr inbounds [12 x i32], [12 x i32]* [[NODESTACK]], i32 0, i32 [[DEPTH_1]]
-; CHECK-NEXT:    store i32 10, i32* [[PTR_2]], align 4
+; CHECK-NEXT:    [[PTR_2:%.*]] = getelementptr inbounds [12 x i32], ptr [[NODESTACK]], i32 0, i32 [[DEPTH_1]]
+; CHECK-NEXT:    store i32 10, ptr [[PTR_2]], align 4
 ; CHECK-NEXT:    [[INC:%.*]] = add nsw i32 [[DEPTH_1]], 1
 ; CHECK-NEXT:    [[C_3:%.*]] = call i1 @cond(i32 20)
 ; CHECK-NEXT:    br i1 [[C_3]], label [[CLEANUP]], label [[LOOP_LATCH]]
@@ -174,12 +171,11 @@ define void @test.3() {
 ; CHECK-NEXT:    [[DEPTH_1_BE]] = phi i32 [ [[SUB]], [[COND_READ]] ], [ [[INC]], [[COND_STORE]] ]
 ; CHECK-NEXT:    br label [[LOOP_HEADER]]
 ; CHECK:       cleanup:
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 48, i8* nonnull [[NODESTACK_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 48, ptr nonnull [[NODESTACK]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %nodeStack = alloca [12 x i32], align 4
-  %nodeStack.cast = bitcast [12 x i32]* %nodeStack to i8*
   %c.1 = call i1 @cond(i32 1)
   br i1 %c.1, label %cleanup, label %loop.header
 
@@ -190,14 +186,14 @@ loop.header:                                       ; preds = %entry, %while.cond
 
 cond.read:                                        ; preds = %while.cond
   %sub = add nsw i32 %depth.1, -3
-  %ptr.1 = getelementptr inbounds [12 x i32], [12 x i32]* %nodeStack, i32 0, i32 %sub
-  %use = load i32, i32* %ptr.1, align 4
+  %ptr.1 = getelementptr inbounds [12 x i32], ptr %nodeStack, i32 0, i32 %sub
+  %use = load i32, ptr %ptr.1, align 4
   %c.2 = call i1 @cond(i32 %use)
   br i1 %c.2, label %loop.latch, label %cond.store
 
 cond.store:
-  %ptr.2 = getelementptr inbounds [12 x i32], [12 x i32]* %nodeStack, i32 0, i32 %depth.1
-  store i32 10, i32* %ptr.2, align 4
+  %ptr.2 = getelementptr inbounds [12 x i32], ptr %nodeStack, i32 0, i32 %depth.1
+  store i32 10, ptr %ptr.2, align 4
   %inc = add nsw i32 %depth.1, 1
   %c.3 = call i1 @cond(i32 20)
   br i1 %c.3, label %cleanup, label %loop.latch
@@ -207,7 +203,7 @@ loop.latch:
   br label %loop.header
 
 cleanup:                                          ; preds = %while.body, %while.end, %entry
-  call void @llvm.lifetime.end.p0i8(i64 48, i8* nonnull %nodeStack.cast) #3
+  call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack) #3
   ret void
 }
 
@@ -215,14 +211,14 @@ define void @store_to_invariant_loc() {
 ; CHECK-LABEL: @store_to_invariant_loc(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca [100 x i32], align 4
-; CHECK-NEXT:    [[PTR_20:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 20
+; CHECK-NEXT:    [[PTR_20:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 20
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[PTR_IV:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[IV]]
-; CHECK-NEXT:    [[L_0:%.*]] = load i32, i32* [[PTR_IV]], align 4
+; CHECK-NEXT:    [[PTR_IV:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[L_0:%.*]] = load i32, ptr [[PTR_IV]], align 4
 ; CHECK-NEXT:    call void @use(i32 [[L_0]])
-; CHECK-NEXT:    store i32 10, i32* [[PTR_20]], align 4
+; CHECK-NEXT:    store i32 10, ptr [[PTR_20]], align 4
 ; CHECK-NEXT:    [[IV_NEXT]] = add nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[C:%.*]] = icmp slt i64 [[IV_NEXT]], 100
 ; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT:%.*]]
@@ -231,15 +227,15 @@ define void @store_to_invariant_loc() {
 ;
 entry:
   %A = alloca [100 x i32], align 4
-  %ptr.20 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 20
+  %ptr.20 = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 20
   br label %loop
 
 loop:
   %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
-  %ptr.iv = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 0, i64 %iv
-  %l.0 = load i32, i32* %ptr.iv, align 4
+  %ptr.iv = getelementptr inbounds [100 x i32], ptr %A, i64 0, i64 %iv
+  %l.0 = load i32, ptr %ptr.iv, align 4
   call void @use(i32 %l.0)
-  store i32 10, i32* %ptr.20, align 4
+  store i32 10, ptr %ptr.20, align 4
   %iv.next = add nsw i64 %iv, 1
   %c = icmp slt i64 %iv.next, 100
   br i1 %c, label %loop , label %exit

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-loops.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-loops.ll
index e555e04f51d1c..fb8ba28eadf1e 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-loops.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-loops.ll
@@ -2,14 +2,14 @@
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind
 
-define void @test13(i32* noalias %P) {
+define void @test13(ptr noalias %P) {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR:%.*]]
 ; CHECK:       for:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 false, label [[FOR]], label [[END:%.*]]
 ; CHECK:       end:
 ; CHECK-NEXT:    ret void
@@ -17,89 +17,85 @@ define void @test13(i32* noalias %P) {
 entry:
   br label %for
 for:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 false, label %for, label %end
 end:
   ret void
 }
 
 
-define void @test14(i32* noalias %P) {
+define void @test14(ptr noalias %P) {
 ; CHECK-LABEL: @test14(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR:%.*]]
 ; CHECK:       for:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 false, label [[FOR]], label [[END:%.*]]
 ; CHECK:       end:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %for
 for:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 false, label %for, label %end
 end:
   ret void
 }
 
-define void @test18(i32* noalias %P) {
+define void @test18(ptr noalias %P) {
 ; CHECK-LABEL: @test18(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[FOR:%.*]]
 ; CHECK:       for:
-; CHECK-NEXT:    store i8 1, i8* [[P2]], align 1
-; CHECK-NEXT:    [[X:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT:    store i8 2, i8* [[P2]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[P]], align 1
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT:    store i8 2, ptr [[P]], align 1
 ; CHECK-NEXT:    br i1 false, label [[FOR]], label [[END:%.*]]
 ; CHECK:       end:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %P2 = bitcast i32* %P to i8*
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %for
 for:
-  store i8 1, i8* %P2
-  %x = load i32, i32* %P
-  store i8 2, i8* %P2
+  store i8 1, ptr %P
+  %x = load i32, ptr %P
+  store i8 2, ptr %P
   br i1 false, label %for, label %end
 end:
   ret void
 }
 
-define void @test21(i32* noalias %P) {
+define void @test21(ptr noalias %P) {
 ; CHECK-LABEL: @test21(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
 ; CHECK-NEXT:    br label [[FOR:%.*]]
 ; CHECK:       for:
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    br i1 false, label [[FOR]], label [[END:%.*]]
 ; CHECK:       end:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br label %for
 for:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   br i1 false, label %for, label %end
 end:
   ret void
 }
 
-define void @test_loop(i32 %N, i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %x, i32* noalias nocapture %b) local_unnamed_addr {
+define void @test_loop(i32 %N, ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %x, ptr noalias nocapture %b) local_unnamed_addr {
 ; CHECK-LABEL: @test_loop(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP27:%.*]] = icmp sgt i32 [[N:%.*]], 0
@@ -110,24 +106,24 @@ define void @test_loop(i32 %N, i32* noalias nocapture readonly %A, i32* noalias
 ; CHECK-NEXT:    ret void
 ; CHECK:       for.body4.lr.ph:
 ; CHECK-NEXT:    [[I_028:%.*]] = phi i32 [ [[INC11:%.*]], [[FOR_COND_CLEANUP3:%.*]] ], [ 0, [[FOR_BODY4_LR_PH_PREHEADER]] ]
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[I_028]]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[I_028]]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[I_028]], [[N]]
 ; CHECK-NEXT:    br label [[FOR_BODY4:%.*]]
 ; CHECK:       for.body4:
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY4_LR_PH]] ], [ [[ADD9:%.*]], [[FOR_BODY4]] ]
 ; CHECK-NEXT:    [[J_026:%.*]] = phi i32 [ 0, [[FOR_BODY4_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[J_026]], [[MUL]]
-; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[ADD]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
-; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i32 [[J_026]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[ADD]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[J_026]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
 ; CHECK-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[TMP2]], [[TMP1]]
 ; CHECK-NEXT:    [[ADD9]] = add nsw i32 [[MUL7]], [[TMP0]]
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[J_026]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP3]], label [[FOR_BODY4]]
 ; CHECK:       for.cond.cleanup3:
-; CHECK-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    store i32 [[ADD9]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[INC11]] = add nuw nsw i32 [[I_028]], 1
 ; CHECK-NEXT:    [[EXITCOND29:%.*]] = icmp eq i32 [[INC11]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND29]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_LR_PH]]
@@ -144,8 +140,8 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup3,
 
 for.body4.lr.ph:                                  ; preds = %for.body4.lr.ph.preheader, %for.cond.cleanup3
   %i.028 = phi i32 [ %inc11, %for.cond.cleanup3 ], [ 0, %for.body4.lr.ph.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.028
-  store i32 0, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.028
+  store i32 0, ptr %arrayidx, align 4
   %mul = mul nsw i32 %i.028, %N
   br label %for.body4
 
@@ -153,10 +149,10 @@ for.body4:                                        ; preds = %for.body4, %for.bod
   %0 = phi i32 [ 0, %for.body4.lr.ph ], [ %add9, %for.body4 ]
   %j.026 = phi i32 [ 0, %for.body4.lr.ph ], [ %inc, %for.body4 ]
   %add = add nsw i32 %j.026, %mul
-  %arrayidx5 = getelementptr inbounds i32, i32* %A, i32 %add
-  %1 = load i32, i32* %arrayidx5, align 4
-  %arrayidx6 = getelementptr inbounds i32, i32* %x, i32 %j.026
-  %2 = load i32, i32* %arrayidx6, align 4
+  %arrayidx5 = getelementptr inbounds i32, ptr %A, i32 %add
+  %1 = load i32, ptr %arrayidx5, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %x, i32 %j.026
+  %2 = load i32, ptr %arrayidx6, align 4
   %mul7 = mul nsw i32 %2, %1
   %add9 = add nsw i32 %mul7, %0
   %inc = add nuw nsw i32 %j.026, 1
@@ -164,25 +160,25 @@ for.body4:                                        ; preds = %for.body4, %for.bod
   br i1 %exitcond, label %for.cond.cleanup3, label %for.body4
 
 for.cond.cleanup3:                                ; preds = %for.body4
-  store i32 %add9, i32* %arrayidx, align 4
+  store i32 %add9, ptr %arrayidx, align 4
   %inc11 = add nuw nsw i32 %i.028, 1
   %exitcond29 = icmp eq i32 %inc11, %N
   br i1 %exitcond29, label %for.cond.cleanup, label %for.body4.lr.ph
 }
 
-define i32 @test_if(i1 %c, i32* %p, i32 %i) {
+define i32 @test_if(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test_if(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[PH:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BB3:%.*]] ]
 ; CHECK-NEXT:    [[INC]] = add i32 [[PH]], 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i32 [[PH]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i32 [[PH]]
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB2:%.*]], label [[BB3]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[GEP]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = icmp slt i32 [[PH]], 10
 ; CHECK-NEXT:    br i1 [[C1]], label [[BB1]], label [[EXIT:%.*]]
 ; CHECK:       exit:
@@ -193,34 +189,34 @@ entry:
 bb1:
   %ph = phi i32 [ 0, %entry ], [ %inc, %bb3 ]
   %inc = add i32 %ph, 1
-  %gep = getelementptr inbounds i32, i32* %p, i32 %ph
-  store i32 %i, i32* %gep, align 4
+  %gep = getelementptr inbounds i32, ptr %p, i32 %ph
+  store i32 %i, ptr %gep, align 4
   br i1 %c, label %bb2, label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 2, i32* %gep, align 4
+  store i32 2, ptr %gep, align 4
   %c1 = icmp slt i32 %ph, 10
   br i1 %c1, label %bb1, label %exit
 exit:
   ret i32 0
 }
 
-define i32 @test_if2(i1 %c, i32* %p, i32 %i) {
+define i32 @test_if2(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test_if2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[PH:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BB2:%.*]] ], [ [[INC]], [[BB3:%.*]] ]
 ; CHECK-NEXT:    [[INC]] = add i32 [[PH]], 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i32 [[PH]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i32 [[PH]]
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB2]], label [[BB3]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 2, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[GEP]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = icmp slt i32 [[PH]], 10
 ; CHECK-NEXT:    br i1 [[C1]], label [[BB1]], label [[EXIT:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 3, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[GEP]], align 4
 ; CHECK-NEXT:    [[C2:%.*]] = icmp slt i32 [[PH]], 5
 ; CHECK-NEXT:    br i1 [[C2]], label [[BB1]], label [[EXIT]]
 ; CHECK:       exit:
@@ -231,33 +227,33 @@ entry:
 bb1:
   %ph = phi i32 [ 0, %entry ], [ %inc, %bb2 ], [ %inc, %bb3 ]
   %inc = add i32 %ph, 1
-  %gep = getelementptr inbounds i32, i32* %p, i32 %ph
-  store i32 %i, i32* %gep, align 4
+  %gep = getelementptr inbounds i32, ptr %p, i32 %ph
+  store i32 %i, ptr %gep, align 4
   br i1 %c, label %bb2, label %bb3
 bb2:
-  store i32 2, i32* %gep, align 4
+  store i32 2, ptr %gep, align 4
   %c1 = icmp slt i32 %ph, 10
   br i1 %c1, label %bb1, label %exit
 bb3:
-  store i32 3, i32* %gep, align 4
+  store i32 3, ptr %gep, align 4
   %c2 = icmp slt i32 %ph, 5
   br i1 %c2, label %bb1, label %exit
 exit:
   ret i32 0
 }
 
-define i32 @test_if3(i1 %c, i32* %p, i32 %i) {
+define i32 @test_if3(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test_if3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[PH:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BB3:%.*]] ]
 ; CHECK-NEXT:    [[INC]] = add i32 [[PH]], 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i32 [[PH]]
-; CHECK-NEXT:    store i32 [[I:%.*]], i32* [[GEP]], align 4
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i32 [[PH]]
+; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[GEP]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB2:%.*]], label [[BB3]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 2, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[GEP]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    [[C1:%.*]] = icmp slt i32 [[PH]], 10
@@ -270,11 +266,11 @@ entry:
 bb1:
   %ph = phi i32 [ 0, %entry ], [ %inc, %bb3 ]
   %inc = add i32 %ph, 1
-  %gep = getelementptr inbounds i32, i32* %p, i32 %ph
-  store i32 %i, i32* %gep, align 4
+  %gep = getelementptr inbounds i32, ptr %p, i32 %ph
+  store i32 %i, ptr %gep, align 4
   br i1 %c, label %bb2, label %bb3
 bb2:
-  store i32 2, i32* %gep, align 4
+  store i32 2, ptr %gep, align 4
   br label %bb3
 bb3:
   %c1 = icmp slt i32 %ph, 10
@@ -283,18 +279,18 @@ exit:
   ret i32 0
 }
 
-define i32 @test_if4(i1 %c, i32* %p, i32 %i) {
+define i32 @test_if4(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test_if4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[PH:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BB1]] ], [ [[INC]], [[BB2:%.*]] ]
 ; CHECK-NEXT:    [[INC]] = add i32 [[PH]], 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i32 [[PH]]
-; CHECK-NEXT:    store i32 [[I:%.*]], i32* [[GEP]], align 4
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i32 [[PH]]
+; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[GEP]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB2]], label [[BB1]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 2, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[GEP]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = icmp slt i32 [[PH]], 10
 ; CHECK-NEXT:    br i1 [[C1]], label [[BB1]], label [[EXIT:%.*]]
 ; CHECK:       exit:
@@ -305,11 +301,11 @@ entry:
 bb1:
   %ph = phi i32 [ 0, %entry ], [ %inc, %bb1 ], [ %inc, %bb2 ]
   %inc = add i32 %ph, 1
-  %gep = getelementptr inbounds i32, i32* %p, i32 %ph
-  store i32 %i, i32* %gep, align 4
+  %gep = getelementptr inbounds i32, ptr %p, i32 %ph
+  store i32 %i, ptr %gep, align 4
   br i1 %c, label %bb2, label %bb1
 bb2:
-  store i32 2, i32* %gep, align 4
+  store i32 2, ptr %gep, align 4
   %c1 = icmp slt i32 %ph, 10
   br i1 %c1, label %bb1, label %exit
 exit:
@@ -317,20 +313,20 @@ exit:
 }
 
 declare void @clobber()
-define i32 @test_self(i1 %c, i32* %p, i32 %i) {
+define i32 @test_self(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test_self(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[PH:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BB1]] ], [ [[INC]], [[BB2:%.*]] ]
 ; CHECK-NEXT:    [[INC]] = add i32 [[PH]], 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i32 [[PH]]
-; CHECK-NEXT:    store i32 1, i32* [[GEP]], align 4
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i32 [[PH]]
+; CHECK-NEXT:    store i32 1, ptr [[GEP]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    store i32 2, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[GEP]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB2]], label [[BB1]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 3, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[GEP]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = icmp slt i32 [[PH]], 10
 ; CHECK-NEXT:    br i1 [[C1]], label [[BB1]], label [[EXIT:%.*]]
 ; CHECK:       exit:
@@ -341,13 +337,13 @@ entry:
 bb1:
   %ph = phi i32 [ 0, %entry ], [ %inc, %bb1 ], [ %inc, %bb2 ]
   %inc = add i32 %ph, 1
-  %gep = getelementptr inbounds i32, i32* %p, i32 %ph
-  store i32 1, i32* %gep, align 4
+  %gep = getelementptr inbounds i32, ptr %p, i32 %ph
+  store i32 1, ptr %gep, align 4
   call void @clobber()
-  store i32 2, i32* %gep, align 4
+  store i32 2, ptr %gep, align 4
   br i1 %c, label %bb2, label %bb1
 bb2:
-  store i32 3, i32* %gep, align 4
+  store i32 3, ptr %gep, align 4
   %c1 = icmp slt i32 %ph, 10
   br i1 %c1, label %bb1, label %exit
 exit:
@@ -362,17 +358,17 @@ define i32 @test_selfalloca(i1 %c, i32 %i) {
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[PH:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BB1]] ], [ [[INC]], [[BB2:%.*]] ]
 ; CHECK-NEXT:    [[INC]] = add i32 [[PH]], 1
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[P]], i32 [[PH]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 [[PH]]
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    store i32 2, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[GEP]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB2]], label [[BB1]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 3, i32* [[GEP]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[GEP]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = icmp slt i32 [[PH]], 10
 ; CHECK-NEXT:    br i1 [[C1]], label [[BB1]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[PG:%.*]] = getelementptr inbounds i32, i32* [[P]], i32 [[I:%.*]]
-; CHECK-NEXT:    [[L:%.*]] = load i32, i32* [[PG]], align 4
+; CHECK-NEXT:    [[PG:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 [[I:%.*]]
+; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[PG]], align 4
 ; CHECK-NEXT:    ret i32 [[L]]
 ;
 entry:
@@ -381,165 +377,163 @@ entry:
 bb1:
   %ph = phi i32 [ 0, %entry ], [ %inc, %bb1 ], [ %inc, %bb2 ]
   %inc = add i32 %ph, 1
-  %gep = getelementptr inbounds i32, i32* %p, i32 %ph
-  store i32 1, i32* %gep, align 4
+  %gep = getelementptr inbounds i32, ptr %p, i32 %ph
+  store i32 1, ptr %gep, align 4
   call void @clobber()
-  store i32 2, i32* %gep, align 4
+  store i32 2, ptr %gep, align 4
   br i1 %c, label %bb2, label %bb1
 bb2:
-  store i32 3, i32* %gep, align 4
+  store i32 3, ptr %gep, align 4
   %c1 = icmp slt i32 %ph, 10
   br i1 %c1, label %bb1, label %exit
 exit:
-  %pg = getelementptr inbounds i32, i32* %p, i32 %i
-  %l = load i32, i32* %pg
+  %pg = getelementptr inbounds i32, ptr %p, i32 %i
+  %l = load i32, ptr %pg
   ret i32 %l
 }
 
 declare i1 @cond() readnone
 
 ; TODO: We can eliminate the store in for.header, but we currently hit a MemoryPhi.
-define void @loop_multiple_def_uses(i32* noalias %P) {
+define void @loop_multiple_def_uses(ptr noalias %P) {
 ; CHECK-LABEL: @loop_multiple_def_uses(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
 ; CHECK:       for.header:
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C1]], label [[FOR_BODY:%.*]], label [[END:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[FOR_HEADER]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   br label %for.header
 
 for.header:
-  store i32 1, i32* %P, align 4
+  store i32 1, ptr %P, align 4
   %c1 = call i1 @cond()
   br i1 %c1, label %for.body, label %end
 
 for.body:
-  store i32 2, i32* %P, align 4
-  %lv = load i32, i32* %P
+  store i32 2, ptr %P, align 4
+  %lv = load i32, ptr %P
   br label %for.header
 
 end:
-  store i32 3, i32* %P, align 4
+  store i32 3, ptr %P, align 4
   ret void
 }
 
 ; We cannot eliminate the store in for.header, as it is only partially
 ; overwritten in for.body and read afterwards.
-define void @loop_multiple_def_uses_partial_write(i32* noalias %p) {
+define void @loop_multiple_def_uses_partial_write(ptr noalias %p) {
 ; CHECK-LABEL: @loop_multiple_def_uses_partial_write(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
 ; CHECK:       for.header:
-; CHECK-NEXT:    store i32 1239491, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1239491, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C1]], label [[FOR_BODY:%.*]], label [[END:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[C:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT:    store i8 1, i8* [[C]], align 4
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    store i8 1, ptr [[P]], align 4
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[FOR_HEADER]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   br label %for.header
 
 for.header:
-  store i32 1239491, i32* %p, align 4
+  store i32 1239491, ptr %p, align 4
   %c1 = call i1 @cond()
   br i1 %c1, label %for.body, label %end
 
 for.body:
-  %c = bitcast i32* %p to i8*
-  store i8 1, i8* %c, align 4
-  %lv = load i32, i32* %p
+  store i8 1, ptr %p, align 4
+  %lv = load i32, ptr %p
   br label %for.header
 
 end:
-  store i32 3, i32* %p, align 4
+  store i32 3, ptr %p, align 4
   ret void
 }
 
 ; We cannot eliminate the store in for.header, as the location is not overwritten
 ; in for.body and read afterwards.
-define void @loop_multiple_def_uses_mayalias_write(i32* %p, i32* %q) {
+define void @loop_multiple_def_uses_mayalias_write(ptr %p, ptr %q) {
 ; CHECK-LABEL: @loop_multiple_def_uses_mayalias_write(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
 ; CHECK:       for.header:
-; CHECK-NEXT:    store i32 1239491, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1239491, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C1]], label [[FOR_BODY:%.*]], label [[END:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    store i32 1, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[FOR_HEADER]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   br label %for.header
 
 for.header:
-  store i32 1239491, i32* %p, align 4
+  store i32 1239491, ptr %p, align 4
   %c1 = call i1 @cond()
   br i1 %c1, label %for.body, label %end
 
 for.body:
-  store i32 1, i32* %q, align 4
-  %lv = load i32, i32* %p
+  store i32 1, ptr %q, align 4
+  %lv = load i32, ptr %p
   br label %for.header
 
 end:
-  store i32 3, i32* %p, align 4
+  store i32 3, ptr %p, align 4
   ret void
 }
 
 %struct.hoge = type { i32, i32 }
 
- at global = external local_unnamed_addr global %struct.hoge*, align 8
+ at global = external local_unnamed_addr global ptr, align 8
 
-define void @widget(i8* %tmp) {
+define void @widget(ptr %tmp) {
 ; CHECK-LABEL: @widget(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[TMP:%.*]], i8* nonnull align 16 undef, i64 64, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP:%.*]], ptr nonnull align 16 undef, i64 64, i1 false)
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[TMP2:%.*]] = load %struct.hoge*, %struct.hoge** @global, align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_HOGE:%.*]], %struct.hoge* [[TMP2]], i64 undef, i32 1
-; CHECK-NEXT:    store i32 0, i32* [[TMP3]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = load %struct.hoge*, %struct.hoge** @global, align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_HOGE]], %struct.hoge* [[TMP4]], i64 undef, i32 1
-; CHECK-NEXT:    store i32 10, i32* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr @global, align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_HOGE:%.*]], ptr [[TMP2]], i64 undef, i32 1
+; CHECK-NEXT:    store i32 0, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr @global, align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_HOGE]], ptr [[TMP4]], i64 undef, i32 1
+; CHECK-NEXT:    store i32 10, ptr [[TMP5]], align 4
 ; CHECK-NEXT:    br label [[BB1]]
 ;
 bb:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %tmp, i8* nonnull align 16 undef, i64 64, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %tmp, ptr nonnull align 16 undef, i64 64, i1 false)
   br label %bb1
 
 bb1:                                              ; preds = %bb1, %bb
-  %tmp2 = load %struct.hoge*, %struct.hoge** @global, align 8
-  %tmp3 = getelementptr inbounds %struct.hoge, %struct.hoge* %tmp2, i64 undef, i32 1
-  store i32 0, i32* %tmp3, align 4
-  %tmp4 = load %struct.hoge*, %struct.hoge** @global, align 8
-  %tmp5 = getelementptr inbounds %struct.hoge, %struct.hoge* %tmp4, i64 undef, i32 1
-  store i32 10, i32* %tmp5, align 4
+  %tmp2 = load ptr, ptr @global, align 8
+  %tmp3 = getelementptr inbounds %struct.hoge, ptr %tmp2, i64 undef, i32 1
+  store i32 0, ptr %tmp3, align 4
+  %tmp4 = load ptr, ptr @global, align 8
+  %tmp5 = getelementptr inbounds %struct.hoge, ptr %tmp4, i64 undef, i32 1
+  store i32 10, ptr %tmp5, align 4
   br label %bb1
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
 
 @x = global [10 x i16] zeroinitializer, align 1
 
@@ -552,15 +546,15 @@ define i16 @test_loop_carried_dep() {
 ; CHECK-NEXT:    br label [[DO_BODY:%.*]]
 ; CHECK:       do.body:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[IF_END:%.*]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
-; CHECK-NEXT:    store i16 2, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    store i16 2, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[I_0]], 4
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[IF_END10:%.*]], label [[IF_END]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[I_0]], 1
 ; CHECK-NEXT:    br label [[DO_BODY]]
 ; CHECK:       if.end10:
-; CHECK-NEXT:    store i16 1, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    store i16 1, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:
@@ -568,8 +562,8 @@ entry:
 
 do.body:                                          ; preds = %if.end, %entry
   %i.0 = phi i16 [ 0, %entry ], [ %inc, %if.end ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
-  store i16 2, i16* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
+  store i16 2, ptr %arrayidx2, align 1
   %exitcond = icmp eq i16 %i.0, 4
   br i1 %exitcond, label %if.end10, label %if.end
 
@@ -578,7 +572,7 @@ if.end:                                           ; preds = %do.body
   br label %do.body
 
 if.end10:                                         ; preds = %do.body
-  store i16 1, i16* %arrayidx2, align 1
+  store i16 1, ptr %arrayidx2, align 1
   ret i16 0
 }
 
@@ -589,17 +583,17 @@ define i16 @irreducible(i1 %c) {
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       A:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[B]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
 ; CHECK-NEXT:    br label [[B]]
 ; CHECK:       B:
 ; CHECK-NEXT:    [[J_0:%.*]] = phi i16 [ 0, [[ENTRY]] ], [ [[I_0]], [[A]] ]
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[J_0]]
-; CHECK-NEXT:    store i16 2, i16* [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[J_0]]
+; CHECK-NEXT:    store i16 2, ptr [[ARRAYIDX]], align 1
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[J_0]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[J_0]], 4
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[A]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i16 1, i16* [[ARRAYIDX]], align 1
+; CHECK-NEXT:    store i16 1, ptr [[ARRAYIDX]], align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:
@@ -607,19 +601,19 @@ entry:
 
 A:
   %i.0 = phi i16 [ 0, %entry ], [ %inc, %B ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
   br label %B
 
 B:
   %j.0 = phi i16 [ 0, %entry ], [ %i.0, %A ]
-  %arrayidx = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %j.0
-  store i16 2, i16* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %j.0
+  store i16 2, ptr %arrayidx, align 1
   %inc = add nuw nsw i16 %j.0, 1
   %exitcond = icmp eq i16 %j.0, 4
   br i1 %exitcond, label %exit, label %A
 
 exit:
-  store i16 1, i16* %arrayidx, align 1
+  store i16 1, ptr %arrayidx, align 1
   ret i16 0
 }
 
@@ -634,17 +628,17 @@ define i16 @irreducible_nested() {
 ; CHECK-NEXT:    br i1 [[C]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       A:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[OUTER]] ], [ [[INC:%.*]], [[B]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
 ; CHECK-NEXT:    br label [[B]]
 ; CHECK:       B:
 ; CHECK-NEXT:    [[J_0:%.*]] = phi i16 [ 0, [[OUTER]] ], [ [[I_0]], [[A]] ]
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[J_0]]
-; CHECK-NEXT:    store i16 2, i16* [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[J_0]]
+; CHECK-NEXT:    store i16 2, ptr [[ARRAYIDX]], align 1
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[J_0]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[J_0]], 4
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[OUTERL]], label [[A]]
 ; CHECK:       outerl:
-; CHECK-NEXT:    store i16 1, i16* [[ARRAYIDX]], align 1
+; CHECK-NEXT:    store i16 1, ptr [[ARRAYIDX]], align 1
 ; CHECK-NEXT:    [[INCX]] = add nuw nsw i16 [[X]], 1
 ; CHECK-NEXT:    [[EXITCONDX:%.*]] = icmp eq i16 [[X]], 4
 ; CHECK-NEXT:    br i1 [[EXITCONDX]], label [[END:%.*]], label [[OUTER]]
@@ -661,19 +655,19 @@ outer:
 
 A:
   %i.0 = phi i16 [ 0, %outer ], [ %inc, %B ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
   br label %B
 
 B:
   %j.0 = phi i16 [ 0, %outer ], [ %i.0, %A ]
-  %arrayidx = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %j.0
-  store i16 2, i16* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %j.0
+  store i16 2, ptr %arrayidx, align 1
   %inc = add nuw nsw i16 %j.0, 1
   %exitcond = icmp eq i16 %j.0, 4
   br i1 %exitcond, label %outerl, label %A
 
 outerl:
-  store i16 1, i16* %arrayidx, align 1
+  store i16 1, ptr %arrayidx, align 1
   %incx = add nuw nsw i16 %x, 1
   %exitcondx = icmp eq i16 %x, 4
   br i1 %exitcondx, label %end, label %outer
@@ -688,20 +682,20 @@ define i16 @multi_overwrite(i1 %cond) {
 ; CHECK-NEXT:    br label [[DO_BODY:%.*]]
 ; CHECK:       do.body:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[IF_END2:%.*]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
-; CHECK-NEXT:    store i16 2, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    store i16 2, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[I_0]], 4
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[DO_STORE:%.*]], label [[IF_END2]]
 ; CHECK:       do.store:
-; CHECK-NEXT:    store i16 3, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    store i16 3, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    br label [[IF_END2]]
 ; CHECK:       if.end2:
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[I_0]], 1
 ; CHECK-NEXT:    br label [[DO_BODY]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i16 1, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    store i16 1, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:
@@ -709,8 +703,8 @@ entry:
 
 do.body:
   %i.0 = phi i16 [ 0, %entry ], [ %inc, %if.end2 ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
-  store i16 2, i16* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
+  store i16 2, ptr %arrayidx2, align 1
   %exitcond = icmp eq i16 %i.0, 4
   br i1 %exitcond, label %exit, label %if.end
 
@@ -718,7 +712,7 @@ if.end:
   br i1 %cond, label %do.store, label %if.end2
 
 do.store:
-  store i16 3, i16* %arrayidx2, align 1
+  store i16 3, ptr %arrayidx2, align 1
   br label %if.end2
 
 if.end2:
@@ -726,11 +720,11 @@ if.end2:
   br label %do.body
 
 exit:
-  store i16 1, i16* %arrayidx2, align 1
+  store i16 1, ptr %arrayidx2, align 1
   ret i16 0
 }
 
-define void @test(i8* noalias %data1, i8* %data2, i16* %data3, i32 %i1)
+define void @test(ptr noalias %data1, ptr %data2, ptr %data3, i32 %i1)
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[I1:%.*]], 0
 ; CHECK-NEXT:    br label [[PH0:%.*]]
@@ -740,11 +734,11 @@ define void @test(i8* noalias %data1, i8* %data2, i16* %data3, i32 %i1)
 ; CHECK-NEXT:    [[P1:%.*]] = phi i32 [ 0, [[PH0]] ], [ [[PN1:%.*]], [[END1:%.*]] ]
 ; CHECK-NEXT:    [[PN1]] = add i32 [[P1]], 1
 ; CHECK-NEXT:    [[PC1:%.*]] = icmp slt i32 [[PN1]], 5
-; CHECK-NEXT:    [[V2:%.*]] = getelementptr [10 x i16], [10 x i16]* @x, i32 0, i32 [[P1]]
-; CHECK-NEXT:    store i16 1, i16* [[V2]], align 2
+; CHECK-NEXT:    [[V2:%.*]] = getelementptr [10 x i16], ptr @x, i32 0, i32 [[P1]]
+; CHECK-NEXT:    store i16 1, ptr [[V2]], align 2
 ; CHECK-NEXT:    br i1 [[C]], label [[THEN1:%.*]], label [[ELSE1:%.*]]
 ; CHECK:       then1:
-; CHECK-NEXT:    store i16 2, i16* [[V2]], align 2
+; CHECK-NEXT:    store i16 2, ptr [[V2]], align 2
 ; CHECK-NEXT:    br label [[END1]]
 ; CHECK:       else1:
 ; CHECK-NEXT:    br label [[END1]]
@@ -756,7 +750,7 @@ define void @test(i8* noalias %data1, i8* %data2, i16* %data3, i32 %i1)
 ; CHECK-NEXT:    [[P3:%.*]] = phi i32 [ 0, [[END0]] ], [ [[PN3:%.*]], [[HEADER2]] ]
 ; CHECK-NEXT:    [[PN3]] = add i32 [[P3]], 1
 ; CHECK-NEXT:    [[PC3:%.*]] = icmp slt i32 [[PN3]], 5
-; CHECK-NEXT:    store i16 4, i16* [[V2]], align 2
+; CHECK-NEXT:    store i16 4, ptr [[V2]], align 2
 ; CHECK-NEXT:    br i1 [[PC3]], label [[HEADER2]], label [[END2:%.*]]
 ; CHECK:       end2:
 ; CHECK-NEXT:    ret void
@@ -770,11 +764,11 @@ header0:
   %p1 = phi i32 [0, %ph0], [%pn1, %end1]
   %pn1 = add i32 %p1, 1
   %pc1 = icmp slt i32 %pn1, 5
-  %v2 = getelementptr [10 x i16], [10 x i16]* @x, i32 0, i32 %p1
-  store i16 1, i16* %v2
+  %v2 = getelementptr [10 x i16], ptr @x, i32 0, i32 %p1
+  store i16 1, ptr %v2
   br i1 %c, label %then1, label %else1
 then1:
-  store i16 2, i16* %v2
+  store i16 2, ptr %v2
   br label %end1
 else1:
   br label %end1
@@ -786,7 +780,7 @@ header2:
   %p3 = phi i32 [0, %end0], [%pn3, %header2]
   %pn3 = add i32 %p3, 1
   %pc3 = icmp slt i32 %pn3, 5
-  store i16 4, i16* %v2
+  store i16 4, ptr %v2
   br i1 %pc3, label %header2, label %end2
 end2:
   ret void
@@ -799,23 +793,22 @@ define i16 @partial_override_fromloop(i1 %c, i32 %i) {
 ; CHECK-NEXT:    br label [[DO_BODY:%.*]]
 ; CHECK:       do.body:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[IF_END2:%.*]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
-; CHECK-NEXT:    store i16 2, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    store i16 2, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[I_0]], 4
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[DO_STORE:%.*]], label [[IF_END2]]
 ; CHECK:       do.store:
-; CHECK-NEXT:    store i16 3, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    store i16 3, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    br label [[IF_END2]]
 ; CHECK:       if.end2:
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[I_0]], 1
 ; CHECK-NEXT:    br label [[DO_BODY]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i16* [[ARRAYIDX2]] to i8*
-; CHECK-NEXT:    [[BC2:%.*]] = getelementptr inbounds i8, i8* [[BC]], i32 1
-; CHECK-NEXT:    store i8 10, i8* [[BC]], align 1
-; CHECK-NEXT:    store i8 11, i8* [[BC2]], align 1
+; CHECK-NEXT:    [[BC2:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX2]], i32 1
+; CHECK-NEXT:    store i8 10, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    store i8 11, ptr [[BC2]], align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:
@@ -823,8 +816,8 @@ entry:
 
 do.body:
   %i.0 = phi i16 [ 0, %entry ], [ %inc, %if.end2 ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
-  store i16 2, i16* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
+  store i16 2, ptr %arrayidx2, align 1
   %exitcond = icmp eq i16 %i.0, 4
   br i1 %exitcond, label %exit, label %if.end
 
@@ -832,7 +825,7 @@ if.end:
   br i1 %c, label %do.store, label %if.end2
 
 do.store:
-  store i16 3, i16* %arrayidx2, align 1
+  store i16 3, ptr %arrayidx2, align 1
   br label %if.end2
 
 if.end2:
@@ -840,10 +833,9 @@ if.end2:
   br label %do.body
 
 exit:
-  %bc = bitcast i16* %arrayidx2 to i8*
-  %bc2 = getelementptr inbounds i8, i8* %bc, i32 1
-  store i8 10, i8* %bc, align 1
-  store i8 11, i8* %bc2, align 1
+  %bc2 = getelementptr inbounds i8, ptr %arrayidx2, i32 1
+  store i8 10, ptr %arrayidx2, align 1
+  store i8 11, ptr %bc2, align 1
   ret i16 0
 }
 
@@ -853,21 +845,20 @@ define i16 @partial_override_overloop(i1 %c, i32 %i) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FIRST:%.*]]
 ; CHECK:       first:
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i32 [[I:%.*]]
-; CHECK-NEXT:    store i16 1, i16* [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i32 [[I:%.*]]
+; CHECK-NEXT:    store i16 1, ptr [[ARRAYIDX]], align 1
 ; CHECK-NEXT:    br label [[DO_BODY:%.*]]
 ; CHECK:       do.body:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[FIRST]] ], [ [[INC:%.*]], [[DO_BODY]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
-; CHECK-NEXT:    store i16 2, i16* [[ARRAYIDX2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    store i16 2, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[I_0]], 4
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[I_0]], 1
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[DO_BODY]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i16* [[ARRAYIDX]] to i8*
-; CHECK-NEXT:    [[BC2:%.*]] = getelementptr inbounds i8, i8* [[BC]], i32 1
-; CHECK-NEXT:    store i8 10, i8* [[BC]], align 1
-; CHECK-NEXT:    store i8 11, i8* [[BC2]], align 1
+; CHECK-NEXT:    [[BC2:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX]], i32 1
+; CHECK-NEXT:    store i8 10, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    store i8 11, ptr [[BC2]], align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:
@@ -875,23 +866,22 @@ entry:
   br label %first
 
 first:
-  %arrayidx = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i32 %i
-  store i16 1, i16* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds [10 x i16], ptr @x, i16 0, i32 %i
+  store i16 1, ptr %arrayidx, align 1
   br label %do.body
 
 do.body:
   %i.0 = phi i16 [ 0, %first ], [ %inc, %do.body ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
-  store i16 2, i16* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
+  store i16 2, ptr %arrayidx2, align 1
   %exitcond = icmp eq i16 %i.0, 4
   %inc = add nuw nsw i16 %i.0, 1
   br i1 %exitcond, label %exit, label %do.body
 
 exit:
-  %bc = bitcast i16* %arrayidx to i8*
-  %bc2 = getelementptr inbounds i8, i8* %bc, i32 1
-  store i8 10, i8* %bc, align 1
-  store i8 11, i8* %bc2, align 1
+  %bc2 = getelementptr inbounds i8, ptr %arrayidx, i32 1
+  store i8 10, ptr %arrayidx, align 1
+  store i8 11, ptr %bc2, align 1
   ret i16 0
 }
 
@@ -901,15 +891,14 @@ define i16 @partial_override_multi(i1 %c, i32 %i) {
 ; CHECK-NEXT:    br label [[DO_BODY:%.*]]
 ; CHECK:       do.body:
 ; CHECK-NEXT:    [[I_0:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[DO_BODY]] ]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 [[I_0]]
-; CHECK-NEXT:    store i16 10, i16* [[ARRAYIDX2]], align 1
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i16* [[ARRAYIDX2]] to i8*
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 [[I_0]]
+; CHECK-NEXT:    store i16 10, ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i16 [[I_0]], 4
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[I_0]], 1
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[DO_BODY]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[BC2:%.*]] = getelementptr inbounds i8, i8* [[BC]], i32 1
-; CHECK-NEXT:    store i8 11, i8* [[BC2]], align 1
+; CHECK-NEXT:    [[BC2:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX2]], i32 1
+; CHECK-NEXT:    store i8 11, ptr [[BC2]], align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:
@@ -917,34 +906,33 @@ entry:
 
 do.body:
   %i.0 = phi i16 [ 0, %entry ], [ %inc, %do.body ]
-  %arrayidx2 = getelementptr inbounds [10 x i16], [10 x i16]* @x, i16 0, i16 %i.0
-  store i16 2, i16* %arrayidx2, align 1
-  %bc = bitcast i16* %arrayidx2 to i8*
-  store i8 10, i8* %bc, align 1
+  %arrayidx2 = getelementptr inbounds [10 x i16], ptr @x, i16 0, i16 %i.0
+  store i16 2, ptr %arrayidx2, align 1
+  store i8 10, ptr %arrayidx2, align 1
   %exitcond = icmp eq i16 %i.0, 4
   %inc = add nuw nsw i16 %i.0, 1
   br i1 %exitcond, label %exit, label %do.body
 
 exit:
-  %bc2 = getelementptr inbounds i8, i8* %bc, i32 1
-  store i8 11, i8* %bc2, align 1
+  %bc2 = getelementptr inbounds i8, ptr %arrayidx2, i32 1
+  store i8 11, ptr %bc2, align 1
   ret i16 0
 }
 
-define void @InitializeMasks(i64* %p) {
+define void @InitializeMasks(ptr %p) {
 ; CHECK-LABEL: @InitializeMasks(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_BODY98:%.*]]
 ; CHECK:       for.body98:
 ; CHECK-NEXT:    [[INDVARS_IV377:%.*]] = phi i64 [ 8, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[FOR_INC140:%.*]] ], [ [[INC1:%.*]], [[FOR_INC140_THREAD:%.*]] ]
-; CHECK-NEXT:    [[ARRAYIDX106:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 [[INDVARS_IV377]]
-; CHECK-NEXT:    store i64 1, i64* [[ARRAYIDX106]], align 8
+; CHECK-NEXT:    [[ARRAYIDX106:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDVARS_IV377]]
+; CHECK-NEXT:    store i64 1, ptr [[ARRAYIDX106]], align 8
 ; CHECK-NEXT:    [[CMP107:%.*]] = icmp ugt i64 [[INDVARS_IV377]], 15
 ; CHECK-NEXT:    br i1 [[CMP107]], label [[IF_END:%.*]], label [[IF_END_THREAD:%.*]]
 ; CHECK:       if.end.thread:
 ; CHECK-NEXT:    br label [[FOR_INC140_THREAD]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i64 2, i64* [[ARRAYIDX106]], align 8
+; CHECK-NEXT:    store i64 2, ptr [[ARRAYIDX106]], align 8
 ; CHECK-NEXT:    [[CMP127:%.*]] = icmp ult i64 [[INDVARS_IV377]], 48
 ; CHECK-NEXT:    br i1 [[CMP127]], label [[FOR_INC140_THREAD]], label [[FOR_INC140]]
 ; CHECK:       for.inc140.thread:
@@ -962,8 +950,8 @@ entry:
 
 for.body98:                                       ; preds = %for.inc140, %for.inc140.thread, %entry
   %indvars.iv377 = phi i64 [ 8, %entry ], [ %inc2, %for.inc140 ], [ %inc1, %for.inc140.thread ]
-  %arrayidx106 = getelementptr inbounds i64, i64* %p, i64 %indvars.iv377
-  store i64 1, i64* %arrayidx106, align 8
+  %arrayidx106 = getelementptr inbounds i64, ptr %p, i64 %indvars.iv377
+  store i64 1, ptr %arrayidx106, align 8
   %cmp107 = icmp ugt i64 %indvars.iv377, 15
   br i1 %cmp107, label %if.end, label %if.end.thread
 
@@ -971,7 +959,7 @@ if.end.thread:                                    ; preds = %for.body98
   br label %for.inc140.thread
 
 if.end:                                           ; preds = %for.body98
-  store i64 2, i64* %arrayidx106, align 8
+  store i64 2, ptr %arrayidx106, align 8
   %cmp127 = icmp ult i64 %indvars.iv377, 48
   br i1 %cmp127, label %for.inc140.thread, label %for.inc140
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
index fcc877ae37c6f..c778a3870e684 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
@@ -4,173 +4,163 @@
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 declare void @unknown_func()
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind
 
-declare noalias i8* @calloc(i64, i64) #5
-declare noalias i8* @malloc(i64) #0
-declare noalias i8* @strdup(i8* nocapture readonly) #1
-declare void @free(i8* nocapture) #2
+declare noalias ptr @calloc(i64, i64) #5
+declare noalias ptr @malloc(i64) #0
+declare noalias ptr @strdup(ptr nocapture readonly) #1
+declare void @free(ptr nocapture) #2
 
-define void @test16(i32* noalias %P) {
+define void @test16(ptr noalias %P) {
 ; CHECK-LABEL: @test16(
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P:%.*]] to i8*
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    call void @free(i8* [[P2]])
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    call void @free(ptr [[P:%.*]])
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %P2 = bitcast i32* %P to i8*
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 true, label %bb1, label %bb3
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb3:
-  call void @free(i8* %P2)
-  store i32 1, i32* %P
+  call void @free(ptr %P)
+  store i32 1, ptr %P
   ret void
 }
 
 ; We cannot remove the store in the entry block, because @unknown_func could
 ; unwind and the stored value could be read by the caller.
-define void @test17(i32* noalias %P) {
+define void @test17(ptr noalias %P) {
 ; CHECK-LABEL: @test17(
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    call void @free(i8* [[P2]])
+; CHECK-NEXT:    call void @free(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
-  %P2 = bitcast i32* %P to i8*
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 true, label %bb1, label %bb3
 bb1:
   call void @unknown_func()
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb3:
-  call void @free(i8* %P2)
+  call void @free(ptr %P)
   ret void
 }
 
-define void @test17_read_after_free(i32* noalias %P) {
+define void @test17_read_after_free(ptr noalias %P) {
 ; CHECK-LABEL: @test17_read_after_free(
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P:%.*]] to i8*
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    call void @free(i8* [[P2]])
-; CHECK-NEXT:    [[LV:%.*]] = load i8, i8* [[P2]], align 1
+; CHECK-NEXT:    call void @free(ptr [[P:%.*]])
+; CHECK-NEXT:    [[LV:%.*]] = load i8, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %P2 = bitcast i32* %P to i8*
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 true, label %bb1, label %bb3
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb3:
-  call void @free(i8* %P2)
-  %lv = load i8, i8* %P2
+  call void @free(ptr %P)
+  %lv = load i8, ptr %P
   ret void
 }
 
-define void @test19(i32* noalias %P) {
+define void @test19(ptr noalias %P) {
 ; CHECK-LABEL: @test19(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 28, i1 false)
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   br label %bb3
 bb3:
   ret void
 }
 
 
-define void @test20(i32* noalias %P) {
+define void @test20(ptr noalias %P) {
 ; CHECK-LABEL: @test20(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
-define i8* @test26() {
+define ptr @test26() {
 ; CHECK-LABEL: @test26(
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    br i1 true, label [[BB2:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[M:%.*]] = call noalias i8* @malloc(i64 10)
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call noalias ptr @malloc(i64 10)
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[R:%.*]] = phi i8* [ null, [[BB1:%.*]] ], [ [[M]], [[BB2]] ]
-; CHECK-NEXT:    ret i8* [[R]]
+; CHECK-NEXT:    [[R:%.*]] = phi ptr [ null, [[BB1:%.*]] ], [ [[M]], [[BB2]] ]
+; CHECK-NEXT:    ret ptr [[R]]
 ;
 bb1:
   br i1 true, label %bb2, label %bb3
 bb2:
-  %m = call noalias i8* @malloc(i64 10)
-  store i8 1, i8* %m
+  %m = call noalias ptr @malloc(i64 10)
+  store i8 1, ptr %m
   br label %bb3
 bb3:
-  %r = phi i8* [ null, %bb1 ], [ %m, %bb2 ]
-  ret i8* %r
+  %r = phi ptr [ null, %bb1 ], [ %m, %bb2 ]
+  ret ptr %r
 }
 
 
@@ -179,214 +169,184 @@ define void @test27() {
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    br i1 true, label [[BB2:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[M:%.*]] = call noalias i8* @malloc(i64 10)
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call noalias ptr @malloc(i64 10)
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[R:%.*]] = phi i8* [ null, [[BB1:%.*]] ], [ [[M]], [[BB2]] ]
+; CHECK-NEXT:    [[R:%.*]] = phi ptr [ null, [[BB1:%.*]] ], [ [[M]], [[BB2]] ]
 ; CHECK-NEXT:    ret void
 ;
 bb1:
   br i1 true, label %bb2, label %bb3
 bb2:
-  %m = call noalias i8* @malloc(i64 10)
-  store i8 1, i8* %m
+  %m = call noalias ptr @malloc(i64 10)
+  store i8 1, ptr %m
   br label %bb3
 bb3:
-  %r = phi i8* [ null, %bb1 ], [ %m, %bb2 ]
+  %r = phi ptr [ null, %bb1 ], [ %m, %bb2 ]
   ret void
 }
 
-define i8* @test27_pointer_escape() {
+define ptr @test27_pointer_escape() {
 ; CHECK-LABEL: @test27_pointer_escape(
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    br i1 true, label [[BB2:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[M:%.*]] = call noalias i8* @malloc(i64 10)
-; CHECK-NEXT:    store i8 1, i8* [[M]], align 1
+; CHECK-NEXT:    [[M:%.*]] = call noalias ptr @malloc(i64 10)
+; CHECK-NEXT:    store i8 1, ptr [[M]], align 1
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[R:%.*]] = phi i8* [ null, [[BB1:%.*]] ], [ [[M]], [[BB2]] ]
-; CHECK-NEXT:    ret i8* [[R]]
+; CHECK-NEXT:    [[R:%.*]] = phi ptr [ null, [[BB1:%.*]] ], [ [[M]], [[BB2]] ]
+; CHECK-NEXT:    ret ptr [[R]]
 ;
 bb1:
   br i1 true, label %bb2, label %bb3
 bb2:
-  %m = call noalias i8* @malloc(i64 10)
-  store i8 1, i8* %m
+  %m = call noalias ptr @malloc(i64 10)
+  store i8 1, ptr %m
   br label %bb3
 bb3:
-  %r = phi i8* [ null, %bb1 ], [ %m, %bb2 ]
-  ret i8* %r
+  %r = phi ptr [ null, %bb1 ], [ %m, %bb2 ]
+  ret ptr %r
 }
 
-define i8* @test28() {
+define ptr @test28() {
 ; CHECK-LABEL: @test28(
 ; CHECK-NEXT:  bb0:
-; CHECK-NEXT:    [[M:%.*]] = call noalias i8* @malloc(i64 10)
-; CHECK-NEXT:    [[MC0:%.*]] = bitcast i8* [[M]] to i8*
-; CHECK-NEXT:    [[MC1:%.*]] = bitcast i8* [[MC0]] to i8*
-; CHECK-NEXT:    [[MC2:%.*]] = bitcast i8* [[MC1]] to i8*
-; CHECK-NEXT:    [[MC3:%.*]] = bitcast i8* [[MC2]] to i8*
-; CHECK-NEXT:    [[MC4:%.*]] = bitcast i8* [[MC3]] to i8*
-; CHECK-NEXT:    [[MC5:%.*]] = bitcast i8* [[MC4]] to i8*
-; CHECK-NEXT:    [[MC6:%.*]] = bitcast i8* [[MC5]] to i8*
-; CHECK-NEXT:    [[M0:%.*]] = bitcast i8* [[MC6]] to i8*
-; CHECK-NEXT:    store i8 2, i8* [[M]], align 1
-; CHECK-NEXT:    ret i8* [[M0]]
+; CHECK-NEXT:    [[M:%.*]] = call noalias ptr @malloc(i64 10)
+; CHECK-NEXT:    store i8 2, ptr [[M]], align 1
+; CHECK-NEXT:    ret ptr [[M]]
 ;
 bb0:
-  %m = call noalias i8* @malloc(i64 10)
-  %mc0 = bitcast i8* %m to i8*
-  %mc1 = bitcast i8* %mc0 to i8*
-  %mc2 = bitcast i8* %mc1 to i8*
-  %mc3 = bitcast i8* %mc2 to i8*
-  %mc4 = bitcast i8* %mc3 to i8*
-  %mc5 = bitcast i8* %mc4 to i8*
-  %mc6 = bitcast i8* %mc5 to i8*
-  %m0 = bitcast i8* %mc6 to i8*
-  store i8 2, i8* %m
-  ret i8* %m0
+  %m = call noalias ptr @malloc(i64 10)
+  store i8 2, ptr %m
+  ret ptr %m
 }
 
-%struct.SystemCallMapElementStruct = type { i8*, i32, %struct.NodePtrVecStruct* }
-%struct.NodePtrVecStruct = type { i32, i32, %struct.NodeStruct** }
-%struct.NodeStruct = type { i32, i32, i8*, i32, i32, %struct.NodeStruct*, %struct.NodeListStruct*, %struct.EdgeListStruct*, i32, i32 }
-%struct.NodeListStruct = type { %struct.NodeStruct*, %struct.NodeListStruct* }
-%struct.EdgeListStruct = type { i32, %struct.NodeStruct*, %struct.EdgeListStruct* }
-%struct.SystemCallMapStruct = type { i32, i32, %struct.SystemCallMapElementStruct** }
+%struct.SystemCallMapElementStruct = type { ptr, i32, ptr }
+%struct.NodePtrVecStruct = type { i32, i32, ptr }
+%struct.NodeStruct = type { i32, i32, ptr, i32, i32, ptr, ptr, ptr, i32, i32 }
+%struct.NodeListStruct = type { ptr, ptr }
+%struct.EdgeListStruct = type { i32, ptr, ptr }
+%struct.SystemCallMapStruct = type { i32, i32, ptr }
 
-declare %struct.NodePtrVecStruct* @NodePtrVec_new(i32)
+declare ptr @NodePtrVec_new(i32)
 
-define noalias %struct.SystemCallMapElementStruct* @SystemCallMapElement_new(i8* nocapture readonly %label, i32 %initialSize) {
+define noalias ptr @SystemCallMapElement_new(ptr nocapture readonly %label, i32 %initialSize) {
 ; CHECK-LABEL: @SystemCallMapElement_new(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = tail call dereferenceable_or_null(24) i8* @malloc(i64 24) #[[ATTR7:[0-9]+]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[CALL]] to %struct.SystemCallMapElementStruct*
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i8* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = tail call dereferenceable_or_null(24) ptr @malloc(i64 24) #[[ATTR7:[0-9]+]]
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq ptr [[CALL]], null
 ; CHECK-NEXT:    br i1 [[TOBOOL]], label [[CLEANUP:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    [[CALL1:%.*]] = tail call i8* @strdup(i8* [[LABEL:%.*]])
-; CHECK-NEXT:    [[LABEL2:%.*]] = bitcast i8* [[CALL]] to i8**
-; CHECK-NEXT:    store i8* [[CALL1]], i8** [[LABEL2]], align 8
-; CHECK-NEXT:    [[INDEX:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 8
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[INDEX]] to i32*
-; CHECK-NEXT:    store i32 -1, i32* [[TMP1]], align 8
-; CHECK-NEXT:    [[TOBOOL4:%.*]] = icmp eq i8* [[CALL1]], null
+; CHECK-NEXT:    [[CALL1:%.*]] = tail call ptr @strdup(ptr [[LABEL:%.*]])
+; CHECK-NEXT:    store ptr [[CALL1]], ptr [[CALL]], align 8
+; CHECK-NEXT:    [[INDEX:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 8
+; CHECK-NEXT:    store i32 -1, ptr [[INDEX]], align 8
+; CHECK-NEXT:    [[TOBOOL4:%.*]] = icmp eq ptr [[CALL1]], null
 ; CHECK-NEXT:    br i1 [[TOBOOL4]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.then5:
-; CHECK-NEXT:    tail call void @free(i8* nonnull [[CALL]])
+; CHECK-NEXT:    tail call void @free(ptr nonnull [[CALL]])
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    [[CALL6:%.*]] = tail call %struct.NodePtrVecStruct* @NodePtrVec_new(i32 [[INITIALSIZE:%.*]]) #[[ATTR5:[0-9]+]]
-; CHECK-NEXT:    [[NODES:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 16
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[NODES]] to %struct.NodePtrVecStruct**
-; CHECK-NEXT:    store %struct.NodePtrVecStruct* [[CALL6]], %struct.NodePtrVecStruct** [[TMP2]], align 8
-; CHECK-NEXT:    [[TOBOOL8:%.*]] = icmp eq %struct.NodePtrVecStruct* [[CALL6]], null
+; CHECK-NEXT:    [[CALL6:%.*]] = tail call ptr @NodePtrVec_new(i32 [[INITIALSIZE:%.*]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    [[NODES:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 16
+; CHECK-NEXT:    store ptr [[CALL6]], ptr [[NODES]], align 8
+; CHECK-NEXT:    [[TOBOOL8:%.*]] = icmp eq ptr [[CALL6]], null
 ; CHECK-NEXT:    br i1 [[TOBOOL8]], label [[IF_THEN9:%.*]], label [[CLEANUP]]
 ; CHECK:       if.then9:
-; CHECK-NEXT:    tail call void @free(i8* nonnull [[CALL]])
+; CHECK-NEXT:    tail call void @free(ptr nonnull [[CALL]])
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
-; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi %struct.SystemCallMapElementStruct* [ null, [[IF_THEN9]] ], [ null, [[IF_THEN5]] ], [ [[TMP0]], [[IF_END]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret %struct.SystemCallMapElementStruct* [[RETVAL_0]]
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi ptr [ null, [[IF_THEN9]] ], [ null, [[IF_THEN5]] ], [ [[CALL]], [[IF_END]] ], [ [[CALL]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[RETVAL_0]]
 ;
 entry:
-  %call = tail call dereferenceable_or_null(24) i8* @malloc(i64 24) #4
-  %0 = bitcast i8* %call to %struct.SystemCallMapElementStruct*
-  %tobool = icmp eq i8* %call, null
+  %call = tail call dereferenceable_or_null(24) ptr @malloc(i64 24) #4
+  %tobool = icmp eq ptr %call, null
   br i1 %tobool, label %cleanup, label %if.then
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i8* @strdup(i8* %label)
-  %label2 = bitcast i8* %call to i8**
-  store i8* %call1, i8** %label2, align 8
-  %index = getelementptr inbounds i8, i8* %call, i64 8
-  %1 = bitcast i8* %index to i32*
-  store i32 -1, i32* %1, align 8
-  %tobool4 = icmp eq i8* %call1, null
+  %call1 = tail call ptr @strdup(ptr %label)
+  store ptr %call1, ptr %call, align 8
+  %index = getelementptr inbounds i8, ptr %call, i64 8
+  store i32 -1, ptr %index, align 8
+  %tobool4 = icmp eq ptr %call1, null
   br i1 %tobool4, label %if.then5, label %if.end
 
 if.then5:                                         ; preds = %if.then
-  tail call void @free(i8* nonnull %call)
+  tail call void @free(ptr nonnull %call)
   br label %cleanup
 
 if.end:                                           ; preds = %if.then
-  %call6 = tail call %struct.NodePtrVecStruct* @NodePtrVec_new(i32 %initialSize) #2
-  %nodes = getelementptr inbounds i8, i8* %call, i64 16
-  %2 = bitcast i8* %nodes to %struct.NodePtrVecStruct**
-  store %struct.NodePtrVecStruct* %call6, %struct.NodePtrVecStruct** %2, align 8
-  %tobool8 = icmp eq %struct.NodePtrVecStruct* %call6, null
+  %call6 = tail call ptr @NodePtrVec_new(i32 %initialSize) #2
+  %nodes = getelementptr inbounds i8, ptr %call, i64 16
+  store ptr %call6, ptr %nodes, align 8
+  %tobool8 = icmp eq ptr %call6, null
   br i1 %tobool8, label %if.then9, label %cleanup
 
 if.then9:                                         ; preds = %if.end
-  tail call void @free(i8* nonnull %call)
+  tail call void @free(ptr nonnull %call)
   br label %cleanup
 
 cleanup:                                          ; preds = %entry, %if.end, %if.then9, %if.then5
-  %retval.0 = phi %struct.SystemCallMapElementStruct* [ null, %if.then9 ], [ null, %if.then5 ], [ %0, %if.end ], [ %0, %entry ]
-  ret %struct.SystemCallMapElementStruct* %retval.0
+  %retval.0 = phi ptr [ null, %if.then9 ], [ null, %if.then5 ], [ %call, %if.end ], [ %call, %entry ]
+  ret ptr %retval.0
 }
 
-%struct.BitfieldStruct = type { i32, i8* }
+%struct.BitfieldStruct = type { i32, ptr }
 
-define noalias %struct.BitfieldStruct* @Bitfield_new(i32 %bitsNeeded) {
+define noalias ptr @Bitfield_new(i32 %bitsNeeded) {
 ; CHECK-LABEL: @Bitfield_new(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = tail call dereferenceable_or_null(16) i8* @malloc(i64 16) #[[ATTR7]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[CALL]] to %struct.BitfieldStruct*
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i8* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = tail call dereferenceable_or_null(16) ptr @malloc(i64 16) #[[ATTR7]]
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq ptr [[CALL]], null
 ; CHECK-NEXT:    br i1 [[TOBOOL]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[BITSNEEDED:%.*]], 7
 ; CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 [[ADD]], 8
 ; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
-; CHECK-NEXT:    [[CALL1:%.*]] = tail call i8* @calloc(i64 [[CONV]], i64 1) #[[ATTR8:[0-9]+]]
-; CHECK-NEXT:    [[BITFIELD:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 8
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[BITFIELD]] to i8**
-; CHECK-NEXT:    store i8* [[CALL1]], i8** [[TMP1]], align 8
-; CHECK-NEXT:    [[TOBOOL3:%.*]] = icmp eq i8* [[CALL1]], null
+; CHECK-NEXT:    [[CALL1:%.*]] = tail call ptr @calloc(i64 [[CONV]], i64 1) #[[ATTR8:[0-9]+]]
+; CHECK-NEXT:    [[BITFIELD:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 8
+; CHECK-NEXT:    store ptr [[CALL1]], ptr [[BITFIELD]], align 8
+; CHECK-NEXT:    [[TOBOOL3:%.*]] = icmp eq ptr [[CALL1]], null
 ; CHECK-NEXT:    br i1 [[TOBOOL3]], label [[IF_THEN4:%.*]], label [[IF_END5:%.*]]
 ; CHECK:       if.then4:
-; CHECK-NEXT:    tail call void @free(i8* nonnull [[CALL]])
+; CHECK-NEXT:    tail call void @free(ptr nonnull [[CALL]])
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       if.end5:
-; CHECK-NEXT:    [[BITSNEEDED6:%.*]] = bitcast i8* [[CALL]] to i32*
-; CHECK-NEXT:    store i32 [[BITSNEEDED]], i32* [[BITSNEEDED6]], align 8
+; CHECK-NEXT:    store i32 [[BITSNEEDED]], ptr [[CALL]], align 8
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
-; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi %struct.BitfieldStruct* [ [[TMP0]], [[IF_END5]] ], [ null, [[IF_THEN4]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret %struct.BitfieldStruct* [[RETVAL_0]]
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi ptr [ [[CALL]], [[IF_END5]] ], [ null, [[IF_THEN4]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[RETVAL_0]]
 ;
 entry:
-  %call = tail call dereferenceable_or_null(16) i8* @malloc(i64 16) #4
-  %0 = bitcast i8* %call to %struct.BitfieldStruct*
-  %tobool = icmp eq i8* %call, null
+  %call = tail call dereferenceable_or_null(16) ptr @malloc(i64 16) #4
+  %tobool = icmp eq ptr %call, null
   br i1 %tobool, label %cleanup, label %if.end
 
 if.end:                                           ; preds = %entry
   %add = add nsw i32 %bitsNeeded, 7
   %div = sdiv i32 %add, 8
   %conv = sext i32 %div to i64
-  %call1 = tail call i8* @calloc(i64 %conv, i64 1) #3
-  %bitfield = getelementptr inbounds i8, i8* %call, i64 8
-  %1 = bitcast i8* %bitfield to i8**
-  store i8* %call1, i8** %1, align 8
-  %tobool3 = icmp eq i8* %call1, null
+  %call1 = tail call ptr @calloc(i64 %conv, i64 1) #3
+  %bitfield = getelementptr inbounds i8, ptr %call, i64 8
+  store ptr %call1, ptr %bitfield, align 8
+  %tobool3 = icmp eq ptr %call1, null
   br i1 %tobool3, label %if.then4, label %if.end5
 
 if.then4:                                         ; preds = %if.end
-  tail call void @free(i8* nonnull %call)
+  tail call void @free(ptr nonnull %call)
   br label %cleanup
 
 if.end5:                                          ; preds = %if.end
-  %bitsNeeded6 = bitcast i8* %call to i32*
-  store i32 %bitsNeeded, i32* %bitsNeeded6, align 8
+  store i32 %bitsNeeded, ptr %call, align 8
   br label %cleanup
 
 cleanup:                                          ; preds = %entry, %if.end5, %if.then4
-  %retval.0 = phi %struct.BitfieldStruct* [ %0, %if.end5 ], [ null, %if.then4 ], [ null, %entry ]
-  ret %struct.BitfieldStruct* %retval.0
+  %retval.0 = phi ptr [ %call, %if.end5 ], [ null, %if.then4 ], [ null, %entry ]
+  ret ptr %retval.0
 }
 
 attributes #0 = { nofree nounwind allocsize(0) allockind("alloc,uninitialized") }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-memintrinsics.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-memintrinsics.ll
index d0d13228ac5a2..99ae312e8df9c 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-memintrinsics.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-memintrinsics.ll
@@ -3,40 +3,38 @@
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 declare void @unknown_func()
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind
 
 ; Tests where the pointer/object is accessible after the function returns.
 
 ; Overwriting store along one path to the exit.
-define void @accessible_after_return_1(i32* noalias %P, i1 %c) {
+define void @accessible_after_return_1(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @accessible_after_return_1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 28, i1 false)
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br i1 %c, label %bb1, label %bb2
 
 bb1:
   br label %bb3
 
 bb2:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   br label %bb3
 
 bb3:
@@ -45,38 +43,36 @@ bb3:
 
 ; Post-dominating store.
 ; TODO: The memset can be shortened.
-define void @accessible_after_return_2(i32* noalias %P, i1 %c) {
+define void @accessible_after_return_2(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @accessible_after_return_2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 28, i1 false)
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX2]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br i1 %c, label %bb1, label %bb2
 
 bb1:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   br label %bb3
 
 bb2:
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx2, align 4
   br label %bb3
 
 bb3:
@@ -84,27 +80,25 @@ bb3:
 }
 
 ; Stores along  both exit paths.
-define void @accessible_after_return_3(i32* noalias %P, i1 %c) {
+define void @accessible_after_return_3(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @accessible_after_return_3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[P3]], i64 4
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARRAYIDX0]], i64 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false)
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1
+; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br i1 %c, label %bb1, label %bb2
 
 bb1:
@@ -114,13 +108,13 @@ bb2:
   br label %bb3
 
 bb3:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }
 
 
-declare void @readonly_use(i32* nocapture) readonly
+declare void @readonly_use(ptr nocapture) readonly
 
 ; Tests where the pointer/object is *NOT* accessible after the function returns.
 
@@ -129,11 +123,9 @@ define void @alloca_1(i1 %c) {
 ; CHECK-LABEL: @alloca_1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P_ALLOCA:%.*]] = alloca [32 x i32], align 4
-; CHECK-NEXT:    [[P:%.*]] = bitcast [32 x i32]* [[P_ALLOCA]] to i32*
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
-; CHECK-NEXT:    call void @readonly_use(i32* [[P]])
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P_ALLOCA]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    call void @readonly_use(ptr [[P_ALLOCA]])
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -144,19 +136,17 @@ define void @alloca_1(i1 %c) {
 ;
 entry:
   %P.alloca = alloca [32 x i32]
-  %P = bitcast [32 x i32]* %P.alloca to i32*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
-  call void @readonly_use(i32* %P)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
+  call void @readonly_use(ptr %P.alloca)
   br i1 %c, label %bb1, label %bb2
 
 bb1:
   br label %bb3
 
 bb2:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   br label %bb3
 
 bb3:
@@ -168,11 +158,9 @@ define void @alloca_2(i1 %c) {
 ; CHECK-LABEL: @alloca_2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P_ALLOCA:%.*]] = alloca [32 x i32], align 4
-; CHECK-NEXT:    [[P:%.*]] = bitcast [32 x i32]* [[P_ALLOCA]] to i32*
-; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
-; CHECK-NEXT:    call void @readonly_use(i32* [[P]])
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P_ALLOCA]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 28, i1 false)
+; CHECK-NEXT:    call void @readonly_use(ptr [[P_ALLOCA]])
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -183,21 +171,19 @@ define void @alloca_2(i1 %c) {
 ;
 entry:
   %P.alloca = alloca [32 x i32]
-  %P = bitcast [32 x i32]* %P.alloca to i32*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
-  call void @readonly_use(i32* %P)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
+  call void @readonly_use(ptr %P.alloca)
   br i1 %c, label %bb1, label %bb2
 
 bb1:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   br label %bb3
 
 bb2:
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  store i32 1, ptr %arrayidx2, align 4
   br label %bb3
 
 bb3:
@@ -218,10 +204,8 @@ define void @alloca_3(i1 %c) {
 ;
 entry:
   %P.alloca = alloca [32 x i32]
-  %P = bitcast [32 x i32]* %P.alloca to i32*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  %p3 = bitcast i32* %arrayidx0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
+  %arrayidx0 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  call void @llvm.memset.p0.i64(ptr %arrayidx0, i8 0, i64 28, i32 4, i1 false)
   br i1 %c, label %bb1, label %bb2
 
 bb1:
@@ -231,7 +215,7 @@ bb2:
   br label %bb3
 
 bb3:
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 1, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %P.alloca, i64 1
+  store i32 1, ptr %arrayidx1, align 4
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-memoryphis.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-memoryphis.ll
index b3faafd15b3bc..22cffc621fe85 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-memoryphis.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-memoryphis.ll
@@ -4,32 +4,32 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
 
-define void @test4(i32* noalias %P) {
+define void @test4(ptr noalias %P) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[X:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
-  %x = load i32, i32* %P
+  %x = load i32, ptr %P
   br label %bb3
 bb3:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 }
 
-define void @test5(i32* noalias %P) {
+define void @test5(ptr noalias %P) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
@@ -37,48 +37,48 @@ define void @test5(i32* noalias %P) {
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
-define void @test8(i32* %P, i32* %Q) {
+define void @test8(ptr %P, ptr %Q) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[Q:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb2:
-  store i32 1, i32* %Q
+  store i32 1, ptr %Q
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
-define void @test10(i32* noalias %P) {
+define void @test10(ptr noalias %P) {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -87,22 +87,21 @@ define void @test10(i32* noalias %P) {
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  %P2 = bitcast i32* %P to i8*
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i8 1, i8* %P2
+  store i8 1, ptr %P
   ret void
 }
 
 declare void @hoge()
 
 ; Check a function with a MemoryPhi with 3 incoming values.
-define void @widget(i32* %Ptr, i1 %c1, i1 %c2, i32 %v1, i32 %v2, i32 %v3) {
+define void @widget(ptr %Ptr, i1 %c1, i1 %c2, i32 %v1, i32 %v2, i32 %v3) {
 ; CHECK-LABEL: @widget(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    tail call void @hoge()
@@ -120,13 +119,13 @@ define void @widget(i32* %Ptr, i1 %c1, i1 %c2, i32 %v1, i32 %v2, i32 %v3) {
 ; CHECK-NEXT:    i32 2, label [[BB7:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       bb5:
-; CHECK-NEXT:    store i32 0, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb6:
-; CHECK-NEXT:    store i32 1, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[PTR]], align 4
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    store i32 2, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[PTR]], align 4
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    br label [[BB4]]
@@ -139,7 +138,7 @@ bb1:                                              ; preds = %bb
   br i1 %c2, label %bb2, label %bb3
 
 bb2:                                              ; preds = %bb1
-  store i32 -1, i32* %Ptr, align 4
+  store i32 -1, ptr %Ptr, align 4
   br label %bb3
 
 bb3:                                              ; preds = %bb2, %bb1, %bb
@@ -153,15 +152,15 @@ bb4:                                              ; preds = %bb8, %bb3
   ]
 
 bb5:                                              ; preds = %bb4
-  store i32 0, i32* %Ptr, align 4
+  store i32 0, ptr %Ptr, align 4
   br label %bb8
 
 bb6:                                              ; preds = %bb4
-  store i32 1, i32* %Ptr, align 4
+  store i32 1, ptr %Ptr, align 4
   br label %bb8
 
 bb7:                                              ; preds = %bb4
-  store i32 2, i32* %Ptr, align 4
+  store i32 2, ptr %Ptr, align 4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb6, %bb5, %bb4
@@ -172,7 +171,7 @@ bb8:                                              ; preds = %bb7, %bb6, %bb5, %b
 declare void @fn1_test11()
 declare void @fn2_test11()
 
-define void @test11(i1 %c, i8** %ptr.1) {
+define void @test11(i1 %c, ptr %ptr.1) {
 ; CHECK-LABEL: @test11(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[EXIT:%.*]]
@@ -180,7 +179,7 @@ define void @test11(i1 %c, i8** %ptr.1) {
 ; CHECK-NEXT:    tail call void @fn2_test11() #0
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i8* null, i8** [[PTR_1:%.*]], align 8
+; CHECK-NEXT:    store ptr null, ptr [[PTR_1:%.*]], align 8
 ; CHECK-NEXT:    tail call void @fn2_test11() #0
 ; CHECK-NEXT:    ret void
 ;
@@ -192,7 +191,7 @@ if.then:                                      ; preds = %entry
   br label %exit
 
 exit:
-  store i8* null, i8** %ptr.1, align 8
+  store ptr null, ptr %ptr.1, align 8
   tail call void @fn2_test11() #1
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath-throwing.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath-throwing.ll
index 4fe04e5467d3d..00c7de46ad93a 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath-throwing.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath-throwing.ll
@@ -5,80 +5,80 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
 declare void @readnone_may_throw() readnone
 
-declare void @use(i32 *)
+declare void @use(ptr)
 
 ; Tests where the pointer/object is accessible after the function returns.
 
 ; Cannot remove the store from the entry block, because the call in bb2 may throw.
-define void @accessible_after_return_1(i32* noalias %P, i1 %c1) {
+define void @accessible_after_return_1(ptr noalias %P, i1 %c1) {
 ; CHECK-LABEL: @accessible_after_return_1(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    call void @readnone_may_throw()
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 
 bb2:
   call void @readnone_may_throw()
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
 ; Cannot remove the store from the entry block, because the call in bb3 may throw.
-define void @accessible_after_return6(i32* %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return6(ptr %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return6(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
 ; CHECK-NEXT:    call void @readnone_may_throw()
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb4:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
   br i1 %c.2, label %bb3, label %bb4
 
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 
 bb3:
   call void @readnone_may_throw()
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   ret void
 
 bb4:
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   ret void
 }
 
@@ -93,32 +93,32 @@ define void @alloca_1(i1 %c1) {
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    call void @readnone_may_throw()
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %P = alloca i32
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 
 bb2:
   call void @readnone_may_throw()
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
@@ -130,27 +130,27 @@ define void @alloca_2(i1 %c.1, i1 %c.2) {
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    call void @readnone_may_throw()
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
-; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 5, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 
 bb2:
@@ -158,14 +158,14 @@ bb2:
 
 bb3:
   call void @readnone_may_throw()
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb4:
-  store i32 5, i32* %P
+  store i32 5, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath.ll
index ab7a056f7018d..6a8daa81981c9 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-multipath.ll
@@ -3,129 +3,129 @@
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
-declare void @use(i32 *)
+declare void @use(ptr)
 
 ; Tests where the pointer/object is accessible after the function returns.
 
-define void @accessible_after_return_1(i32* noalias %P, i1 %c1) {
+define void @accessible_after_return_1(ptr noalias %P, i1 %c1) {
 ; CHECK-LABEL: @accessible_after_return_1(
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 bb2:
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
-define void @accessible_after_return_2(i32* noalias %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return_2(ptr noalias %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return_2(
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
-; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 5, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 
 bb2:
   br i1 %c.2, label %bb3, label %bb4
 
 bb3:
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb4:
-  store i32 5, i32* %P
+  store i32 5, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
 ; Cannot remove store in entry block because it is not overwritten on path
 ; entry->bb2->bb5.
-define void @accessible_after_return_3(i32* noalias %P, i1 %c1) {
+define void @accessible_after_return_3(ptr noalias %P, i1 %c1) {
 ; CHECK-LABEL: @accessible_after_return_3(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 
 bb2:
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
 ; Cannot remove store in entry block because it is not overwritten on path
 ; entry->bb2->bb5.
-define void @accessible_after_return_4(i32* noalias %P, i1 %c1) {
+define void @accessible_after_return_4(ptr noalias %P, i1 %c1) {
 ; CHECK-LABEL: @accessible_after_return_4(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
-  call void @use(i32* %P)
+  store i32 0, ptr %P
+  call void @use(ptr %P)
   br label %bb5
 
 bb2:
@@ -137,18 +137,18 @@ bb5:
 
 ; Cannot remove the store in entry, as it is not overwritten on all paths to an
 ; exit (patch including bb4).
-define void @accessible_after_return5(i32* %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return5(ptr %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return5(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    br label [[BB5]]
@@ -156,18 +156,18 @@ define void @accessible_after_return5(i32* %P, i1 %c.1, i1 %c.2) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
   br i1 %c.2, label %bb3, label %bb4
 
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb5
 
 bb3:
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   br label %bb5
 
 bb4:
@@ -178,55 +178,55 @@ bb5:
 }
 
 ; Can remove store in entry block, because it is overwritten before each return.
-define void @accessible_after_return6(i32* %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return6(ptr %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb4:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
   br i1 %c.2, label %bb3, label %bb4
 
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 
 bb3:
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   ret void
 
 bb4:
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   ret void
 }
 
 ; Can remove store in bb1, because it is overwritten along each path
 ; from bb1 to the exit.
-define void @accessible_after_return7(i32* %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return7(ptr %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return7(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb4:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB5]]
@@ -237,15 +237,15 @@ entry:
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c.2, label %bb3, label %bb4
 
 bb3:
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   br label %bb5
 
 bb4:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb5
 
 bb2:
@@ -258,18 +258,18 @@ bb5:
 
 ; Cannot remove store in entry block, because it is overwritten along each path to
 ; the exit (entry->bb1->bb4->bb5).
-define void @accessible_after_return8(i32* %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return8(ptr %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return8(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    br label [[BB5]]
@@ -277,18 +277,18 @@ define void @accessible_after_return8(i32* %P, i1 %c.1, i1 %c.2) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
   br i1 %c.2, label %bb3, label %bb4
 
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb5
 
 bb3:
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   br label %bb5
 
 bb4:
@@ -300,23 +300,23 @@ bb5:
 
 ; Make sure no stores are removed here. In particular, the store in if.then
 ; should not be removed.
-define void @accessible_after_return9(i8* noalias %ptr) {
+define void @accessible_after_return9(ptr noalias %ptr) {
 ; CHECK-LABEL: @accessible_after_return9(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[C_0:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C_0]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    store i8 99, i8* [[PTR:%.*]], align 8
+; CHECK-NEXT:    store i8 99, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT:    [[C_1:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C_1]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    store i8 20, i8* [[PTR]], align 8
+; CHECK-NEXT:    store i8 20, ptr [[PTR]], align 8
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    [[C_2:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C_2]], label [[IF_THEN10:%.*]], label [[FOR_INC:%.*]]
 ; CHECK:       if.then10:
-; CHECK-NEXT:    store i8 0, i8* [[PTR]], align 8
+; CHECK-NEXT:    store i8 0, ptr [[PTR]], align 8
 ; CHECK-NEXT:    br label [[FOR_INC]]
 ; CHECK:       for.inc:
 ; CHECK-NEXT:    [[C_3:%.*]] = call i1 @cond()
@@ -329,12 +329,12 @@ entry:
   br i1 %c.0, label %for.body, label %for.end
 
 for.body:
-  store i8 99, i8* %ptr, align 8
+  store i8 99, ptr %ptr, align 8
   %c.1 = call i1 @cond()
   br i1 %c.1, label %if.end, label %if.then
 
 if.then:
-  store i8 20, i8* %ptr, align 8
+  store i8 20, ptr %ptr, align 8
   br label %if.end
 
 if.end:
@@ -342,7 +342,7 @@ if.end:
   br i1 %c.2, label %if.then10, label %for.inc
 
 if.then10:
-  store i8 0, i8* %ptr, align 8
+  store i8 0, ptr %ptr, align 8
   br label %for.inc
 
 for.inc:
@@ -356,18 +356,18 @@ for.end:
 ; Cannot remove store in entry block because it is not overwritten on path
 ; entry->bb2->bb4. Also make sure we deal with dead exit blocks without
 ; crashing.
-define void @accessible_after_return10_dead_block(i32* %P, i1 %c.1, i1 %c.2) {
+define void @accessible_after_return10_dead_block(ptr %P, i1 %c.1, i1 %c.2) {
 ; CHECK-LABEL: @accessible_after_return10_dead_block(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb4:
 ; CHECK-NEXT:    ret void
@@ -375,18 +375,18 @@ define void @accessible_after_return10_dead_block(i32* %P, i1 %c.1, i1 %c.2) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
   br i1 %c.2, label %bb3, label %bb4
 
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 
 bb3:
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   ret void
 
 bb4:
@@ -406,8 +406,8 @@ define void @accessible_after_return11_loop() {
 ; CHECK-NEXT:    [[C_1:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C_1]], label [[FOR_BODY_I]], label [[INIT_PARSE_EXIT:%.*]]
 ; CHECK:       init_parse.exit:
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull undef)
-; CHECK-NEXT:    store i32 0, i32* @linenum, align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 16, ptr nonnull undef)
+; CHECK-NEXT:    store i32 0, ptr @linenum, align 4
 ; CHECK-NEXT:    br label [[FOR_BODY_I20:%.*]]
 ; CHECK:       for.body.i20:
 ; CHECK-NEXT:    [[C_2:%.*]] = call i1 @cond()
@@ -423,9 +423,9 @@ for.body.i:                                       ; preds = %for.body.i, %entry
   br i1 %c.1, label %for.body.i, label %init_parse.exit
 
 init_parse.exit:                                  ; preds = %for.body.i
-  store i32 0, i32* @linenum, align 4
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull undef) #2
-  store i32 0, i32* @linenum, align 4
+  store i32 0, ptr @linenum, align 4
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull undef) #2
+  store i32 0, ptr @linenum, align 4
   br label %for.body.i20
 
 for.body.i20:                                     ; preds = %for.body.i20, %init_parse.exit
@@ -435,7 +435,7 @@ for.body.i20:                                     ; preds = %for.body.i20, %init
 exit:
   ret void
 }
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 declare i1 @cond() readnone nounwind
 
 ; Tests where the pointer/object is *NOT* accessible after the function returns.
@@ -447,28 +447,28 @@ define void @alloca_1(i1 %c1) {
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 bb2:
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
@@ -479,41 +479,41 @@ define void @alloca_2(i1 %c.1, i1 %c.2) {
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
-; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 5, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c.1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 
 bb2:
   br i1 %c.2, label %bb3, label %bb4
 
 bb3:
-  store i32 3, i32* %P
+  store i32 3, ptr %P
   br label %bb5
 
 bb4:
-  store i32 5, i32* %P
+  store i32 5, ptr %P
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
@@ -522,29 +522,29 @@ bb5:
 define void @alloca_3(i1 %c1) {
 ; CHECK-LABEL: @alloca_3(
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb5
 bb2:
   br label %bb5
 
 bb5:
-  call void @use(i32* %P)
+  call void @use(ptr %P)
   ret void
 }
 
@@ -556,8 +556,8 @@ define void @alloca_4(i1 %c1) {
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    call void @use(i32* [[P]])
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB5]]
@@ -565,12 +565,12 @@ define void @alloca_4(i1 %c1) {
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c1, label %bb1, label %bb2
 
 bb1:
-  store i32 0, i32* %P
-  call void @use(i32* %P)
+  store i32 0, ptr %P
+  call void @use(ptr %P)
   br label %bb5
 
 bb2:
@@ -581,60 +581,57 @@ bb5:
 }
 
 %struct.blam.4 = type { %struct.bar.5, [4 x i8] }
-%struct.bar.5 = type <{ i64, i64*, i32, i64 }>
+%struct.bar.5 = type <{ i64, ptr, i32, i64 }>
 
 ; Make sure we do not eliminate the store in %bb.
 define void @alloca_5(i1 %c) {
 ; CHECK-LABEL: @alloca_5(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca [[STRUCT_BLAM_4:%.*]], align 8
-; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_BLAM_4]], %struct.blam.4* [[TMP]], i64 0, i32 0, i32 3
-; CHECK-NEXT:    [[TMP39:%.*]] = bitcast i64* [[TMP38]] to i64*
-; CHECK-NEXT:    store i64 0, i64* [[TMP39]], align 4
+; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_BLAM_4]], ptr [[TMP]], i64 0, i32 0, i32 3
+; CHECK-NEXT:    store i64 0, ptr [[TMP38]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB46:%.*]], label [[BB47:%.*]]
 ; CHECK:       bb46:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb47:
-; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_BLAM_4]], %struct.blam.4* [[TMP]], i64 0, i32 0, i32 2
-; CHECK-NEXT:    store i32 20, i32* [[TMP48]], align 8
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_BLAM_4]], ptr [[TMP]], i64 0, i32 0, i32 2
+; CHECK-NEXT:    store i32 20, ptr [[TMP48]], align 8
 ; CHECK-NEXT:    br label [[BB52:%.*]]
 ; CHECK:       bb52:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB68:%.*]], label [[BB59:%.*]]
 ; CHECK:       bb59:
-; CHECK-NEXT:    call void @use.2(%struct.blam.4* [[TMP]])
+; CHECK-NEXT:    call void @use.2(ptr [[TMP]])
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb68:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %tmp = alloca %struct.blam.4, align 8
-  %tmp36 = getelementptr inbounds %struct.blam.4, %struct.blam.4* %tmp, i64 0, i32 0, i32 1
-  %tmp37 = bitcast i64** %tmp36 to i8*
-  %tmp38 = getelementptr inbounds %struct.blam.4, %struct.blam.4* %tmp, i64 0, i32 0, i32 3
-  %tmp39 = bitcast i64* %tmp38 to i64*
-  store i64 0, i64* %tmp39, align 4
+  %tmp36 = getelementptr inbounds %struct.blam.4, ptr %tmp, i64 0, i32 0, i32 1
+  %tmp38 = getelementptr inbounds %struct.blam.4, ptr %tmp, i64 0, i32 0, i32 3
+  store i64 0, ptr %tmp38, align 4
   br i1 %c, label %bb46, label %bb47
 
 bb46:                                             ; preds = %bb12
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 8 dereferenceable(20) %tmp37, i8 0, i64 26, i1 false)
+  call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(20) %tmp36, i8 0, i64 26, i1 false)
   ret void
 
 bb47:                                             ; preds = %bb12
-  %tmp48 = getelementptr inbounds %struct.blam.4, %struct.blam.4* %tmp, i64 0, i32 0, i32 2
-  store i32 20, i32* %tmp48, align 8
+  %tmp48 = getelementptr inbounds %struct.blam.4, ptr %tmp, i64 0, i32 0, i32 2
+  store i32 20, ptr %tmp48, align 8
   br label %bb52
 
 bb52:                                             ; preds = %bb47
   br i1 %c, label %bb68, label %bb59
 
 bb59:                                             ; preds = %bb52
-  call void @use.2(%struct.blam.4* %tmp)
+  call void @use.2(ptr %tmp)
   ret void
 
 bb68:                                             ; preds = %bb52
   ret void
 }
 
-declare void @use.2(%struct.blam.4*)
+declare void @use.2(ptr)
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-overlap.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-overlap.ll
index 2ed717343a8a3..85e9ab2a6df89 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-overlap.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-overlap.ll
@@ -5,42 +5,39 @@
 %struct.ham = type { [3 x double], [3 x double]}
 
 declare void @may_throw()
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
 ; We miss this case, because of an aggressive limit of partial overlap analysis.
 ; With a larger partial store limit, we remove the memset.
-define void @overlap1(%struct.ham* %arg, i1 %cond) {
+define void @overlap1(ptr %arg, i1 %cond) {
 ; CHECK-LABEL: @overlap1(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[STRUCT_HAM:%.*]], %struct.ham* [[ARG:%.*]], i64 0, i32 0, i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 0, i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 0, i64 0
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 1, i64 2
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 1, i64 1
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 1, i32 0
+; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[STRUCT_HAM:%.*]], ptr [[ARG:%.*]], i64 0, i32 0, i64 2
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 0, i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 1, i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 1, i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 1, i32 0
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[BB7:%.*]], label [[BB8:%.*]]
 ; CHECK:       bb7:
 ; CHECK-NEXT:    br label [[BB9:%.*]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    br label [[BB9]]
 ; CHECK:       bb9:
-; CHECK-NEXT:    store double 1.000000e+00, double* [[TMP2]], align 8
-; CHECK-NEXT:    store double 2.000000e+00, double* [[TMP1]], align 8
-; CHECK-NEXT:    store double 3.000000e+00, double* [[TMP]], align 8
-; CHECK-NEXT:    store double 4.000000e+00, double* [[TMP5]], align 8
-; CHECK-NEXT:    store double 5.000000e+00, double* [[TMP4]], align 8
-; CHECK-NEXT:    store double 6.000000e+00, double* [[TMP3]], align 8
+; CHECK-NEXT:    store double 1.000000e+00, ptr [[ARG]], align 8
+; CHECK-NEXT:    store double 2.000000e+00, ptr [[TMP1]], align 8
+; CHECK-NEXT:    store double 3.000000e+00, ptr [[TMP]], align 8
+; CHECK-NEXT:    store double 4.000000e+00, ptr [[TMP5]], align 8
+; CHECK-NEXT:    store double 5.000000e+00, ptr [[TMP4]], align 8
+; CHECK-NEXT:    store double 6.000000e+00, ptr [[TMP3]], align 8
 ; CHECK-NEXT:    ret void
 ;
 bb:
-  %tmp = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 0, i64 2
-  %tmp1 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 0, i64 1
-  %tmp2 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 0, i64 0
-  %tmp3 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0,i32 1, i64 2
-  %tmp4 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 1, i64 1
-  %tmp5 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 1, i32 0
-  %tmp6 = bitcast double* %tmp2 to i8*
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 8 dereferenceable(48) %tmp6, i8 0, i64 48, i1 false)
+  %tmp = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 0, i64 2
+  %tmp1 = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 0, i64 1
+  %tmp3 = getelementptr inbounds %struct.ham, ptr %arg, i64 0,i32 1, i64 2
+  %tmp4 = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 1, i64 1
+  %tmp5 = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 1, i32 0
+  call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(48) %arg, i8 0, i64 48, i1 false)
   br i1 %cond, label %bb7, label %bb8
 
 bb7:                                              ; preds = %bb
@@ -50,26 +47,24 @@ bb8:                                              ; preds = %bb
   br label %bb9
 
 bb9:                                              ; preds = %bb8, %bb7
-  store double 1.0, double* %tmp2, align 8
-  store double 2.0, double* %tmp1, align 8
-  store double 3.0, double* %tmp, align 8
-  store double 4.0, double* %tmp5, align 8
-  store double 5.0, double* %tmp4, align 8
-  store double 6.0, double* %tmp3, align 8
+  store double 1.0, ptr %arg, align 8
+  store double 2.0, ptr %tmp1, align 8
+  store double 3.0, ptr %tmp, align 8
+  store double 4.0, ptr %tmp5, align 8
+  store double 5.0, ptr %tmp4, align 8
+  store double 6.0, ptr %tmp3, align 8
   ret void
 }
 
-define void @overlap2(%struct.ham* %arg, i1 %cond) {
+define void @overlap2(ptr %arg, i1 %cond) {
 ; CHECK-LABEL: @overlap2(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[STRUCT_HAM:%.*]], %struct.ham* [[ARG:%.*]], i64 0, i32 0, i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 0, i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 0, i64 0
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 1, i64 2
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 1, i64 1
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_HAM]], %struct.ham* [[ARG]], i64 0, i32 1, i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[TMP2]] to i8*
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* nonnull align 8 dereferenceable(48) [[TMP6]], i8 0, i64 48, i1 false)
+; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[STRUCT_HAM:%.*]], ptr [[ARG:%.*]], i64 0, i32 0, i64 2
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 0, i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 1, i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 1, i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_HAM]], ptr [[ARG]], i64 0, i32 1, i32 0
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(48) [[ARG]], i8 0, i64 48, i1 false)
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[BB7:%.*]], label [[BB8:%.*]]
 ; CHECK:       bb7:
 ; CHECK-NEXT:    call void @may_throw()
@@ -77,23 +72,21 @@ define void @overlap2(%struct.ham* %arg, i1 %cond) {
 ; CHECK:       bb8:
 ; CHECK-NEXT:    br label [[BB9]]
 ; CHECK:       bb9:
-; CHECK-NEXT:    store double 1.000000e+00, double* [[TMP2]], align 8
-; CHECK-NEXT:    store double 2.000000e+00, double* [[TMP1]], align 8
-; CHECK-NEXT:    store double 3.000000e+00, double* [[TMP]], align 8
-; CHECK-NEXT:    store double 4.000000e+00, double* [[TMP5]], align 8
-; CHECK-NEXT:    store double 5.000000e+00, double* [[TMP4]], align 8
-; CHECK-NEXT:    store double 6.000000e+00, double* [[TMP3]], align 8
+; CHECK-NEXT:    store double 1.000000e+00, ptr [[ARG]], align 8
+; CHECK-NEXT:    store double 2.000000e+00, ptr [[TMP1]], align 8
+; CHECK-NEXT:    store double 3.000000e+00, ptr [[TMP]], align 8
+; CHECK-NEXT:    store double 4.000000e+00, ptr [[TMP5]], align 8
+; CHECK-NEXT:    store double 5.000000e+00, ptr [[TMP4]], align 8
+; CHECK-NEXT:    store double 6.000000e+00, ptr [[TMP3]], align 8
 ; CHECK-NEXT:    ret void
 ;
 bb:
-  %tmp = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 0, i64 2
-  %tmp1 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 0, i64 1
-  %tmp2 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 0, i64 0
-  %tmp3 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0,i32 1, i64 2
-  %tmp4 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 1, i64 1
-  %tmp5 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 1, i32 0
-  %tmp6 = bitcast double* %tmp2 to i8*
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 8 dereferenceable(48) %tmp6, i8 0, i64 48, i1 false)
+  %tmp = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 0, i64 2
+  %tmp1 = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 0, i64 1
+  %tmp3 = getelementptr inbounds %struct.ham, ptr %arg, i64 0,i32 1, i64 2
+  %tmp4 = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 1, i64 1
+  %tmp5 = getelementptr inbounds %struct.ham, ptr %arg, i64 0, i32 1, i32 0
+  call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(48) %arg, i8 0, i64 48, i1 false)
   br i1 %cond, label %bb7, label %bb8
 
 bb7:                                              ; preds = %bb
@@ -104,41 +97,37 @@ bb8:                                              ; preds = %bb
   br label %bb9
 
 bb9:                                              ; preds = %bb8, %bb7
-  store double 1.0, double* %tmp2, align 8
-  store double 2.0, double* %tmp1, align 8
-  store double 3.0, double* %tmp, align 8
-  store double 4.0, double* %tmp5, align 8
-  store double 5.0, double* %tmp4, align 8
-  store double 6.0, double* %tmp3, align 8
+  store double 1.0, ptr %arg, align 8
+  store double 2.0, ptr %tmp1, align 8
+  store double 3.0, ptr %tmp, align 8
+  store double 4.0, ptr %tmp5, align 8
+  store double 5.0, ptr %tmp4, align 8
+  store double 6.0, ptr %tmp3, align 8
   ret void
 }
 
 ; Test case from PR46513. Make sure we do not crash.
-; TODO: we should be able to shorten store i32 844283136, i32* %cast.i32 to a
+; TODO: we should be able to shorten store i32 844283136, ptr %cast.i32 to a
 ; store of i16.
-define void @overlap_no_dominance([4 x i8]* %arg, i1 %c)  {
+define void @overlap_no_dominance(ptr %arg, i1 %c)  {
 ; CHECK-LABEL: @overlap_no_dominance(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB13:%.*]], label [[BB9:%.*]]
 ; CHECK:       bb9:
-; CHECK-NEXT:    [[CAST_I32:%.*]] = bitcast [4 x i8]* [[ARG:%.*]] to i32*
-; CHECK-NEXT:    store i32 844283136, i32* [[CAST_I32]], align 4
+; CHECK-NEXT:    store i32 844283136, ptr [[ARG:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB13]]
 ; CHECK:       bb13:
-; CHECK-NEXT:    [[CAST_I16:%.*]] = bitcast [4 x i8]* [[ARG]] to i16*
-; CHECK-NEXT:    store i16 0, i16* [[CAST_I16]], align 4
+; CHECK-NEXT:    store i16 0, ptr [[ARG]], align 4
 ; CHECK-NEXT:    ret void
 ;
 bb:
   br i1 %c, label %bb13, label %bb9
 
 bb9:                                              ; preds = %bb
-  %cast.i32 = bitcast [4 x i8]* %arg to i32*
-  store i32 844283136, i32* %cast.i32, align 4
+  store i32 844283136, ptr %arg, align 4
   br label %bb13
 
 bb13:                                             ; preds = %bb9, %bb
-  %cast.i16 = bitcast [4 x i8]* %arg to i16*
-  store i16 0, i16* %cast.i16, align 4
+  store i16 0, ptr %arg, align 4
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-partial.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-partial.ll
index f998bb44a4716..0d1889da86e00 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-partial.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-partial.ll
@@ -3,9 +3,9 @@
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
-define void @second_store_smaller_1(i32* noalias %P, i1 %c) {
+define void @second_store_smaller_1(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @second_store_smaller_1(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -14,21 +14,20 @@ define void @second_store_smaller_1(i32* noalias %P, i1 %c) {
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  %P.i16 = bitcast i32* %P to i16*
-  store i16 0, i16* %P.i16
+  store i16 0, ptr %P
   ret void
 }
 
-define void @second_store_smaller_2(i32* noalias %P, i1 %c) {
+define void @second_store_smaller_2(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @second_store_smaller_2(
-; CHECK-NEXT:    store i32 12345, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 12345, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -37,109 +36,100 @@ define void @second_store_smaller_2(i32* noalias %P, i1 %c) {
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  %P.i16 = bitcast i32* %P to i16*
-  store i16 12345, i16* %P.i16
+  store i16 12345, ptr %P
   ret void
 }
 
 declare void @use(i16) readnone
 declare void @use.i8(i8) readnone
 
-define void @second_store_smaller_3(i32* noalias %P, i1 %c) {
+define void @second_store_smaller_3(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @second_store_smaller_3(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[P_I16:%.*]] = bitcast i32* [[P]] to i16*
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[L1:%.*]] = load i16, i16* [[P_I16]], align 2
+; CHECK-NEXT:    [[L1:%.*]] = load i16, ptr [[P]], align 2
 ; CHECK-NEXT:    call void @use(i16 [[L1]])
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i16 -31073, i16* [[P_I16]], align 2
+; CHECK-NEXT:    store i16 -31073, ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
-  %P.i16 = bitcast i32* %P to i16*
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 
 bb1:
-  %l1 = load i16, i16* %P.i16
+  %l1 = load i16, ptr %P
   call void @use(i16 %l1)
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i16 -31073, i16* %P.i16
+  store i16 -31073, ptr %P
   ret void
 }
 
-define void @second_store_smaller_4(i32* noalias %P, i1 %c) {
+define void @second_store_smaller_4(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @second_store_smaller_4(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[P_I8:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT:    [[L1:%.*]] = load i8, i8* [[P_I8]], align 1
+; CHECK-NEXT:    [[L1:%.*]] = load i8, ptr [[P]], align 1
 ; CHECK-NEXT:    call void @use.i8(i8 [[L1]])
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[P_I16:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    store i16 -31073, i16* [[P_I16]], align 2
+; CHECK-NEXT:    store i16 -31073, ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 
 bb1:
-  %P.i8 = bitcast i32* %P to i8*
-  %l1 = load i8, i8* %P.i8
+  %l1 = load i8, ptr %P
   call void @use.i8(i8 %l1)
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  %P.i16 = bitcast i32* %P to i16*
-  store i16 -31073, i16* %P.i16
+  store i16 -31073, ptr %P
   ret void
 }
 
-define void @second_store_smaller_5(i32* noalias %P, i16 %x, i1 %c) {
+define void @second_store_smaller_5(ptr noalias %P, i16 %x, i1 %c) {
 ; CHECK-LABEL: @second_store_smaller_5(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[P_I16:%.*]] = bitcast i32* [[P]] to i16*
-; CHECK-NEXT:    store i16 [[X:%.*]], i16* [[P_I16]], align 2
+; CHECK-NEXT:    store i16 [[X:%.*]], ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  %P.i16 = bitcast i32* %P to i16*
-  store i16 %x, i16* %P.i16
+  store i16 %x, ptr %P
   ret void
 }
 
-define void @second_store_bigger(i32* noalias %P, i1 %c) {
+define void @second_store_bigger(ptr noalias %P, i1 %c) {
 ; CHECK-LABEL: @second_store_bigger(
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
@@ -147,18 +137,16 @@ define void @second_store_bigger(i32* noalias %P, i1 %c) {
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[P_I64:%.*]] = bitcast i32* [[P:%.*]] to i64*
-; CHECK-NEXT:    store i64 0, i64* [[P_I64]], align 8
+; CHECK-NEXT:    store i64 0, ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  %P.i64 = bitcast i32* %P to i64*
-  store i64 0, i64* %P.i64
+  store i64 0, ptr %P
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-simple.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-simple.ll
index e30c78e44e02a..3c4d7e9ce2e84 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-simple.ll
@@ -4,7 +4,7 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
 
-define void @test2(i32* noalias %P) {
+define void @test2(ptr noalias %P) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
@@ -12,158 +12,156 @@ define void @test2(i32* noalias %P) {
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
-define void @test3(i32* noalias %P) {
+define void @test3(ptr noalias %P) {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb3:
   ret void
 }
 
 
-define void @test7(i32* noalias %P, i32* noalias %Q) {
+define void @test7(ptr noalias %P, ptr noalias %Q) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %Q
+  store i32 1, ptr %Q
   br i1 true, label %bb1, label %bb2
 bb1:
-  load i32, i32* %P
+  load i32, ptr %P
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 0, i32* %Q
-  store i32 0, i32* %P
+  store i32 0, ptr %Q
+  store i32 0, ptr %P
   ret void
 }
 
-define i32 @test22(i32* %P, i32* noalias %Q, i32* %R) {
+define i32 @test22(ptr %P, ptr noalias %Q, ptr %R) {
 ; CHECK-LABEL: @test22(
-; CHECK-NEXT:    store i32 2, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store i32 3, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    [[L:%.*]] = load i32, i32* [[R:%.*]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[R:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[L]]
 ;
-  store i32 1, i32* %Q
-  store i32 2, i32* %P
-  store i32 3, i32* %Q
-  %l = load i32, i32* %R
+  store i32 1, ptr %Q
+  store i32 2, ptr %P
+  store i32 3, ptr %Q
+  %l = load i32, ptr %R
   ret i32 %l
 }
 
-define void @test9(i32* noalias %P) {
+define void @test9(ptr noalias %P) {
 ; CHECK-LABEL: @test9(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   ret void
 bb3:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 }
 
-; We cannot eliminate `store i32 0, i32* %P`, as it is read by the later load.
-; Make sure that we check the uses of `store i32 1, i32* %P.1 which does not
+; We cannot eliminate `store i32 0, ptr %P`, as it is read by the later load.
+; Make sure that we check the uses of `store i32 1, ptr %P.1 which does not
 ; alias %P. Note that uses point to the *first* def that may alias.
-define void @overlapping_read(i32* %P) {
+define void @overlapping_read(ptr %P) {
 ; CHECK-LABEL: @overlapping_read(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[P_1:%.*]] = getelementptr i32, i32* [[P]], i32 1
-; CHECK-NEXT:    store i32 1, i32* [[P_1]], align 4
-; CHECK-NEXT:    [[P_64:%.*]] = bitcast i32* [[P]] to i64*
-; CHECK-NEXT:    [[LV:%.*]] = load i64, i64* [[P_64]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    [[P_1:%.*]] = getelementptr i32, ptr [[P]], i32 1
+; CHECK-NEXT:    store i32 1, ptr [[P_1]], align 4
+; CHECK-NEXT:    [[LV:%.*]] = load i64, ptr [[P]], align 8
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 2, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
-  %P.1 = getelementptr i32, i32* %P, i32 1
-  store i32 1, i32* %P.1
+  store i32 0, ptr %P
+  %P.1 = getelementptr i32, ptr %P, i32 1
+  store i32 1, ptr %P.1
 
-  %P.64 = bitcast i32* %P to i64*
-  %lv = load i64, i64* %P.64
+  %lv = load i64, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 2, i32* %P
+  store i32 2, ptr %P
   ret void
 }
 
-define void @test10(i32* %P) {
+define void @test10(ptr %P) {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb2:
   ret void
@@ -183,10 +181,10 @@ define void @test11() {
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb3
 bb2:
   ret void
@@ -195,50 +193,50 @@ bb3:
 }
 
 
-define void @test12(i32* %P) {
+define void @test12(ptr %P) {
 ; CHECK-LABEL: @test12(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 bb3:
   ret void
 }
 
 
-define void @test13(i32* %P) {
+define void @test13(ptr %P) {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb2:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br label %bb3
 bb3:
   ret void

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-throwing.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-throwing.ll
index 2033b34e05159..6e5cb6e6c31f5 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-throwing.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-throwing.ll
@@ -4,9 +4,9 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 declare void @unknown_func()
 
-define void @test6_store_same_value(i32* noalias %P) {
+define void @test6_store_same_value(ptr noalias %P) {
 ; CHECK-LABEL: @test6_store_same_value(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -14,10 +14,10 @@ define void @test6_store_same_value(i32* noalias %P) {
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
@@ -25,13 +25,13 @@ bb2:
   call void @unknown_func()
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
-define void @test6_store_other_value(i32* noalias %P) {
+define void @test6_store_other_value(ptr noalias %P) {
 ; CHECK-LABEL: @test6_store_other_value(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -39,10 +39,10 @@ define void @test6_store_other_value(i32* noalias %P) {
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
@@ -50,11 +50,11 @@ bb2:
   call void @unknown_func()
   br label %bb3
 bb3:
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   ret void
 }
 
-define void @test23(i32* noalias %P) {
+define void @test23(ptr noalias %P) {
 ; CHECK-LABEL: @test23(
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
@@ -63,23 +63,23 @@ define void @test23(i32* noalias %P) {
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   br i1 true, label %bb1, label %bb2
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb3
 bb2:
   call void @unknown_func()
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
 
-define void @test24(i32* noalias %P) {
+define void @test24(ptr noalias %P) {
 ; CHECK-LABEL: @test24(
 ; CHECK-NEXT:    br i1 true, label [[BB2:%.*]], label [[BB1:%.*]]
 ; CHECK:       bb1:
@@ -88,17 +88,17 @@ define void @test24(i32* noalias %P) {
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   br i1 true, label %bb2, label %bb1
 bb1:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br label %bb3
 bb2:
   call void @unknown_func()
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-unreachable.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-unreachable.ll
index a1af7271dcce0..013d8e5b8015f 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-unreachable.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-unreachable.ll
@@ -5,7 +5,7 @@ target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 
 ; Make sure we do not crash when we encounter unreachable blocks while checking
 ; if all paths to DomAccess go through a killing block.
-define void @test(float* %ptr, i1 %c.1, i1 %c.2, i1 %c.3) {
+define void @test(ptr %ptr, i1 %c.1, i1 %c.2, i1 %c.3) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[BB27:%.*]], label [[BB53:%.*]]
@@ -16,12 +16,12 @@ define void @test(float* %ptr, i1 %c.1, i1 %c.2, i1 %c.3) {
 ; CHECK:       bb27:
 ; CHECK-NEXT:    br i1 [[C_3:%.*]], label [[BB38:%.*]], label [[BB39:%.*]]
 ; CHECK:       bb38:
-; CHECK-NEXT:    store float 0.000000e+00, float* [[PTR:%.*]], align 4
+; CHECK-NEXT:    store float 0.000000e+00, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB38]]
 ; CHECK:       bb39:
 ; CHECK-NEXT:    br i1 [[C_2]], label [[BB43]], label [[BB38]]
 ; CHECK:       bb43:
-; CHECK-NEXT:    store float 0.000000e+00, float* [[PTR]], align 4
+; CHECK-NEXT:    store float 0.000000e+00, ptr [[PTR]], align 4
 ; CHECK-NEXT:    br label [[BB50:%.*]]
 ; CHECK:       bb50:
 ; CHECK-NEXT:    br i1 [[C_3]], label [[BB27]], label [[BB53]]
@@ -41,14 +41,14 @@ bb27:                                             ; preds = %bb50, %bb
   br i1 %c.3, label %bb38, label %bb39
 
 bb38:                                             ; preds = %bb39, %bb38, %bb27
-  store float 0.000000e+00, float* %ptr, align 4
+  store float 0.000000e+00, ptr %ptr, align 4
   br label %bb38
 
 bb39:                                             ; preds = %bb27
   br i1 %c.2, label %bb43, label %bb38
 
 bb43:                                             ; preds = %bb39, %bb10
-  store float 0.000000e+00, float* %ptr, align 4
+  store float 0.000000e+00, ptr %ptr, align 4
   br label %bb50
 
 bb50:                                             ; preds = %bb43
@@ -60,30 +60,30 @@ bb53:                                             ; preds = %bb53, %bb50, %bb22,
 
 declare void @exit()
 
-define void @unreachable_exit_with_no_call(i64* noalias %ptr, i1 %c.1) {
+define void @unreachable_exit_with_no_call(ptr noalias %ptr, i1 %c.1) {
 ; CHECK-LABEL: @unreachable_exit_with_no_call(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    unreachable
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i64 0, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    store i64 0, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 1, i64* %ptr, align 8
+  store i64 1, ptr %ptr, align 8
   br i1 %c.1, label %if.then, label %if.end
 
 if.then:
   unreachable
 
 if.end:
-  store i64 0, i64* %ptr, align 8
+  store i64 0, ptr %ptr, align 8
   ret void
 }
 
 ; Test for PR53800.
-define void @unreachable_exit_with_nounwind_call_pr53800(i64* noalias %ptr, i1 %c.1) {
+define void @unreachable_exit_with_nounwind_call_pr53800(ptr noalias %ptr, i1 %c.1) {
 ; CHECK-LABEL: @unreachable_exit_with_nounwind_call_pr53800(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
@@ -91,11 +91,11 @@ define void @unreachable_exit_with_nounwind_call_pr53800(i64* noalias %ptr, i1 %
 ; CHECK-NEXT:    tail call void @exit() #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i64 0, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    store i64 0, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 1, i64* %ptr, align 8
+  store i64 1, ptr %ptr, align 8
   br i1 %c.1, label %if.then, label %if.end
 
 if.then:
@@ -103,25 +103,25 @@ if.then:
   unreachable
 
 if.end:
-  store i64 0, i64* %ptr, align 8
+  store i64 0, ptr %ptr, align 8
   ret void
 }
 
 ; The call @exit may read %ptr as it is not marked as noalias
-define void @unreachable_exit_and_call_may_read(i64* %ptr, i1 %c.1) {
+define void @unreachable_exit_and_call_may_read(ptr %ptr, i1 %c.1) {
 ; CHECK-LABEL: @unreachable_exit_and_call_may_read(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 1, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    store i64 1, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @exit() #[[ATTR0]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i64 0, i64* [[PTR]], align 8
+; CHECK-NEXT:    store i64 0, ptr [[PTR]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 1, i64* %ptr, align 8
+  store i64 1, ptr %ptr, align 8
   br i1 %c.1, label %if.then, label %if.end
 
 if.then:
@@ -129,24 +129,24 @@ if.then:
   unreachable
 
 if.end:
-  store i64 0, i64* %ptr, align 8
+  store i64 0, ptr %ptr, align 8
   ret void
 }
 
-define void @unreachable_exit_with_may_unwind_call(i64* noalias %ptr, i1 %c.1) {
+define void @unreachable_exit_with_may_unwind_call(ptr noalias %ptr, i1 %c.1) {
 ; CHECK-LABEL: @unreachable_exit_with_may_unwind_call(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 1, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    store i64 1, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @exit()
 ; CHECK-NEXT:    unreachable
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i64 0, i64* [[PTR]], align 8
+; CHECK-NEXT:    store i64 0, ptr [[PTR]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 1, i64* %ptr, align 8
+  store i64 1, ptr %ptr, align 8
   br i1 %c.1, label %if.then, label %if.end
 
 if.then:
@@ -154,15 +154,15 @@ if.then:
   unreachable
 
 if.end:
-  store i64 0, i64* %ptr, align 8
+  store i64 0, ptr %ptr, align 8
   ret void
 }
 
 ; Cannot remove the store in entry, because it is not dead on the path to e.1
-define void @unreachable_exit_but_another_exit(i64* noalias %ptr, i1 %c.1, i32 %s, i1 %c.2) {
+define void @unreachable_exit_but_another_exit(ptr noalias %ptr, i1 %c.1, i32 %s, i1 %c.2) {
 ; CHECK-LABEL: @unreachable_exit_but_another_exit(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 1, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    store i64 1, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    br i1 [[C_2:%.*]], label [[E_0:%.*]], label [[E_1:%.*]]
@@ -172,11 +172,11 @@ define void @unreachable_exit_but_another_exit(i64* noalias %ptr, i1 %c.1, i32 %
 ; CHECK:       e.1:
 ; CHECK-NEXT:    ret void
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i64 0, i64* [[PTR]], align 8
+; CHECK-NEXT:    store i64 0, ptr [[PTR]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i64 1, i64* %ptr, align 8
+  store i64 1, ptr %ptr, align 8
   br i1 %c.1, label %if.then, label %if.end
 
 if.then:
@@ -190,6 +190,6 @@ e.1:
   ret void
 
 if.end:
-  store i64 0, i64* %ptr, align 8
+  store i64 0, ptr %ptr, align 8
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/no-targetdata.ll b/llvm/test/Transforms/DeadStoreElimination/no-targetdata.ll
index aec3076678787..61126da57c45b 100644
--- a/llvm/test/Transforms/DeadStoreElimination/no-targetdata.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/no-targetdata.ll
@@ -1,21 +1,21 @@
 ; RUN: opt -basic-aa -dse -S < %s | FileCheck %s
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
-define void @fn(i8* nocapture %buf) #0 {
+define void @fn(ptr nocapture %buf) #0 {
 entry:
 
 ; We would not eliminate the first memcpy with data layout, and we should not
 ; eliminate it without data layout.
 ; CHECK-LABEL: @fn
-; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64
-; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64
+; CHECK: tail call void @llvm.memcpy.p0.p0.i64
+; CHECK: tail call void @llvm.memcpy.p0.p0.i64
 ; CHECK: ret void
 
-  %arrayidx = getelementptr i8, i8* %buf, i64 18
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arrayidx, i8* %buf, i64 18, i1 false)
-  store i8 1, i8* %arrayidx, align 1
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* %arrayidx, i64 18, i1 false)
+  %arrayidx = getelementptr i8, ptr %buf, i64 18
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %arrayidx, ptr %buf, i64 18, i1 false)
+  store i8 1, ptr %arrayidx, align 1
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %buf, ptr %arrayidx, i64 18, i1 false)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll b/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
index 075e49ac23d58..030815ad36eac 100644
--- a/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
@@ -3,60 +3,60 @@
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='dse,verify<memoryssa>' -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-declare void @memset_pattern16(i8*, i8*, i64)
+declare void @memset_pattern16(ptr, ptr, i64)
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
-declare void @llvm.init.trampoline(i8*, i8*, i8*)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr)
 
 ; **** Noop load->store tests **************************************************
 
 ; We CAN optimize volatile loads.
-define void @test_load_volatile(i32* %Q) {
+define void @test_load_volatile(ptr %Q) {
 ; CHECK-LABEL: @test_load_volatile(
-; CHECK-NEXT:    [[A:%.*]] = load volatile i32, i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, ptr [[Q:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %a = load volatile i32, i32* %Q
-  store i32 %a, i32* %Q
+  %a = load volatile i32, ptr %Q
+  store i32 %a, ptr %Q
   ret void
 }
 
 ; We can NOT optimize volatile stores.
-define void @test_store_volatile(i32* %Q) {
+define void @test_store_volatile(ptr %Q) {
 ; CHECK-LABEL: @test_store_volatile(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 [[A]], i32* [[Q]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 [[A]], ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  store volatile i32 %a, i32* %Q
+  %a = load i32, ptr %Q
+  store volatile i32 %a, ptr %Q
   ret void
 }
 
 ; PR2599 - load -> store to same address.
-define void @test12({ i32, i32 }* %x) nounwind  {
+define void @test12(ptr %x) nounwind  {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    [[TEMP7:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[X:%.*]], i32 0, i32 1
-; CHECK-NEXT:    [[TEMP8:%.*]] = load i32, i32* [[TEMP7]], align 4
+; CHECK-NEXT:    [[TEMP7:%.*]] = getelementptr { i32, i32 }, ptr [[X:%.*]], i32 0, i32 1
+; CHECK-NEXT:    [[TEMP8:%.*]] = load i32, ptr [[TEMP7]], align 4
 ; CHECK-NEXT:    [[TEMP17:%.*]] = sub i32 0, [[TEMP8]]
-; CHECK-NEXT:    store i32 [[TEMP17]], i32* [[TEMP7]], align 4
+; CHECK-NEXT:    store i32 [[TEMP17]], ptr [[TEMP7]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %temp4 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 0
-  %temp5 = load i32, i32* %temp4, align 4
-  %temp7 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 1
-  %temp8 = load i32, i32* %temp7, align 4
+  %temp4 = getelementptr { i32, i32 }, ptr %x, i32 0, i32 0
+  %temp5 = load i32, ptr %temp4, align 4
+  %temp7 = getelementptr { i32, i32 }, ptr %x, i32 0, i32 1
+  %temp8 = load i32, ptr %temp7, align 4
   %temp17 = sub i32 0, %temp8
-  store i32 %temp5, i32* %temp4, align 4
-  store i32 %temp17, i32* %temp7, align 4
+  store i32 %temp5, ptr %temp4, align 4
+  store i32 %temp17, ptr %temp7, align 4
   ret void
 }
 
 ; Remove redundant store if loaded value is in another block.
-define i32 @test26(i1 %c, i32* %p) {
+define i32 @test26(i1 %c, ptr %p) {
 ; CHECK-LABEL: @test26(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
@@ -68,19 +68,19 @@ define i32 @test26(i1 %c, i32* %p) {
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   br label %bb3
 bb3:
   ret i32 0
 }
 
 ; Remove redundant store if loaded value is in another block.
-define i32 @test27(i1 %c, i32* %p) {
+define i32 @test27(i1 %c, ptr %p) {
 ; CHECK-LABEL: @test27(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
@@ -92,19 +92,19 @@ define i32 @test27(i1 %c, i32* %p) {
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ret i32 0
 }
 
 ; Remove redundant store if loaded value is in another block inside a loop.
-define i32 @test31(i1 %c, i32* %p, i32 %i) {
+define i32 @test31(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test31(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
@@ -114,38 +114,38 @@ define i32 @test31(i1 %c, i32* %p, i32 %i) {
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br label %bb1
 bb1:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb2:
   ret i32 0
 }
 
 ; Don't remove "redundant" store if %p is possibly stored to.
-define i32 @test46(i1 %c, i32* %p, i32* %p2, i32 %i) {
+define i32 @test46(i1 %c, ptr %p, ptr %p2, i32 %i) {
 ; CHECK-LABEL: @test46(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 0, i32* [[P2:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P2:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C]], label [[BB3:%.*]], label [[BB1]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br label %bb1
 bb1:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb2:
-  store i32 0, i32* %p2, align 4
+  store i32 0, ptr %p2, align 4
   br i1 %c, label %bb3, label %bb1
 bb3:
   ret i32 0
@@ -154,7 +154,7 @@ bb3:
 declare void @unknown_func()
 
 ; Remove redundant store, which is in the same loop as the load.
-define i32 @test33(i1 %c, i32* %p, i32 %i) {
+define i32 @test33(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test33(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
@@ -169,10 +169,10 @@ define i32 @test33(i1 %c, i32* %p, i32 %i) {
 entry:
   br label %bb1
 bb1:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br label %bb2
 bb2:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ; Might read and overwrite value at %p, but doesn't matter.
   call void @unknown_func()
   br i1 %c, label %bb1, label %bb3
@@ -180,79 +180,79 @@ bb3:
   ret i32 0
 }
 
-declare void @unkown_write(i32*)
+declare void @unkown_write(ptr)
 
 ; We can't remove the "noop" store around an unknown write.
-define void @test43(i32* %Q) {
+define void @test43(ptr %Q) {
 ; CHECK-LABEL: @test43(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    call void @unkown_write(i32* [[Q]])
-; CHECK-NEXT:    store i32 [[A]], i32* [[Q]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    call void @unkown_write(ptr [[Q]])
+; CHECK-NEXT:    store i32 [[A]], ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  call void @unkown_write(i32* %Q)
-  store i32 %a, i32* %Q
+  %a = load i32, ptr %Q
+  call void @unkown_write(ptr %Q)
+  store i32 %a, ptr %Q
   ret void
 }
 
 ; We CAN remove it when the unknown write comes AFTER.
-define void @test44(i32* %Q) {
+define void @test44(ptr %Q) {
 ; CHECK-LABEL: @test44(
-; CHECK-NEXT:    call void @unkown_write(i32* [[Q:%.*]])
+; CHECK-NEXT:    call void @unkown_write(ptr [[Q:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  store i32 %a, i32* %Q
-  call void @unkown_write(i32* %Q)
+  %a = load i32, ptr %Q
+  store i32 %a, ptr %Q
+  call void @unkown_write(ptr %Q)
   ret void
 }
 
-define void @test45(i32* %Q) {
+define void @test45(ptr %Q) {
 ; CHECK-LABEL: @test45(
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  store i32 10, i32* %Q
-  store i32 %a, i32* %Q
+  %a = load i32, ptr %Q
+  store i32 10, ptr %Q
+  store i32 %a, ptr %Q
   ret void
 }
 
-define i32 @test48(i1 %c, i32* %p) {
+define i32 @test48(i1 %c, ptr %p) {
 ; CHECK-LABEL: @test48(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB0:%.*]], label [[BB0_0:%.*]]
 ; CHECK:       bb0:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    br i1 [[C]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb0.0:
 ; CHECK-NEXT:    br label [[BB1]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    br i1 [[C]], label [[BB2]], label [[BB0]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br i1 %c, label %bb0, label %bb0.0
 
 bb0:
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   br i1 %c, label %bb1, label %bb2
 
 bb0.0:
   br label %bb1
 
 bb1:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   br i1 %c, label %bb2, label %bb0
 bb2:
   ret i32 0
 }
 
-define i32 @test47(i1 %c, i32* %p, i32 %i) {
+define i32 @test47(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test47(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[BB1:%.*]]
@@ -264,527 +264,521 @@ define i32 @test47(i1 %c, i32* %p, i32 %i) {
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br label %bb1
 bb1:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb2:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   br i1 %c, label %bb3, label %bb1
 bb3:
   ret i32 0
 }
 
 ; Test case from PR47887.
-define void @test_noalias_store_between_load_and_store(i32* noalias %x, i32* noalias %y) {
+define void @test_noalias_store_between_load_and_store(ptr noalias %x, ptr noalias %y) {
 ; CHECK-LABEL: @test_noalias_store_between_load_and_store(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* [[Y:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[Y:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %lv = load i32, i32* %x, align 4
-  store i32 0, i32* %y, align 4
-  store i32 %lv, i32* %x, align 4
+  %lv = load i32, ptr %x, align 4
+  store i32 0, ptr %y, align 4
+  store i32 %lv, ptr %x, align 4
   ret void
 }
 
-; Test case from PR47887. Currently we eliminate the dead `store i32 %inc, i32* %x`,
-; but not the no-op `store i32 %lv, i32* %x`. That is because no-op stores are
+; Test case from PR47887. Currently we eliminate the dead `store i32 %inc, ptr %x`,
+; but not the no-op `store i32 %lv, ptr %x`. That is because no-op stores are
 ; eliminated before dead stores for the same def.
-define void @test_noalias_store_between_load_and_store_elimin_order(i32* noalias %x, i32* noalias %y) {
+define void @test_noalias_store_between_load_and_store_elimin_order(ptr noalias %x, ptr noalias %y) {
 ; CHECK-LABEL: @test_noalias_store_between_load_and_store_elimin_order(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* [[Y:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[Y:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %lv = load i32, i32* %x, align 4
+  %lv = load i32, ptr %x, align 4
   %inc = add nsw i32 %lv, 1
-  store i32 %inc, i32* %x, align 4
-  store i32 0, i32* %y, align 4
-  store i32 %lv, i32* %x, align 4
+  store i32 %inc, ptr %x, align 4
+  store i32 0, ptr %y, align 4
+  store i32 %lv, ptr %x, align 4
   ret void
 }
 
-declare noalias i8* @malloc(i64)
-declare noalias i8* @_Znwm(i64)
-declare void @clobber_memory(float*)
+declare noalias ptr @malloc(i64)
+declare noalias ptr @_Znwm(i64)
+declare void @clobber_memory(ptr)
 
 ; based on pr25892_lite
-define i8* @zero_memset_after_malloc(i64 %size) {
+define ptr @zero_memset_after_malloc(i64 %size) {
 ; CHECK-LABEL: @zero_memset_after_malloc(
-; CHECK-NEXT:    [[CALLOC:%.*]] = call i8* @calloc(i64 1, i64 [[SIZE:%.*]])
-; CHECK-NEXT:    ret i8* [[CALLOC]]
+; CHECK-NEXT:    [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]])
+; CHECK-NEXT:    ret ptr [[CALLOC]]
 ;
-  %call = call i8* @malloc(i64 %size) inaccessiblememonly
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %size, i1 false)
-  ret i8* %call
+  %call = call ptr @malloc(i64 %size) inaccessiblememonly
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
+  ret ptr %call
 }
 
 ; based on pr25892_lite
-define i8* @zero_memset_after_malloc_with_intermediate_clobbering(i64 %size) {
+define ptr @zero_memset_after_malloc_with_intermediate_clobbering(i64 %size) {
 ; CHECK-LABEL: @zero_memset_after_malloc_with_intermediate_clobbering(
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i64 [[SIZE:%.*]]) #[[ATTR7:[0-9]+]]
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[CALL]] to float*
-; CHECK-NEXT:    call void @clobber_memory(float* [[BC]])
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 0, i64 [[SIZE]], i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @malloc(i64 [[SIZE:%.*]]) #[[ATTR7:[0-9]+]]
+; CHECK-NEXT:    call void @clobber_memory(ptr [[CALL]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE]], i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = call i8* @malloc(i64 %size) inaccessiblememonly
-  %bc = bitcast i8* %call to float*
-  call void @clobber_memory(float* %bc)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %size, i1 false)
-  ret i8* %call
+  %call = call ptr @malloc(i64 %size) inaccessiblememonly
+  call void @clobber_memory(ptr %call)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
+  ret ptr %call
 }
 
 ; based on pr25892_lite
-define i8* @zero_memset_after_malloc_with_
diff erent_sizes(i64 %size) {
+define ptr @zero_memset_after_malloc_with_
diff erent_sizes(i64 %size) {
 ; CHECK-LABEL: @zero_memset_after_malloc_with_
diff erent_sizes(
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i64 [[SIZE:%.*]]) #[[ATTR7]]
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @malloc(i64 [[SIZE:%.*]]) #[[ATTR7]]
 ; CHECK-NEXT:    [[SIZE2:%.*]] = add nsw i64 [[SIZE]], -1
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 0, i64 [[SIZE2]], i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE2]], i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = call i8* @malloc(i64 %size) inaccessiblememonly
+  %call = call ptr @malloc(i64 %size) inaccessiblememonly
   %size2 = add nsw i64 %size, -1
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %size2, i1 false)
-  ret i8* %call
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size2, i1 false)
+  ret ptr %call
 }
 
 ; based on pr25892_lite
-define i8* @zero_memset_after_new(i64 %size) {
+define ptr @zero_memset_after_new(i64 %size) {
 ; CHECK-LABEL: @zero_memset_after_new(
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @_Znwm(i64 [[SIZE:%.*]])
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 0, i64 [[SIZE]], i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @_Znwm(i64 [[SIZE:%.*]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE]], i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = call i8* @_Znwm(i64 %size)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %size, i1 false)
-  ret i8* %call
+  %call = call ptr @_Znwm(i64 %size)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
+  ret ptr %call
 }
 
 ; This should not create a calloc and should not crash the compiler.
-define i8* @notmalloc_memset(i64 %size, i8*(i64)* %notmalloc) {
+define ptr @notmalloc_memset(i64 %size, ptr %notmalloc) {
 ; CHECK-LABEL: @notmalloc_memset(
-; CHECK-NEXT:    [[CALL1:%.*]] = call i8* [[NOTMALLOC:%.*]](i64 [[SIZE:%.*]])
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL1]], i8 0, i64 [[SIZE]], i1 false)
-; CHECK-NEXT:    ret i8* [[CALL1]]
+; CHECK-NEXT:    [[CALL1:%.*]] = call ptr [[NOTMALLOC:%.*]](i64 [[SIZE:%.*]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL1]], i8 0, i64 [[SIZE]], i1 false)
+; CHECK-NEXT:    ret ptr [[CALL1]]
 ;
-  %call1 = call i8* %notmalloc(i64 %size)
-  call void @llvm.memset.p0i8.i64(i8* %call1, i8 0, i64 %size, i1 false)
-  ret i8* %call1
+  %call1 = call ptr %notmalloc(i64 %size)
+  call void @llvm.memset.p0.i64(ptr %call1, i8 0, i64 %size, i1 false)
+  ret ptr %call1
 }
 
 ; This should not create recursive call to calloc.
-define i8* @calloc(i64 %nmemb, i64 %size) inaccessiblememonly {
+define ptr @calloc(i64 %nmemb, i64 %size) inaccessiblememonly {
 ; CHECK-LABEL: @calloc(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[SIZE:%.*]], [[NMEMB:%.*]]
-; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias align 16 i8* @malloc(i64 [[MUL]])
-; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i8* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias align 16 ptr @malloc(i64 [[MUL]])
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq ptr [[CALL]], null
 ; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* nonnull align 16 [[CALL]], i8 0, i64 [[MUL]], i1 false)
+; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr nonnull align 16 [[CALL]], i8 0, i64 [[MUL]], i1 false)
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
 entry:
   %mul = mul i64 %size, %nmemb
-  %call = tail call noalias align 16 i8* @malloc(i64 %mul)
-  %tobool.not = icmp eq i8* %call, null
+  %call = tail call noalias align 16 ptr @malloc(i64 %mul)
+  %tobool.not = icmp eq ptr %call, null
   br i1 %tobool.not, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  tail call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %call, i8 0, i64 %mul, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr nonnull align 16 %call, i8 0, i64 %mul, i1 false)
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  ret i8* %call
+  ret ptr %call
 }
 
-define float* @pr25892(i64 %size) {
+define ptr @pr25892(i64 %size) {
 ; CHECK-LABEL: @pr25892(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALLOC:%.*]] = call i8* @calloc(i64 1, i64 [[SIZE:%.*]])
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[CALLOC]], null
+; CHECK-NEXT:    [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALLOC]], null
 ; CHECK-NEXT:    br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[CALLOC]] to float*
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
-; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi float* [ [[BC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret float* [[RETVAL_0]]
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi ptr [ [[CALLOC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[RETVAL_0]]
 ;
 entry:
-  %call = call i8* @malloc(i64 %size) inaccessiblememonly
-  %cmp = icmp eq i8* %call, null
+  %call = call ptr @malloc(i64 %size) inaccessiblememonly
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cleanup, label %if.end
 if.end:
-  %bc = bitcast i8* %call to float*
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %size, i1 false)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
   br label %cleanup
 cleanup:
-  %retval.0 = phi float* [ %bc, %if.end ], [ null, %entry ]
-  ret float* %retval.0
+  %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ]
+  ret ptr %retval.0
 }
 
-define float* @pr25892_with_extra_store(i64 %size) {
+define ptr @pr25892_with_extra_store(i64 %size) {
 ; CHECK-LABEL: @pr25892_with_extra_store(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALLOC:%.*]] = call i8* @calloc(i64 1, i64 [[SIZE:%.*]])
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[CALLOC]], null
+; CHECK-NEXT:    [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALLOC]], null
 ; CHECK-NEXT:    br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[CALLOC]] to float*
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
-; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi float* [ [[BC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret float* [[RETVAL_0]]
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi ptr [ [[CALLOC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[RETVAL_0]]
 ;
 entry:
-  %call = call i8* @malloc(i64 %size) inaccessiblememonly
-  %cmp = icmp eq i8* %call, null
+  %call = call ptr @malloc(i64 %size) inaccessiblememonly
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cleanup, label %if.end
 if.end:
-  %bc = bitcast i8* %call to float*
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %size, i1 false)
-  store i8 0, i8* %call, align 1
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
+  store i8 0, ptr %call, align 1
   br label %cleanup
 cleanup:
-  %retval.0 = phi float* [ %bc, %if.end ], [ null, %entry ]
-  ret float* %retval.0
+  %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ]
+  ret ptr %retval.0
 }
 
 ; This should not create a calloc
-define i8* @malloc_with_no_nointer_null_check(i64 %0, i32 %1) {
+define ptr @malloc_with_no_nointer_null_check(i64 %0, i32 %1) {
 ; CHECK-LABEL: @malloc_with_no_nointer_null_check(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i64 [[TMP0:%.*]]) #[[ATTR7]]
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @malloc(i64 [[TMP0:%.*]]) #[[ATTR7]]
 ; CHECK-NEXT:    [[A:%.*]] = and i32 [[TMP1:%.*]], 32
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 0, i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[TMP0]], i1 false)
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
 entry:
-  %call = call i8* @malloc(i64 %0) inaccessiblememonly
+  %call = call ptr @malloc(i64 %0) inaccessiblememonly
   %a = and i32 %1, 32
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %cleanup, label %if.end
 if.end:
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 %0, i1 false)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %0, i1 false)
   br label %cleanup
 cleanup:
-  ret i8* %call
+  ret ptr %call
 }
 
 ; PR50143
-define i8* @store_zero_after_calloc_inaccessiblememonly() {
+define ptr @store_zero_after_calloc_inaccessiblememonly() {
 ; CHECK-LABEL: @store_zero_after_calloc_inaccessiblememonly(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 1, i64 10) #[[ATTR7]]
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 1, i64 10) #[[ATTR7]]
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 1, i64 10)  inaccessiblememonly
-  store i8 0, i8* %call
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 1, i64 10)  inaccessiblememonly
+  store i8 0, ptr %call
+  ret ptr %call
 }
 
-define i8* @zero_memset_after_calloc()  {
+define ptr @zero_memset_after_calloc()  {
 ; CHECK-LABEL: @zero_memset_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 40000, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
+  ret ptr %call
 }
 
-define i8* @volatile_zero_memset_after_calloc()  {
+define ptr @volatile_zero_memset_after_calloc()  {
 ; CHECK-LABEL: @volatile_zero_memset_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 0, i64 40000, i1 true)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 40000, i1 true)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 40000, i1 true)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 true)
+  ret ptr %call
 }
 
-define i8* @zero_memset_and_store_after_calloc(i8 %v)  {
+define ptr @zero_memset_and_store_after_calloc(i8 %v)  {
 ; CHECK-LABEL: @zero_memset_and_store_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  store i8 %v, i8* %call
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 40000, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  store i8 %v, ptr %call
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
+  ret ptr %call
 }
 
-define i8* @partial_zero_memset_after_calloc() {
+define ptr @partial_zero_memset_after_calloc() {
 ; CHECK-LABEL: @partial_zero_memset_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 20, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false)
+  ret ptr %call
 }
 
-define i8* @partial_zero_memset_and_store_after_calloc(i8 %v)  {
+define ptr @partial_zero_memset_and_store_after_calloc(i8 %v)  {
 ; CHECK-LABEL: @partial_zero_memset_and_store_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 30
-; CHECK-NEXT:    store i8 [[V:%.*]], i8* [[GEP]], align 1
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 30
+; CHECK-NEXT:    store i8 [[V:%.*]], ptr [[GEP]], align 1
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  %gep = getelementptr inbounds i8, i8* %call, i64 30
-  store i8 %v, i8* %gep
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 20, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  %gep = getelementptr inbounds i8, ptr %call, i64 30
+  store i8 %v, ptr %gep
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false)
+  ret ptr %call
 }
 
-define i8* @zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx)  {
+define ptr @zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx)  {
 ; CHECK-LABEL: @zero_memset_and_store_with_dyn_index_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  %gep = getelementptr inbounds i8, i8* %call, i64 %idx
-  store i8 %v, i8* %gep
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 40000, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  %gep = getelementptr inbounds i8, ptr %call, i64 %idx
+  store i8 %v, ptr %gep
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
+  ret ptr %call
 }
 
-define i8* @partial_zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx)  {
+define ptr @partial_zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx)  {
 ; CHECK-LABEL: @partial_zero_memset_and_store_with_dyn_index_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 [[IDX:%.*]]
-; CHECK-NEXT:    store i8 [[V:%.*]], i8* [[GEP]], align 1
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 0, i64 20, i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 [[IDX:%.*]]
+; CHECK-NEXT:    store i8 [[V:%.*]], ptr [[GEP]], align 1
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 20, i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  %gep = getelementptr inbounds i8, i8* %call, i64 %idx
-  store i8 %v, i8* %gep
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 20, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  %gep = getelementptr inbounds i8, ptr %call, i64 %idx
+  store i8 %v, ptr %gep
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false)
+  ret ptr %call
 }
 
-define i8* @zero_memset_after_calloc_inaccessiblememonly()  {
+define ptr @zero_memset_after_calloc_inaccessiblememonly()  {
 ; CHECK-LABEL: @zero_memset_after_calloc_inaccessiblememonly(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4) #[[ATTR7]]
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) #[[ATTR7]]
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4) inaccessiblememonly
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 0, i64 40000, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4) inaccessiblememonly
+  call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
+  ret ptr %call
 }
 
-define i8* @cst_nonzero_memset_after_calloc() {
+define ptr @cst_nonzero_memset_after_calloc() {
 ; CHECK-LABEL: @cst_nonzero_memset_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 1, i64 40000, i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 1, i64 40000, i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 1, i64 40000, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  call void @llvm.memset.p0.i64(ptr %call, i8 1, i64 40000, i1 false)
+  ret ptr %call
 }
 
-define i8* @nonzero_memset_after_calloc(i8 %v) {
+define ptr @nonzero_memset_after_calloc(i8 %v) {
 ; CHECK-LABEL: @nonzero_memset_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[CALL]], i8 [[V:%.*]], i64 40000, i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[CALL]], i8 [[V:%.*]], i64 40000, i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4)
-  call void @llvm.memset.p0i8.i64(i8* %call, i8 %v, i64 40000, i1 false)
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4)
+  call void @llvm.memset.p0.i64(ptr %call, i8 %v, i64 40000, i1 false)
+  ret ptr %call
 }
 
 ; PR11896
 ; The first memset is dead, because calloc provides zero-filled memory.
 ; TODO: This could be replaced with a call to malloc + memset_pattern16.
-define i8* @memset_pattern16_after_calloc(i8* %pat) {
+define ptr @memset_pattern16_after_calloc(ptr %pat) {
 ; CHECK-LABEL: @memset_pattern16_after_calloc(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @calloc(i64 10000, i64 4)
-; CHECK-NEXT:    call void @memset_pattern16(i8* [[CALL]], i8* [[PAT:%.*]], i64 40000)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
+; CHECK-NEXT:    call void @memset_pattern16(ptr [[CALL]], ptr [[PAT:%.*]], i64 40000)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = tail call i8* @calloc(i64 10000, i64 4) #1
-  call void @llvm.memset.p0i8.i64(i8* align 4 %call, i8 0, i64 40000, i1 false)
-  call void @memset_pattern16(i8* %call, i8* %pat, i64 40000) #1
-  ret i8* %call
+  %call = tail call ptr @calloc(i64 10000, i64 4) #1
+  call void @llvm.memset.p0.i64(ptr align 4 %call, i8 0, i64 40000, i1 false)
+  call void @memset_pattern16(ptr %call, ptr %pat, i64 40000) #1
+  ret ptr %call
 }
 
 @n = global i32 0, align 4
 @a = external global i32, align 4
- at b = external global i32*, align 8
+ at b = external global ptr, align 8
 
 ; GCC calloc-1.c test case should create calloc
-define i8* @test_malloc_memset_to_calloc(i64* %0) {
+define ptr @test_malloc_memset_to_calloc(ptr %0) {
 ; CHECK-LABEL: @test_malloc_memset_to_calloc(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @n, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @n, align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[CALLOC:%.*]] = call i8* @calloc(i64 1, i64 [[TMP2]])
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP0:%.*]], align 8
+; CHECK-NEXT:    [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[TMP2]])
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[TMP0:%.*]], align 8
 ; CHECK-NEXT:    [[TMP4:%.*]] = add nsw i64 [[TMP3]], 1
-; CHECK-NEXT:    store i64 [[TMP4]], i64* [[TMP0]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i8* [[CALLOC]], null
+; CHECK-NEXT:    store i64 [[TMP4]], ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq ptr [[CALLOC]], null
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    [[TMP6:%.*]] = add nsw i64 [[TMP3]], 2
-; CHECK-NEXT:    store i64 [[TMP6]], i64* [[TMP0]], align 8
-; CHECK-NEXT:    store i32 2, i32* @a, align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32*, i32** @b, align 8
-; CHECK-NEXT:    store i32 3, i32* [[TMP7]], align 4
+; CHECK-NEXT:    store i64 [[TMP6]], ptr [[TMP0]], align 8
+; CHECK-NEXT:    store i32 2, ptr @a, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr, ptr @b, align 8
+; CHECK-NEXT:    store i32 3, ptr [[TMP7]], align 4
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    ret i8* [[CALLOC]]
+; CHECK-NEXT:    ret ptr [[CALLOC]]
 ;
 entry:
-  %1 = load i32, i32* @n, align 4
+  %1 = load i32, ptr @n, align 4
   %2 = sext i32 %1 to i64
-  %3 = tail call i8* @malloc(i64 %2) inaccessiblememonly
-  %4 = load i64, i64* %0, align 8
+  %3 = tail call ptr @malloc(i64 %2) inaccessiblememonly
+  %4 = load i64, ptr %0, align 8
   %5 = add nsw i64 %4, 1
-  store i64 %5, i64* %0, align 8
-  %6 = icmp eq i8* %3, null
+  store i64 %5, ptr %0, align 8
+  %6 = icmp eq ptr %3, null
   br i1 %6, label %if.end, label %if.then
 
 if.then:
   %7 = add nsw i64 %4, 2
-  store i64 %7, i64* %0, align 8
-  store i32 2, i32* @a, align 4
-  tail call void @llvm.memset.p0i8.i64(i8* align 4 %3, i8 0, i64 %2, i1 false)
-  %8 = load i32*, i32** @b, align 8
-  store i32 3, i32* %8, align 4
+  store i64 %7, ptr %0, align 8
+  store i32 2, ptr @a, align 4
+  tail call void @llvm.memset.p0.i64(ptr align 4 %3, i8 0, i64 %2, i1 false)
+  %8 = load ptr, ptr @b, align 8
+  store i32 3, ptr %8, align 4
   br label %if.end
 
 if.end:
-  ret i8* %3
+  ret ptr %3
 }
 
-define void @store_same_i32_to_mayalias_loc(i32* %q, i32* %p) {
+define void @store_same_i32_to_mayalias_loc(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_i32_to_mayalias_loc(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store i32 [[V]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[Q:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %p, align 4
-  store i32 %v, i32* %q, align 4
-  store i32 %v, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
+  store i32 %v, ptr %q, align 4
+  store i32 %v, ptr %p, align 4
   ret void
 }
 
-define void @store_same_i32_to_mayalias_loc_unalign(i32* %q, i32* %p) {
+define void @store_same_i32_to_mayalias_loc_unalign(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_i32_to_mayalias_loc_unalign(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 1
-; CHECK-NEXT:    store i32 [[V]], i32* [[Q:%.*]], align 1
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 1
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 1
+; CHECK-NEXT:    store i32 [[V]], ptr [[Q:%.*]], align 1
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %p, align 1
-  store i32 %v, i32* %q, align 1
-  store i32 %v, i32* %p, align 1
+  %v = load i32, ptr %p, align 1
+  store i32 %v, ptr %q, align 1
+  store i32 %v, ptr %p, align 1
   ret void
 }
 
-define void @store_same_i12_to_mayalias_loc(i12* %q, i12* %p) {
+define void @store_same_i12_to_mayalias_loc(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_i12_to_mayalias_loc(
-; CHECK-NEXT:    [[V:%.*]] = load i12, i12* [[P:%.*]], align 2
-; CHECK-NEXT:    store i12 [[V]], i12* [[Q:%.*]], align 2
+; CHECK-NEXT:    [[V:%.*]] = load i12, ptr [[P:%.*]], align 2
+; CHECK-NEXT:    store i12 [[V]], ptr [[Q:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i12, i12* %p, align 2
-  store i12 %v, i12* %q, align 2
-  store i12 %v, i12* %p, align 2
+  %v = load i12, ptr %p, align 2
+  store i12 %v, ptr %q, align 2
+  store i12 %v, ptr %p, align 2
   ret void
 }
 
-define void @store_same_i12_to_mayalias_loc_unalign(i12* %q, i12* %p) {
+define void @store_same_i12_to_mayalias_loc_unalign(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_i12_to_mayalias_loc_unalign(
-; CHECK-NEXT:    [[V:%.*]] = load i12, i12* [[P:%.*]], align 1
-; CHECK-NEXT:    store i12 [[V]], i12* [[Q:%.*]], align 1
-; CHECK-NEXT:    store i12 [[V]], i12* [[P]], align 1
+; CHECK-NEXT:    [[V:%.*]] = load i12, ptr [[P:%.*]], align 1
+; CHECK-NEXT:    store i12 [[V]], ptr [[Q:%.*]], align 1
+; CHECK-NEXT:    store i12 [[V]], ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i12, i12* %p, align 1
-  store i12 %v, i12* %q, align 1
-  store i12 %v, i12* %p, align 1
+  %v = load i12, ptr %p, align 1
+  store i12 %v, ptr %q, align 1
+  store i12 %v, ptr %p, align 1
   ret void
 }
 
-define void @store_same_ptr_to_mayalias_loc(i32** %q, i32** %p) {
+define void @store_same_ptr_to_mayalias_loc(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_ptr_to_mayalias_loc(
-; CHECK-NEXT:    [[V:%.*]] = load i32*, i32** [[P:%.*]], align 8
-; CHECK-NEXT:    store i32* [[V]], i32** [[Q:%.*]], align 8
+; CHECK-NEXT:    [[V:%.*]] = load ptr, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store ptr [[V]], ptr [[Q:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32*, i32** %p, align 8
-  store i32* %v, i32** %q, align 8
-  store i32* %v, i32** %p, align 8
+  %v = load ptr, ptr %p, align 8
+  store ptr %v, ptr %q, align 8
+  store ptr %v, ptr %p, align 8
   ret void
 }
 
-define void @store_same_scalable_to_mayalias_loc(<vscale x 4 x i32>* %q, <vscale x 4 x i32>* %p) {
+define void @store_same_scalable_to_mayalias_loc(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_scalable_to_mayalias_loc(
-; CHECK-NEXT:    [[V:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], align 4
-; CHECK-NEXT:    store <vscale x 4 x i32> [[V]], <vscale x 4 x i32>* [[Q:%.*]], align 4
-; CHECK-NEXT:    store <vscale x 4 x i32> [[V]], <vscale x 4 x i32>* [[P]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store <vscale x 4 x i32> [[V]], ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store <vscale x 4 x i32> [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load <vscale x 4 x i32>, <vscale x 4 x i32>* %p, align 4
-  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %q, align 4
-  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %p, align 4
+  %v = load <vscale x 4 x i32>, ptr %p, align 4
+  store <vscale x 4 x i32> %v, ptr %q, align 4
+  store <vscale x 4 x i32> %v, ptr %p, align 4
   ret void
 }
 
-define void @store_same_i32_to_mayalias_loc_inconsistent_align(i32* %q, i32* %p) {
+define void @store_same_i32_to_mayalias_loc_inconsistent_align(ptr %q, ptr %p) {
 ; CHECK-LABEL: @store_same_i32_to_mayalias_loc_inconsistent_align(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 2
-; CHECK-NEXT:    store i32 [[V]], i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 2
+; CHECK-NEXT:    store i32 [[V]], ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %p, align 2
-  store i32 %v, i32* %q, align 4
-  store i32 %v, i32* %p, align 4
+  %v = load i32, ptr %p, align 2
+  store i32 %v, ptr %q, align 4
+  store i32 %v, ptr %p, align 4
   ret void
 }
 
-define void @do_not_crash_on_liveonentrydef(i1 %c, i8* %p, i8* noalias %q) {
+define void @do_not_crash_on_liveonentrydef(i1 %c, ptr %p, ptr noalias %q) {
 ; CHECK-LABEL: @do_not_crash_on_liveonentrydef(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
 ; CHECK:       if:
-; CHECK-NEXT:    store i8 0, i8* [[Q:%.*]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[Q:%.*]], align 1
 ; CHECK-NEXT:    br label [[JOIN]]
 ; CHECK:       join:
-; CHECK-NEXT:    [[V:%.*]] = load i8, i8* [[Q]], align 1
-; CHECK-NEXT:    store i8 0, i8* [[P:%.*]], align 1
-; CHECK-NEXT:    store i8 [[V]], i8* [[Q]], align 1
+; CHECK-NEXT:    [[V:%.*]] = load i8, ptr [[Q]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[P:%.*]], align 1
+; CHECK-NEXT:    store i8 [[V]], ptr [[Q]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
   br i1 %c, label %if, label %join
 
 if:
-  store i8 0, i8* %q, align 1
+  store i8 0, ptr %q, align 1
   br label %join
 
 join:
-  %v = load i8, i8* %q, align 1
-  store i8 0, i8* %p, align 1
-  store i8 %v, i8* %q, align 1
+  %v = load i8, ptr %q, align 1
+  store i8 0, ptr %p, align 1
+  store i8 %v, ptr %q, align 1
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
index 710cef81bbefe..e711c51a2e6b0 100644
--- a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
@@ -2,43 +2,43 @@
 ; RUN: opt -dse -S < %s | FileCheck %s
 
 ; Make sure invokes are not removed as dead stores.
-define void @test_nounwind_invoke() personality i8* bitcast (i32 (...)*  @__gxx_personality_v0 to i8*) {
+define void @test_nounwind_invoke() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @test_nounwind_invoke(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    invoke void @foo(i32* [[TMP]])
+; CHECK-NEXT:    invoke void @foo(ptr [[TMP]])
 ; CHECK-NEXT:    to label [[BB1:%.*]] unwind label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i32(i64 4, i32* [[TMP]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP]])
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[ABCTMP1:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    [[ABCTMP1:%.*]] = landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    resume { i8*, i32 } [[ABCTMP1]]
+; CHECK-NEXT:    resume { ptr, i32 } [[ABCTMP1]]
 ;
 bb:
   %tmp = alloca i32, align 4
   ; 'foo' is 'argmemonly', meaning it can only write to memory pointed by %tmp.
-  ; And this def is killed by 'call @llvm.lifetime.end.p0i32' in bb1 without
+  ; And this def is killed by 'call @llvm.lifetime.end.p0' in bb1 without
   ; being used elsewhere, becoming a dead store. But we shouldn't remove this
   ; because invokes are terminators and thus cannot be removed.
-  invoke void @foo(i32* %tmp)
+  invoke void @foo(ptr %tmp)
   to label %bb1 unwind label %bb2
 
 bb1:                                              ; preds = %bb
-  call void @llvm.lifetime.end.p0i32(i64 4, i32* %tmp)
+  call void @llvm.lifetime.end.p0(i64 4, ptr %tmp)
   ret void
 
 bb2:                                              ; preds = %bb
-  %tmp1 = landingpad { i8*, i32 }
+  %tmp1 = landingpad { ptr, i32 }
   cleanup
-  resume { i8*, i32 } %tmp1
+  resume { ptr, i32 } %tmp1
 }
 
 ; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i32(i64 immarg, i32* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @foo(i32*) #1
+declare void @foo(ptr) #1
 declare i32 @__gxx_personality_v0(...)
 
 attributes #0 = { argmemonly nocallback nofree nosync nounwind willreturn }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/offsetted-overlapping-stores.ll b/llvm/test/Transforms/DeadStoreElimination/offsetted-overlapping-stores.ll
index bb53f87e340e4..bb066392f2f4b 100644
--- a/llvm/test/Transforms/DeadStoreElimination/offsetted-overlapping-stores.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/offsetted-overlapping-stores.ll
@@ -15,40 +15,35 @@ define void @ArrayTestFullyOverlapping(i64 %0) {
 ;
 ; CHECK-LABEL: @ArrayTestFullyOverlapping(
 ; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP0:%.*]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64*
-; CHECK-NEXT:    store i64 0, i64* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP2]]
+; CHECK-NEXT:    store i64 0, ptr [[TMP3]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %2 = add i64 %0, -8
-  %3 = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 %2
-  %4 = bitcast i8* %3 to i64*
-  %5 = add i64 %0, -4
-  %6 = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 %5
-  %7 = bitcast i8* %6 to i32*
-  store i32 1, i32* %7
-  store i64 0, i64* %4
+  %3 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %2
+  %4 = add i64 %0, -4
+  %5 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %4
+  store i32 1, ptr %5
+  store i64 0, ptr %3
   ret void
 }
 
-define void @VectorTestFullyOverlapping(float* %arg, i32 %i) {
+define void @VectorTestFullyOverlapping(ptr %arg, i32 %i) {
 ; CHECK-LABEL: @VectorTestFullyOverlapping(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[I2:%.*]] = zext i32 [[I:%.*]] to i64
-; CHECK-NEXT:    [[I3:%.*]] = getelementptr inbounds float, float* [[ARG:%.*]], i64 [[I2]]
-; CHECK-NEXT:    [[I4:%.*]] = bitcast float* [[I3]] to <2 x float>*
-; CHECK-NEXT:    store <2 x float> zeroinitializer, <2 x float>* [[I4]], align 16
+; CHECK-NEXT:    [[I3:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[I2]]
+; CHECK-NEXT:    store <2 x float> zeroinitializer, ptr [[I3]], align 16
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %i7 = add nuw nsw i32 %i, 1
   %i8 = zext i32 %i7 to i64
-  %i9 = getelementptr inbounds float, float* %arg, i64 %i8
-  store float 0.0, float* %i9, align 4
+  %i9 = getelementptr inbounds float, ptr %arg, i64 %i8
+  store float 0.0, ptr %i9, align 4
   %i2 = zext i32 %i to i64
-  %i3 = getelementptr inbounds float, float* %arg, i64 %i2
-  %i4 = bitcast float* %i3 to <2 x float>*
-  store <2 x float> <float 0.0, float 0.0>, <2 x float>* %i4, align 16
+  %i3 = getelementptr inbounds float, ptr %arg, i64 %i2
+  store <2 x float> <float 0.0, float 0.0>, ptr %i3, align 16
   ret void
 }
 
@@ -59,27 +54,23 @@ define void @ArrayTestPartiallyOverlapping(i64 %0) {
 ;
 ; CHECK-LABEL: @ArrayTestPartiallyOverlapping(
 ; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP0:%.*]], 10
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64*
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP0]], 15
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 [[TMP5]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
-; CHECK-NEXT:    store i32 1, i32* [[TMP7]], align 4
-; CHECK-NEXT:    store i64 0, i64* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP5]]
+; CHECK-NEXT:    store i32 1, ptr [[TMP6]], align 4
+; CHECK-NEXT:    store i64 0, ptr [[TMP3]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %2 = add i64 %0, 10
-  %3 = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 %2
-  %4 = bitcast i8* %3 to i64*
-  %5 = add i64 %0, 15
-  %6 = getelementptr inbounds [0 x i8], [0 x i8]* @BUFFER, i64 0, i64 %5
-  %7 = bitcast i8* %6 to i32*
-  store i32 1, i32* %7
-  store i64 0, i64* %4
+  %3 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %2
+  %4 = add i64 %0, 15
+  %5 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %4
+  store i32 1, ptr %5
+  store i64 0, ptr %3
   ret void
 }
 
-define void @VectorTestPartiallyOverlapping(float* %arg, i32 %i) {
+define void @VectorTestPartiallyOverlapping(ptr %arg, i32 %i) {
 ;
 ; The DSE pass will not kill the store because the overlap is partial
 ; and won't fully clobber the original store.
@@ -87,26 +78,22 @@ define void @VectorTestPartiallyOverlapping(float* %arg, i32 %i) {
 ; CHECK-LABEL: @VectorTestPartiallyOverlapping(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[I2:%.*]] = zext i32 [[I:%.*]] to i64
-; CHECK-NEXT:    [[I3:%.*]] = getelementptr inbounds float, float* [[ARG:%.*]], i64 [[I2]]
-; CHECK-NEXT:    [[I4:%.*]] = bitcast float* [[I3]] to <2 x float>*
-; CHECK-NEXT:    store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* [[I4]], align 16
+; CHECK-NEXT:    [[I3:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[I2]]
+; CHECK-NEXT:    store <2 x float> <float 1.000000e+00, float 1.000000e+00>, ptr [[I3]], align 16
 ; CHECK-NEXT:    [[I5:%.*]] = add nuw nsw i32 [[I]], 1
 ; CHECK-NEXT:    [[I6:%.*]] = zext i32 [[I5]] to i64
-; CHECK-NEXT:    [[I7:%.*]] = getelementptr inbounds float, float* [[ARG]], i64 [[I6]]
-; CHECK-NEXT:    [[I8:%.*]] = bitcast float* [[I7]] to <2 x float>*
-; CHECK-NEXT:    store <2 x float> zeroinitializer, <2 x float>* [[I8]], align 16
+; CHECK-NEXT:    [[I7:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[I6]]
+; CHECK-NEXT:    store <2 x float> zeroinitializer, ptr [[I7]], align 16
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %i2 = zext i32 %i to i64
-  %i3 = getelementptr inbounds float, float* %arg, i64 %i2
-  %i4 = bitcast float* %i3 to <2 x float>*
-  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* %i4, align 16
+  %i3 = getelementptr inbounds float, ptr %arg, i64 %i2
+  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, ptr %i3, align 16
   %i5 = add nuw nsw i32 %i, 1
   %i6 = zext i32 %i5 to i64
-  %i7 = getelementptr inbounds float, float* %arg, i64 %i6
-  %i8 = bitcast float* %i7 to <2 x float>*
-  store <2 x float> <float 0.0, float 0.0>, <2 x float>* %i8, align 16
+  %i7 = getelementptr inbounds float, ptr %arg, i64 %i6
+  store <2 x float> <float 0.0, float 0.0>, ptr %i7, align 16
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/operand-bundles.ll b/llvm/test/Transforms/DeadStoreElimination/operand-bundles.ll
index 6305dc5844c79..4631ccfa28049 100644
--- a/llvm/test/Transforms/DeadStoreElimination/operand-bundles.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/operand-bundles.ll
@@ -1,31 +1,31 @@
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 
-declare noalias i8* @malloc(i64) "malloc-like"
+declare noalias ptr @malloc(i64) "malloc-like"
 
 declare void @foo()
-declare void @bar(i8*)
+declare void @bar(ptr)
 
 define void @test() {
-  %obj = call i8* @malloc(i64 8)
-  store i8 0, i8* %obj
+  %obj = call ptr @malloc(i64 8)
+  store i8 0, ptr %obj
   ; don't remove store. %obj should be treated like it will be read by the @foo.
-  ; CHECK: store i8 0, i8* %obj
-  call void @foo() ["deopt" (i8* %obj)]
+  ; CHECK: store i8 0, ptr %obj
+  call void @foo() ["deopt" (ptr %obj)]
   ret void
 }
 
 define void @test1() {
-  %obj = call i8* @malloc(i64 8)
-  store i8 0, i8* %obj
-  ; CHECK: store i8 0, i8* %obj
-  call void @bar(i8* nocapture %obj)
+  %obj = call ptr @malloc(i64 8)
+  store i8 0, ptr %obj
+  ; CHECK: store i8 0, ptr %obj
+  call void @bar(ptr nocapture %obj)
   ret void
 }
 
 define void @test2() {
-  %obj = call i8* @malloc(i64 8)
-  store i8 0, i8* %obj
-  ; CHECK-NOT: store i8 0, i8* %obj
+  %obj = call ptr @malloc(i64 8)
+  store i8 0, ptr %obj
+  ; CHECK-NOT: store i8 0, ptr %obj
   call void @foo()
   ret void
 }
@@ -35,21 +35,21 @@ define void @test3() {
   %s = alloca i64
   ; Verify that this first store is not considered killed by the second one
   ; since it could be observed from the deopt continuation.
-  ; CHECK: store i64 1, i64* %s
-  store i64 1, i64* %s
-  call void @foo() [ "deopt"(i64* %s) ]
-  store i64 0, i64* %s
+  ; CHECK: store i64 1, ptr %s
+  store i64 1, ptr %s
+  call void @foo() [ "deopt"(ptr %s) ]
+  store i64 0, ptr %s
   ret void
 }
 
-declare noalias i8* @calloc(i64, i64) inaccessiblememonly allockind("alloc,zeroed")
+declare noalias ptr @calloc(i64, i64) inaccessiblememonly allockind("alloc,zeroed")
 
 define void @test4() {
 ; CHECK-LABEL: @test4
-  %local_obj = call i8* @calloc(i64 1, i64 4)
-  call void @foo() ["deopt" (i8* %local_obj)]
-  store i8 0, i8* %local_obj, align 4
-  ; CHECK-NOT: store i8 0, i8* %local_obj, align 4
-  call void @bar(i8* nocapture %local_obj)
+  %local_obj = call ptr @calloc(i64 1, i64 4)
+  call void @foo() ["deopt" (ptr %local_obj)]
+  store i8 0, ptr %local_obj, align 4
+  ; CHECK-NOT: store i8 0, ptr %local_obj, align 4
+  call void @bar(ptr nocapture %local_obj)
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/out-of-bounds-stores.ll b/llvm/test/Transforms/DeadStoreElimination/out-of-bounds-stores.ll
index 949ab781b4961..fdcb9c5becc9d 100644
--- a/llvm/test/Transforms/DeadStoreElimination/out-of-bounds-stores.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/out-of-bounds-stores.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -dse -S %s | FileCheck %s
 
-declare void @use_pointer(i32*)
+declare void @use_pointer(ptr)
 
 ; Out-of-bounds stores can be considered killing any other stores to the same
 ; object in the same BB, because they are UB and guaranteed to execute. Note
@@ -10,19 +10,16 @@ declare void @use_pointer(i32*)
 define i32 @test_out_of_bounds_store_local(i1 %c) {
 ; CHECK-LABEL: @test_out_of_bounds_store_local(
 ; CHECK-NEXT:    [[D:%.*]] = alloca [1 x i32], align 4
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* [[D]], i64 0, i64 1
-; CHECK-NEXT:    store i32 20, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT:    [[BC:%.*]] = bitcast [1 x i32]* [[D]] to i32*
-; CHECK-NEXT:    call void @use_pointer(i32* [[BC]])
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [1 x i32], ptr [[D]], i64 0, i64 1
+; CHECK-NEXT:    store i32 20, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    call void @use_pointer(ptr [[D]])
 ; CHECK-NEXT:    ret i32 0
 ;
   %d = alloca [1 x i32], align 4
-  %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %d, i64 0, i64 0
-  store i32 10, i32* %arrayidx, align 4
-  %arrayidx.1 = getelementptr inbounds [1 x i32], [1 x i32]* %d, i64 0, i64 1
-  store i32 20, i32* %arrayidx.1, align 4
-  %bc = bitcast [1 x i32]* %d to i32*
-  call void @use_pointer(i32* %bc)
+  store i32 10, ptr %d, align 4
+  %arrayidx.1 = getelementptr inbounds [1 x i32], ptr %d, i64 0, i64 1
+  store i32 20, ptr %arrayidx.1, align 4
+  call void @use_pointer(ptr %d)
   ret i32 0
 }
 
@@ -33,25 +30,21 @@ define i32 @test_out_of_bounds_store_local(i1 %c) {
 define i32 @test_out_of_bounds_store_local_larger_object(i1 %c) {
 ; CHECK-LABEL: @test_out_of_bounds_store_local_larger_object(
 ; CHECK-NEXT:    [[D:%.*]] = alloca [2 x i32], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[D]], i64 0, i64 0
-; CHECK-NEXT:    store i32 10, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[D]], i64 0, i64 1
-; CHECK-NEXT:    store i32 20, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[D]], i64 0, i64 2
-; CHECK-NEXT:    store i32 30, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT:    [[BC:%.*]] = bitcast [2 x i32]* [[D]] to i32*
-; CHECK-NEXT:    call void @use_pointer(i32* [[BC]])
+; CHECK-NEXT:    store i32 10, ptr [[D]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [2 x i32], ptr [[D]], i64 0, i64 1
+; CHECK-NEXT:    store i32 20, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [2 x i32], ptr [[D]], i64 0, i64 2
+; CHECK-NEXT:    store i32 30, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT:    call void @use_pointer(ptr [[D]])
 ; CHECK-NEXT:    ret i32 0
 ;
   %d = alloca [2 x i32], align 4
-  %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %d, i64 0, i64 0
-  store i32 10, i32* %arrayidx, align 4
-  %arrayidx.1 = getelementptr inbounds [2 x i32], [2 x i32]* %d, i64 0, i64 1
-  store i32 20, i32* %arrayidx.1, align 4
-  %arrayidx.2 = getelementptr inbounds [2 x i32], [2 x i32]* %d, i64 0, i64 2
-  store i32 30, i32* %arrayidx.2, align 4
-  %bc = bitcast [2 x i32]* %d to i32*
-  call void @use_pointer(i32* %bc)
+  store i32 10, ptr %d, align 4
+  %arrayidx.1 = getelementptr inbounds [2 x i32], ptr %d, i64 0, i64 1
+  store i32 20, ptr %arrayidx.1, align 4
+  %arrayidx.2 = getelementptr inbounds [2 x i32], ptr %d, i64 0, i64 2
+  store i32 30, ptr %arrayidx.2, align 4
+  call void @use_pointer(ptr %d)
   ret i32 0
 }
 
@@ -65,36 +58,32 @@ define i32 @test_out_of_bounds_store_nonlocal(i1 %c) {
 ; CHECK-NEXT:    [[D:%.*]] = alloca [1 x i32], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* [[D]], i64 0, i64 0
-; CHECK-NEXT:    store i32 10, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    store i32 10, ptr [[D]], align 4
 ; CHECK-NEXT:    br label [[FOR_INC:%.*]]
 ; CHECK:       for.inc:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[FOR_BODY_1:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body.1:
 ; CHECK-NEXT:    ret i32 1
 ; CHECK:       for.end:
-; CHECK-NEXT:    [[BC:%.*]] = bitcast [1 x i32]* [[D]] to i32*
-; CHECK-NEXT:    call void @use_pointer(i32* [[BC]])
+; CHECK-NEXT:    call void @use_pointer(ptr [[D]])
 ; CHECK-NEXT:    ret i32 0
 ;
   %d = alloca [1 x i32], align 4
   br label %for.body
 
 for.body:                                         ; preds = %for.cond
-  %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %d, i64 0, i64 0
-  store i32 10, i32* %arrayidx, align 4
+  store i32 10, ptr %d, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
   br i1 %c, label %for.body.1, label %for.end
 
 for.body.1:                                       ; preds = %for.inc
-  %arrayidx.1 = getelementptr inbounds [1 x i32], [1 x i32]* %d, i64 0, i64 1
-  store i32 20, i32* %arrayidx.1, align 4
+  %arrayidx.1 = getelementptr inbounds [1 x i32], ptr %d, i64 0, i64 1
+  store i32 20, ptr %arrayidx.1, align 4
   ret i32 1
 
 for.end:                                          ; preds = %for.inc
-  %bc = bitcast [1 x i32]* %d to i32*
-  call void @use_pointer(i32* %bc)
+  call void @use_pointer(ptr %d)
   ret i32 0
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/overlap.ll b/llvm/test/Transforms/DeadStoreElimination/overlap.ll
index b4ed6cc2ea814..285c7db5424a8 100644
--- a/llvm/test/Transforms/DeadStoreElimination/overlap.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/overlap.ll
@@ -2,68 +2,64 @@
 ; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
 
-declare void @use(i64*)
+declare void @use(ptr)
 
 define void @test1() {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    call void @use(i64* [[A]])
-; CHECK-NEXT:    [[PTR1:%.*]] = bitcast i64* [[A]] to i8*
-; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr i8, i8* [[PTR1]], i32 1
-; CHECK-NEXT:    store i8 10, i8* [[PTR1]], align 1
-; CHECK-NEXT:    store i8 20, i8* [[PTR2]], align 1
-; CHECK-NEXT:    [[LV:%.*]] = load i64, i64* [[A]], align 4
-; CHECK-NEXT:    store i8 0, i8* [[PTR1]], align 1
-; CHECK-NEXT:    call void @use(i64* [[A]])
+; CHECK-NEXT:    call void @use(ptr [[A]])
+; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr [[A]], i32 1
+; CHECK-NEXT:    store i8 10, ptr [[A]], align 1
+; CHECK-NEXT:    store i8 20, ptr [[PTR2]], align 1
+; CHECK-NEXT:    [[LV:%.*]] = load i64, ptr [[A]], align 4
+; CHECK-NEXT:    store i8 0, ptr [[A]], align 1
+; CHECK-NEXT:    call void @use(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i64
-  call void @use(i64* %a)
-  %ptr1 = bitcast i64* %a to i8*
-  %ptr2 = getelementptr i8, i8* %ptr1, i32 1
+  call void @use(ptr %a)
+  %ptr2 = getelementptr i8, ptr %a, i32 1
 
-  store i8 10, i8* %ptr1
-  store i8 20, i8* %ptr2
-  %lv = load i64, i64* %a
-  store i8 0, i8* %ptr1
+  store i8 10, ptr %a
+  store i8 20, ptr %ptr2
+  %lv = load i64, ptr %a
+  store i8 0, ptr %a
 
-  call void @use(i64* %a)
+  call void @use(ptr %a)
   ret void
 }
 
 define void @test2() {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    call void @use(i64* [[A]])
-; CHECK-NEXT:    [[PTR1:%.*]] = bitcast i64* [[A]] to i8*
-; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr i8, i8* [[PTR1]], i32 1
-; CHECK-NEXT:    store i8 10, i8* [[PTR1]], align 1
-; CHECK-NEXT:    store i8 20, i8* [[PTR2]], align 1
+; CHECK-NEXT:    call void @use(ptr [[A]])
+; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr [[A]], i32 1
+; CHECK-NEXT:    store i8 10, ptr [[A]], align 1
+; CHECK-NEXT:    store i8 20, ptr [[PTR2]], align 1
 ; CHECK-NEXT:    br i1 undef, label [[BB1:%.*]], label [[END:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[LV:%.*]] = load i64, i64* [[A]], align 4
+; CHECK-NEXT:    [[LV:%.*]] = load i64, ptr [[A]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i8 0, i8* [[PTR1]], align 1
-; CHECK-NEXT:    call void @use(i64* [[A]])
+; CHECK-NEXT:    store i8 0, ptr [[A]], align 1
+; CHECK-NEXT:    call void @use(ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i64
-  call void @use(i64* %a)
-  %ptr1 = bitcast i64* %a to i8*
-  %ptr2 = getelementptr i8, i8* %ptr1, i32 1
+  call void @use(ptr %a)
+  %ptr2 = getelementptr i8, ptr %a, i32 1
 
-  store i8 10, i8* %ptr1
-  store i8 20, i8* %ptr2
+  store i8 10, ptr %a
+  store i8 20, ptr %ptr2
   br i1 undef, label %bb1, label %end
 
 bb1:
-  %lv = load i64, i64* %a
+  %lv = load i64, ptr %a
   br label %end
 
 end:
-  store i8 0, i8* %ptr1
-  call void @use(i64* %a)
+  store i8 0, ptr %a
+  call void @use(ptr %a)
   ret void
 }
 
@@ -71,57 +67,55 @@ end:
 define void @test3(i1 %c) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [2 x i8], align 1
-; CHECK-NEXT:    [[A1:%.*]] = getelementptr [2 x i8], [2 x i8]* [[A]], i32 0, i32 1
+; CHECK-NEXT:    [[A1:%.*]] = getelementptr [2 x i8], ptr [[A]], i32 0, i32 1
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
 ; CHECK:       if:
-; CHECK-NEXT:    store [2 x i8] zeroinitializer, [2 x i8]* [[A]], align 1
+; CHECK-NEXT:    store [2 x i8] zeroinitializer, ptr [[A]], align 1
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* [[A1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[A1]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [2 x i8]
-  %a0 = getelementptr [2 x i8], [2 x i8]* %a, i32 0, i32 0
-  %a1 = getelementptr [2 x i8], [2 x i8]* %a, i32 0, i32 1
-  store i8 1, i8* %a0
+  %a1 = getelementptr [2 x i8], ptr %a, i32 0, i32 1
+  store i8 1, ptr %a
   br i1 %c, label %if, label %else
 
 if:
-  store [2 x i8] zeroinitializer, [2 x i8]* %a
+  store [2 x i8] zeroinitializer, ptr %a
   br label %else
 
 else:
-  load i8, i8* %a1
+  load i8, ptr %a1
   ret void
 }
 
-; Variation on the previous test case, where only the store to %a0 is dead,
+; Variation on the previous test case, where only the store to %a is dead,
 ; but not the one to %a1. This tests for a potential caching bug.
 define void @test4(i1 %c) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [2 x i8], align 1
-; CHECK-NEXT:    [[A1:%.*]] = getelementptr [2 x i8], [2 x i8]* [[A]], i32 0, i32 1
-; CHECK-NEXT:    store i8 1, i8* [[A1]], align 1
+; CHECK-NEXT:    [[A1:%.*]] = getelementptr [2 x i8], ptr [[A]], i32 0, i32 1
+; CHECK-NEXT:    store i8 1, ptr [[A1]], align 1
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
 ; CHECK:       if:
-; CHECK-NEXT:    store [2 x i8] zeroinitializer, [2 x i8]* [[A]], align 1
+; CHECK-NEXT:    store [2 x i8] zeroinitializer, ptr [[A]], align 1
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* [[A1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[A1]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [2 x i8]
-  %a0 = getelementptr [2 x i8], [2 x i8]* %a, i32 0, i32 0
-  %a1 = getelementptr [2 x i8], [2 x i8]* %a, i32 0, i32 1
-  store i8 1, i8* %a1
-  store i8 1, i8* %a0
+  %a1 = getelementptr [2 x i8], ptr %a, i32 0, i32 1
+  store i8 1, ptr %a1
+  store i8 1, ptr %a
   br i1 %c, label %if, label %else
 
 if:
-  store [2 x i8] zeroinitializer, [2 x i8]* %a
+  store [2 x i8] zeroinitializer, ptr %a
   br label %else
 
 else:
-  load i8, i8* %a1
+  load i8, ptr %a1
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/phi-translation.ll b/llvm/test/Transforms/DeadStoreElimination/phi-translation.ll
index 158e7658aa4b4..c46d82f1e624f 100644
--- a/llvm/test/Transforms/DeadStoreElimination/phi-translation.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/phi-translation.ll
@@ -14,8 +14,8 @@ define void @memoryphi_translate_1(i1 %c) {
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[P:%.*]] = phi i8* [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
-; CHECK-NEXT:    store i8 10, i8* [[P]], align 1
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
+; CHECK-NEXT:    store i8 10, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -24,16 +24,16 @@ entry:
   br i1 %c, label %then, label %else
 
 then:
-  store i8 0, i8* %a.1
+  store i8 0, ptr %a.1
   br label %end
 
 else:
-  store i8 9, i8* %a.2
+  store i8 9, ptr %a.2
   br label %end
 
 end:
-  %p = phi i8* [ %a.1, %then ], [ %a.2, %else ]
-  store i8 10, i8* %p
+  %p = phi ptr [ %a.1, %then ], [ %a.2, %else ]
+  store i8 10, ptr %p
   ret void
 }
 
@@ -47,14 +47,14 @@ define i8 @memoryphi_translate_2(i1 %c) {
 ; CHECK-NEXT:    [[A_2:%.*]] = alloca i8, align 1
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    store i8 0, i8* [[A_1]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[A_1]], align 1
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[P:%.*]] = phi i8* [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
-; CHECK-NEXT:    [[L:%.*]] = load i8, i8* [[A_1]], align 1
-; CHECK-NEXT:    store i8 10, i8* [[P]], align 1
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
+; CHECK-NEXT:    [[L:%.*]] = load i8, ptr [[A_1]], align 1
+; CHECK-NEXT:    store i8 10, ptr [[P]], align 1
 ; CHECK-NEXT:    ret i8 [[L]]
 ;
 entry:
@@ -63,17 +63,17 @@ entry:
   br i1 %c, label %then, label %else
 
 then:
-  store i8 0, i8* %a.1
+  store i8 0, ptr %a.1
   br label %end
 
 else:
-  store i8 9, i8* %a.2
+  store i8 9, ptr %a.2
   br label %end
 
 end:
-  %p = phi i8* [ %a.1, %then ], [ %a.2, %else ]
-  %l = load i8, i8* %a.1
-  store i8 10, i8* %p
+  %p = phi ptr [ %a.1, %then ], [ %a.2, %else ]
+  %l = load i8, ptr %a.1
+  store i8 10, ptr %p
   ret i8 %l
 }
 
@@ -89,12 +89,12 @@ define i8 @memoryphi_translate_3(i1 %c) {
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    store i8 9, i8* [[A_2]], align 1
+; CHECK-NEXT:    store i8 9, ptr [[A_2]], align 1
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[P:%.*]] = phi i8* [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
-; CHECK-NEXT:    [[L:%.*]] = load i8, i8* [[A_2]], align 1
-; CHECK-NEXT:    store i8 10, i8* [[P]], align 1
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
+; CHECK-NEXT:    [[L:%.*]] = load i8, ptr [[A_2]], align 1
+; CHECK-NEXT:    store i8 10, ptr [[P]], align 1
 ; CHECK-NEXT:    ret i8 [[L]]
 ;
 entry:
@@ -103,17 +103,17 @@ entry:
   br i1 %c, label %then, label %else
 
 then:
-  store i8 0, i8* %a.1
+  store i8 0, ptr %a.1
   br label %end
 
 else:
-  store i8 9, i8* %a.2
+  store i8 9, ptr %a.2
   br label %end
 
 end:
-  %p = phi i8* [ %a.1, %then ], [ %a.2, %else ]
-  %l = load i8, i8* %a.2
-  store i8 10, i8* %p
+  %p = phi ptr [ %a.1, %then ], [ %a.2, %else ]
+  %l = load i8, ptr %a.2
+  store i8 10, ptr %p
   ret i8 %l
 }
 
@@ -125,15 +125,15 @@ define i8 @memoryphi_translate_4(i1 %c) {
 ; CHECK-NEXT:    [[A_2:%.*]] = alloca i8, align 1
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    store i8 0, i8* [[A_1]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[A_1]], align 1
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    store i8 9, i8* [[A_2]], align 1
+; CHECK-NEXT:    store i8 9, ptr [[A_2]], align 1
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[P:%.*]] = phi i8* [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
-; CHECK-NEXT:    [[L:%.*]] = load i8, i8* [[P]], align 1
-; CHECK-NEXT:    store i8 10, i8* [[P]], align 1
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[A_1]], [[THEN]] ], [ [[A_2]], [[ELSE]] ]
+; CHECK-NEXT:    [[L:%.*]] = load i8, ptr [[P]], align 1
+; CHECK-NEXT:    store i8 10, ptr [[P]], align 1
 ; CHECK-NEXT:    ret i8 [[L]]
 ;
 entry:
@@ -142,17 +142,17 @@ entry:
   br i1 %c, label %then, label %else
 
 then:
-  store i8 0, i8* %a.1
+  store i8 0, ptr %a.1
   br label %end
 
 else:
-  store i8 9, i8* %a.2
+  store i8 9, ptr %a.2
   br label %end
 
 end:
-  %p = phi i8* [ %a.1, %then ], [ %a.2, %else ]
-  %l = load i8, i8* %p
-  store i8 10, i8* %p
+  %p = phi ptr [ %a.1, %then ], [ %a.2, %else ]
+  %l = load i8, ptr %p
+  store i8 10, ptr %p
   ret i8 %l
 }
 
@@ -162,31 +162,31 @@ define void @memoryphi_translate_5(i1 %cond) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, align 1
-; CHECK-NEXT:    store i8 0, i8* [[A]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[A]], align 1
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
 ; CHECK:       cond.true:
 ; CHECK-NEXT:    br label [[COND_END]]
 ; CHECK:       cond.end:
-; CHECK-NEXT:    [[P:%.*]] = phi i8* [ [[B]], [[COND_TRUE]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i8 0, i8* [[P]], align 1
-; CHECK-NEXT:    call void @use(i8* [[P]])
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[B]], [[COND_TRUE]] ], [ [[A]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    store i8 0, ptr [[P]], align 1
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %a = alloca i8
   %b = alloca i8
   %c = alloca i8
-  store i8 0, i8* %a
+  store i8 0, ptr %a
   br i1 %cond, label %cond.true, label %cond.end
 
 cond.true:
-  store i8 0, i8* %c
+  store i8 0, ptr %c
   br label %cond.end
 
 cond.end:
-  %p = phi i8* [ %b, %cond.true ], [ %a, %entry ]
-  store i8 0, i8* %p
-  call void @use(i8* %p)
+  %p = phi ptr [ %b, %cond.true ], [ %a, %entry ]
+  store i8 0, ptr %p
+  call void @use(ptr %p)
   ret void
 }
 
@@ -198,35 +198,35 @@ define void @translate_without_memoryphi_1(i1 %cond) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, align 1
-; CHECK-NEXT:    store i8 0, i8* [[A]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[A]], align 1
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
 ; CHECK:       cond.true:
 ; CHECK-NEXT:    br label [[COND_END]]
 ; CHECK:       cond.end:
-; CHECK-NEXT:    [[P:%.*]] = phi i8* [ [[B]], [[COND_TRUE]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store i8 0, i8* [[P]], align 1
-; CHECK-NEXT:    call void @use(i8* [[P]])
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[B]], [[COND_TRUE]] ], [ [[A]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    store i8 0, ptr [[P]], align 1
+; CHECK-NEXT:    call void @use(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %a = alloca i8
   %b = alloca i8
-  store i8 0, i8* %a
+  store i8 0, ptr %a
   br i1 %cond, label %cond.true, label %cond.end
 
 cond.true:
   br label %cond.end
 
 cond.end:
-  %p = phi i8* [ %b, %cond.true ], [ %a, %entry ]
-  store i8 0, i8* %p
-  call void @use(i8* %p)
+  %p = phi ptr [ %b, %cond.true ], [ %a, %entry ]
+  store i8 0, ptr %p
+  call void @use(ptr %p)
   ret void
 }
 
 ; In the test, translating through the phi results in a null address. Make sure
 ; this does not cause a crash.
-define void @test_trans_null(i1 %c, i16* %ptr) {
+define void @test_trans_null(i1 %c, ptr %ptr) {
 ; CHECK-LABEL: @test_trans_null(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
@@ -234,14 +234,13 @@ define void @test_trans_null(i1 %c, i16* %ptr) {
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    call void @fn()
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* undef to i16*
-; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i16, i16* [[BC]], i64 2
-; CHECK-NEXT:    store i16 8, i16* [[GEP_1]], align 2
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i16, ptr undef, i64 2
+; CHECK-NEXT:    store i16 8, ptr [[GEP_1]], align 2
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[P:%.*]] = phi i16* [ [[PTR:%.*]], [[THEN]] ], [ [[BC]], [[ELSE]] ]
-; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 2
-; CHECK-NEXT:    store i16 8, i16* [[GEP_2]], align 2
+; CHECK-NEXT:    [[P:%.*]] = phi ptr [ [[PTR:%.*]], [[THEN]] ], [ undef, [[ELSE]] ]
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 2
+; CHECK-NEXT:    store i16 8, ptr [[GEP_2]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -252,18 +251,17 @@ then:
 
 else:
   call void @fn()
-  %bc = bitcast i8* undef to i16*
-  %gep.1 = getelementptr inbounds i16, i16* %bc, i64 2
-  store i16 8, i16* %gep.1, align 2
+  %gep.1 = getelementptr inbounds i16, ptr undef, i64 2
+  store i16 8, ptr %gep.1, align 2
   br label %exit
 
 exit:
-  %p = phi i16* [ %ptr, %then ], [ %bc, %else ]
-  %gep.2 = getelementptr inbounds i16, i16* %p, i64 2
-  store i16 8, i16* %gep.2, align 2
+  %p = phi ptr [ %ptr, %then ], [ undef, %else ]
+  %gep.2 = getelementptr inbounds i16, ptr %p, i64 2
+  store i16 8, ptr %gep.2, align 2
   ret void
 }
 
 
-declare void @use(i8*)
+declare void @use(ptr)
 declare void @fn()

diff  --git a/llvm/test/Transforms/DeadStoreElimination/pr11390.ll b/llvm/test/Transforms/DeadStoreElimination/pr11390.ll
index 56ca604eff98b..d5e368b79bc3b 100644
--- a/llvm/test/Transforms/DeadStoreElimination/pr11390.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/pr11390.ll
@@ -3,36 +3,36 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define fastcc void @cat_domain(i8* nocapture %name, i8* nocapture %domain, i8** 
+define fastcc void @cat_domain(ptr nocapture %name, ptr nocapture %domain, ptr 
 nocapture %s) nounwind uwtable {
 entry:
-  %call = tail call i64 @strlen(i8* %name) nounwind readonly
-  %call1 = tail call i64 @strlen(i8* %domain) nounwind readonly
+  %call = tail call i64 @strlen(ptr %name) nounwind readonly
+  %call1 = tail call i64 @strlen(ptr %domain) nounwind readonly
   %add = add i64 %call, 1
   %add2 = add i64 %add, %call1
   %add3 = add i64 %add2, 1
-  %call4 = tail call noalias i8* @malloc(i64 %add3) nounwind
-  store i8* %call4, i8** %s, align 8
-  %tobool = icmp eq i8* %call4, null
+  %call4 = tail call noalias ptr @malloc(i64 %add3) nounwind
+  store ptr %call4, ptr %s, align 8
+  %tobool = icmp eq ptr %call4, null
   br i1 %tobool, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %call4, i8* %name, i64 %call, i1 false)
-  %arrayidx = getelementptr inbounds i8, i8* %call4, i64 %call
-  store i8 46, i8* %arrayidx, align 1
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %call4, ptr %name, i64 %call, i1 false)
+  %arrayidx = getelementptr inbounds i8, ptr %call4, i64 %call
+  store i8 46, ptr %arrayidx, align 1
 ; CHECK: store i8 46
-  %add.ptr5 = getelementptr inbounds i8, i8* %call4, i64 %add
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %add.ptr5, i8* %domain, i64 %call1, i1 false)
-  %arrayidx8 = getelementptr inbounds i8, i8* %call4, i64 %add2
-  store i8 0, i8* %arrayidx8, align 1
+  %add.ptr5 = getelementptr inbounds i8, ptr %call4, i64 %add
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %add.ptr5, ptr %domain, i64 %call1, i1 false)
+  %arrayidx8 = getelementptr inbounds i8, ptr %call4, i64 %add2
+  store i8 0, ptr %arrayidx8, align 1
   br label %return
 
 return:                                           ; preds = %if.end, %entry
   ret void
 }
 
-declare i64 @strlen(i8* nocapture) nounwind readonly
+declare i64 @strlen(ptr nocapture) nounwind readonly
 
-declare noalias i8* @malloc(i64) nounwind
+declare noalias ptr @malloc(i64) nounwind
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind

diff  --git a/llvm/test/Transforms/DeadStoreElimination/pr47285-not-overwritten-on-all-exit-paths.ll b/llvm/test/Transforms/DeadStoreElimination/pr47285-not-overwritten-on-all-exit-paths.ll
index 7c3bb913f5f70..0f3e11984b3be 100644
--- a/llvm/test/Transforms/DeadStoreElimination/pr47285-not-overwritten-on-all-exit-paths.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/pr47285-not-overwritten-on-all-exit-paths.ll
@@ -5,7 +5,7 @@
 
 ; Reduced test case for PR47285.
 
-; `store i32 9, i32* @b` in %interesting is not killed by `store i32 23, i32* @b`
+; `store i32 9, ptr @b` in %interesting is not killed by `store i32 23, ptr @b`
 ; in %killer, because it is not overwritten before reaching the end of the
 ; function via %bb.2 -> %no.overwrite.exit.
 
@@ -22,24 +22,24 @@ define void @test(i1 %c.0, i1 %c.2, i1 %c.3, i1 %c.4, i1 %c.5, i1 %c.6) {
 ; CHECK:       bb.4:
 ; CHECK-NEXT:    br i1 [[C_4:%.*]], label [[BB_5:%.*]], label [[BB_6:%.*]]
 ; CHECK:       bb.5:
-; CHECK-NEXT:    store i32 99, i32* @b, align 4
+; CHECK-NEXT:    store i32 99, ptr @b, align 4
 ; CHECK-NEXT:    br i1 [[C_3:%.*]], label [[BB_5]], label [[BB_2]]
 ; CHECK:       bb.6:
-; CHECK-NEXT:    store i32 91, i32* @b, align 4
+; CHECK-NEXT:    store i32 91, ptr @b, align 4
 ; CHECK-NEXT:    br i1 [[C_5:%.*]], label [[SPLIT_CRIT_EDGE_2:%.*]], label [[BB_2]]
 ; CHECK:       split_crit_edge.2:
-; CHECK-NEXT:    store i32 27, i32* @b, align 4
+; CHECK-NEXT:    store i32 27, ptr @b, align 4
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       bb.7:
 ; CHECK-NEXT:    br i1 [[C_4]], label [[INTERESTING:%.*]], label [[BB_8:%.*]]
 ; CHECK:       interesting:
-; CHECK-NEXT:    store i32 9, i32* @b, align 4
+; CHECK-NEXT:    store i32 9, ptr @b, align 4
 ; CHECK-NEXT:    br i1 [[C_6:%.*]], label [[KILLER:%.*]], label [[BB_2]]
 ; CHECK:       killer:
-; CHECK-NEXT:    store i32 23, i32* @b, align 4
+; CHECK-NEXT:    store i32 23, ptr @b, align 4
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb.8:
-; CHECK-NEXT:    store i32 19, i32* @b, align 4
+; CHECK-NEXT:    store i32 19, ptr @b, align 4
 ; CHECK-NEXT:    br i1 [[C_4]], label [[EXIT]], label [[BB_2]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -60,30 +60,30 @@ bb.4:                                             ; preds = %bb.3
   br i1 %c.4, label %bb.5, label %bb.6
 
 bb.5:                                             ; preds = %bb.5, %bb.4
-  store i32 99, i32* @b, align 4
+  store i32 99, ptr @b, align 4
   br i1 %c.3, label %bb.5, label %bb.2
 
 bb.6:                                             ; preds = %bb.4
-  store i32 91, i32* @b, align 4
+  store i32 91, ptr @b, align 4
   br i1 %c.5, label %split_crit_edge.2, label %bb.2
 
 split_crit_edge.2:                                ; preds = %bb.6
-  store i32 27, i32* @b, align 4
+  store i32 27, ptr @b, align 4
   br label %exit
 
 bb.7:                                             ; preds = %bb.3
   br i1 %c.4, label %interesting, label %bb.8
 
 interesting:                                      ; preds = %bb.7
-  store i32 9, i32* @b, align 4
+  store i32 9, ptr @b, align 4
   br i1 %c.6, label %killer, label %bb.2
 
 killer:                                           ; preds = %interesting
-  store i32 23, i32* @b, align 4
+  store i32 23, ptr @b, align 4
   ret void
 
 bb.8:                                             ; preds = %bb.7
-  store i32 19, i32* @b, align 4
+  store i32 19, ptr @b, align 4
   br i1 %c.4, label %exit, label %bb.2
 
 exit:                                             ; preds = %bb.8, %split_crit_edge.2

diff  --git a/llvm/test/Transforms/DeadStoreElimination/read-clobber-after-overwrite.ll b/llvm/test/Transforms/DeadStoreElimination/read-clobber-after-overwrite.ll
index 43dcdaa7047b8..35139b1925ab2 100644
--- a/llvm/test/Transforms/DeadStoreElimination/read-clobber-after-overwrite.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/read-clobber-after-overwrite.ll
@@ -12,10 +12,10 @@ define i32 @test() {
 ; CHECK-NEXT:    br label [[LOOP_2:%.*]]
 ; CHECK:       loop.2:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[LOOP_1]] ], [ [[IV_NEXT:%.*]], [[LOOP_2]] ]
-; CHECK-NEXT:    [[PTR_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[M0]], i64 3, i64 [[IV]]
-; CHECK-NEXT:    [[PTR_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[M0]], i64 0, i64 [[IV]]
-; CHECK-NEXT:    store i32 20, i32* [[PTR_2]], align 4
-; CHECK-NEXT:    store i32 30, i32* [[PTR_1]], align 4
+; CHECK-NEXT:    [[PTR_1:%.*]] = getelementptr inbounds [4 x i32], ptr [[M0]], i64 3, i64 [[IV]]
+; CHECK-NEXT:    [[PTR_2:%.*]] = getelementptr inbounds [4 x i32], ptr [[M0]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    store i32 20, ptr [[PTR_2]], align 4
+; CHECK-NEXT:    store i32 30, ptr [[PTR_1]], align 4
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[C_3:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C_3]], label [[LOOP_1_LATCH:%.*]], label [[LOOP_2]]
@@ -23,8 +23,8 @@ define i32 @test() {
 ; CHECK-NEXT:    [[C_2:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C_2]], label [[EXIT:%.*]], label [[LOOP_1]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[PTR_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[M0]], i64 0, i64 1
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[PTR_3]], align 16
+; CHECK-NEXT:    [[PTR_3:%.*]] = getelementptr inbounds [4 x i32], ptr [[M0]], i64 0, i64 1
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[PTR_3]], align 16
 ; CHECK-NEXT:    ret i32 [[LV]]
 ;
 entry:
@@ -36,11 +36,11 @@ loop.1:
 
 loop.2:
   %iv = phi i64 [ 0, %loop.1 ], [ %iv.next, %loop.2 ]
-  %ptr.1 = getelementptr inbounds [4 x i32], [4 x i32]* %M0, i64 3, i64 %iv
-  store i32 10, i32* %ptr.1, align 4
-  %ptr.2 = getelementptr inbounds [4 x i32], [4 x i32]* %M0, i64 0, i64 %iv
-  store i32 20, i32* %ptr.2, align 4
-  store i32 30, i32* %ptr.1, align 4
+  %ptr.1 = getelementptr inbounds [4 x i32], ptr %M0, i64 3, i64 %iv
+  store i32 10, ptr %ptr.1, align 4
+  %ptr.2 = getelementptr inbounds [4 x i32], ptr %M0, i64 0, i64 %iv
+  store i32 20, ptr %ptr.2, align 4
+  store i32 30, ptr %ptr.1, align 4
   %iv.next = add nuw nsw i64 %iv, 1
   %c.3 = call i1 @cond()
   br i1 %c.3, label %loop.1.latch, label %loop.2
@@ -50,8 +50,8 @@ loop.1.latch:
   br i1 %c.2, label %exit, label %loop.1
 
 exit:
-  %ptr.3 = getelementptr inbounds [4 x i32], [4 x i32]* %M0, i64 0, i64 1
-  %lv = load i32, i32* %ptr.3, align 16
+  %ptr.3 = getelementptr inbounds [4 x i32], ptr %M0, i64 0, i64 1
+  %lv = load i32, ptr %ptr.3, align 16
   ret i32 %lv
 
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/scoped-noalias.ll b/llvm/test/Transforms/DeadStoreElimination/scoped-noalias.ll
index 89870738a08eb..b24f366151d6a 100644
--- a/llvm/test/Transforms/DeadStoreElimination/scoped-noalias.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/scoped-noalias.ll
@@ -4,28 +4,28 @@
 ; Assume that %p1 != %p2 if and only if %c is true. In that case the noalias
 ; metadata is correct, but the first store cannot be eliminated, as it may be
 ; read-clobbered by the load.
-define void @test(i1 %c, i8* %p1, i8* %p2) {
+define void @test(i1 %c, ptr %p1, ptr %p2) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store i8 0, i8* [[P1:%.*]], align 1
-; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* [[P2:%.*]], align 1, !alias.scope !0
+; CHECK-NEXT:    store i8 0, ptr [[P1:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[P2:%.*]], align 1, !alias.scope !0
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
 ; CHECK:       if:
-; CHECK-NEXT:    store i8 1, i8* [[P1]], align 1, !noalias !0
+; CHECK-NEXT:    store i8 1, ptr [[P1]], align 1, !noalias !0
 ; CHECK-NEXT:    ret void
 ; CHECK:       else:
-; CHECK-NEXT:    store i8 2, i8* [[P1]], align 1
+; CHECK-NEXT:    store i8 2, ptr [[P1]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  store i8 0, i8* %p1
-  load i8, i8* %p2, !alias.scope !2
+  store i8 0, ptr %p1
+  load i8, ptr %p2, !alias.scope !2
   br i1 %c, label %if, label %else
 
 if:
-  store i8 1, i8* %p1, !noalias !2
+  store i8 1, ptr %p1, !noalias !2
   ret void
 
 else:
-  store i8 2, i8* %p1
+  store i8 2, ptr %p1
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/simple-preservation.ll b/llvm/test/Transforms/DeadStoreElimination/simple-preservation.ll
index 6aedc1ca01f83..d117940118dff 100644
--- a/llvm/test/Transforms/DeadStoreElimination/simple-preservation.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/simple-preservation.ll
@@ -3,15 +3,15 @@
 
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-define void @test1(i32* %Q, i32* %P) {
+define void @test1(ptr %Q, ptr %P) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[Q:%.*]], i64 4), "nonnull"(i32* [[Q]]), "align"(i32* [[Q]], i64 4) ]
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[Q:%.*]], i64 4), "nonnull"(ptr [[Q]]), "align"(ptr [[Q]], i64 4) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P:%.*]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %DEAD = load i32, i32* %Q
-  store i32 %DEAD, i32* %P
-  store i32 0, i32* %P
+  %DEAD = load i32, ptr %Q
+  store i32 %DEAD, ptr %P
+  store i32 0, ptr %P
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/simple.ll b/llvm/test/Transforms/DeadStoreElimination/simple.ll
index 29e86048dc241..ea0a97b6eb32d 100644
--- a/llvm/test/Transforms/DeadStoreElimination/simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/simple.ll
@@ -3,120 +3,120 @@
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
-declare void @llvm.init.trampoline(i8*, i8*, i8*)
-declare void @llvm.matrix.column.major.store(<6 x float>, float*, i64, i1, i32, i32)
-
-define void @test1(i32* %Q, i32* %P) {
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr)
+declare void @llvm.matrix.column.major.store(<6 x float>, ptr, i64, i1, i32, i32)
+
+define void @test1(ptr %Q, ptr %P) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %DEAD = load i32, i32* %Q
-  store i32 %DEAD, i32* %P
-  store i32 0, i32* %P
+  %DEAD = load i32, ptr %Q
+  store i32 %DEAD, ptr %P
+  store i32 0, ptr %P
   ret void
 }
 
 ; PR8677
 @g = global i32 1
 
-define i32 @test3(i32* %g_addr) nounwind {
+define i32 @test3(ptr %g_addr) nounwind {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    [[G_VALUE:%.*]] = load i32, i32* [[G_ADDR:%.*]], align 4
-; CHECK-NEXT:    store i32 -1, i32* @g, align 4
-; CHECK-NEXT:    store i32 [[G_VALUE]], i32* [[G_ADDR]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* @g, align 4
+; CHECK-NEXT:    [[G_VALUE:%.*]] = load i32, ptr [[G_ADDR:%.*]], align 4
+; CHECK-NEXT:    store i32 -1, ptr @g, align 4
+; CHECK-NEXT:    store i32 [[G_VALUE]], ptr [[G_ADDR]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr @g, align 4
 ; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
-  %g_value = load i32, i32* %g_addr, align 4
-  store i32 -1, i32* @g, align 4
-  store i32 %g_value, i32* %g_addr, align 4
-  %tmp3 = load i32, i32* @g, align 4
+  %g_value = load i32, ptr %g_addr, align 4
+  store i32 -1, ptr @g, align 4
+  store i32 %g_value, ptr %g_addr, align 4
+  %tmp3 = load i32, ptr @g, align 4
   ret i32 %tmp3
 }
 
 
-define void @test4(i32* %Q) {
+define void @test4(ptr %Q) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 [[A]], i32* [[Q]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 [[A]], ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %a = load i32, i32* %Q
-  store volatile i32 %a, i32* %Q
+  %a = load i32, ptr %Q
+  store volatile i32 %a, ptr %Q
   ret void
 }
 
 ; PR8576 - Should delete store of 10 even though p/q are may aliases.
-define void @test2(i32 *%p, i32 *%q) {
+define void @test2(ptr %p, ptr %q) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    store i32 20, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store i32 30, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 20, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store i32 30, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 10, i32* %p, align 4
-  store i32 20, i32* %q, align 4
-  store i32 30, i32* %p, align 4
+  store i32 10, ptr %p, align 4
+  store i32 20, ptr %q, align 4
+  store i32 30, ptr %p, align 4
   ret void
 }
 
 ; Should delete store of 10 even though memset is a may-store to P (P and Q may
 ; alias).
-define void @test6(i32 *%p, i8 *%q) {
+define void @test6(ptr %p, ptr %q) {
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[Q:%.*]], i8 42, i64 900, i1 false)
-; CHECK-NEXT:    store i32 30, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[Q:%.*]], i8 42, i64 900, i1 false)
+; CHECK-NEXT:    store i32 30, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 10, i32* %p, align 4       ;; dead.
-  call void @llvm.memset.p0i8.i64(i8* %q, i8 42, i64 900, i1 false)
-  store i32 30, i32* %p, align 4
+  store i32 10, ptr %p, align 4       ;; dead.
+  call void @llvm.memset.p0.i64(ptr %q, i8 42, i64 900, i1 false)
+  store i32 30, ptr %p, align 4
   ret void
 }
 
 ; Should delete store of 10 even though memset is a may-store to P (P and Q may
 ; alias).
-define void @test6_atomic(i32* align 4 %p, i8* align 4 %q) {
+define void @test6_atomic(ptr align 4 %p, ptr align 4 %q) {
 ; CHECK-LABEL: @test6_atomic(
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[Q:%.*]], i8 42, i64 900, i32 4)
-; CHECK-NEXT:    store atomic i32 30, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[Q:%.*]], i8 42, i64 900, i32 4)
+; CHECK-NEXT:    store atomic i32 30, ptr [[P:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 10, i32* %p unordered, align 4       ;; dead.
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %q, i8 42, i64 900, i32 4)
-  store atomic i32 30, i32* %p unordered, align 4
+  store atomic i32 10, ptr %p unordered, align 4       ;; dead.
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %q, i8 42, i64 900, i32 4)
+  store atomic i32 30, ptr %p unordered, align 4
   ret void
 }
 
 ; Should delete store of 10 even though memcpy is a may-store to P (P and Q may
 ; alias).
-define void @test7(i32 *%p, i8 *%q, i8* noalias %r) {
+define void @test7(ptr %p, ptr %q, ptr noalias %r) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[Q:%.*]], i8* [[R:%.*]], i64 900, i1 false)
-; CHECK-NEXT:    store i32 30, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[Q:%.*]], ptr [[R:%.*]], i64 900, i1 false)
+; CHECK-NEXT:    store i32 30, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 10, i32* %p, align 4       ;; dead.
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %q, i8* %r, i64 900, i1 false)
-  store i32 30, i32* %p, align 4
+  store i32 10, ptr %p, align 4       ;; dead.
+  call void @llvm.memcpy.p0.p0.i64(ptr %q, ptr %r, i64 900, i1 false)
+  store i32 30, ptr %p, align 4
   ret void
 }
 
 ; Should delete store of 10 even though memcpy is a may-store to P (P and Q may
 ; alias).
-define void @test7_atomic(i32* align 4 %p, i8* align 4 %q, i8* noalias align 4 %r) {
+define void @test7_atomic(ptr align 4 %p, ptr align 4 %q, ptr noalias align 4 %r) {
 ; CHECK-LABEL: @test7_atomic(
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 [[Q:%.*]], i8* align 4 [[R:%.*]], i64 900, i32 4)
-; CHECK-NEXT:    store atomic i32 30, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 4 [[Q:%.*]], ptr align 4 [[R:%.*]], i64 900, i32 4)
+; CHECK-NEXT:    store atomic i32 30, ptr [[P:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 10, i32* %p unordered, align 4       ;; dead.
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %q, i8* align 4 %r, i64 900, i32 4)
-  store atomic i32 30, i32* %p unordered, align 4
+  store atomic i32 10, ptr %p unordered, align 4       ;; dead.
+  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 4 %q, ptr align 4 %r, i64 900, i32 4)
+  store atomic i32 30, ptr %p unordered, align 4
   ret void
 }
 
@@ -124,61 +124,57 @@ define void @test7_atomic(i32* align 4 %p, i8* align 4 %q, i8* noalias align 4 %
 define i32 @test8() {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:    [[V:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 1234567, i32* [[V]], align 4
-; CHECK-NEXT:    [[X:%.*]] = load i32, i32* [[V]], align 4
+; CHECK-NEXT:    store i32 1234567, ptr [[V]], align 4
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[V]], align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %V = alloca i32
-  store i32 1234567, i32* %V
-  %V2 = bitcast i32* %V to i8*
-  store i8 0, i8* %V2
-  %X = load i32, i32* %V
+  store i32 1234567, ptr %V
+  store i8 0, ptr %V
+  %X = load i32, ptr %V
   ret i32 %X
 
 }
 
 ; Test for byval handling.
 %struct.x = type { i32, i32, i32, i32 }
-define void @test9(%struct.x* byval(%struct.x)  %a) nounwind  {
+define void @test9(ptr byval(%struct.x)  %a) nounwind  {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:    ret void
 ;
-  %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0
-  store i32 1, i32* %tmp2, align 4
+  store i32 1, ptr %a, align 4
   ret void
 }
 
 ; Test for inalloca handling.
-define void @test9_2(%struct.x* inalloca(%struct.x) %a) nounwind {
+define void @test9_2(ptr inalloca(%struct.x) %a) nounwind {
 ; CHECK-LABEL: @test9_2(
 ; CHECK-NEXT:    ret void
 ;
-  %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0
-  store i32 1, i32* %tmp2, align 4
+  store i32 1, ptr %a, align 4
   ret void
 }
 
 ; Test for preallocated handling.
-define void @test9_3(%struct.x* preallocated(%struct.x)  %a) nounwind  {
+define void @test9_3(ptr preallocated(%struct.x)  %a) nounwind  {
 ; CHECK-LABEL: @test9_3(
 ; CHECK-NEXT:    ret void
 ;
-  %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0
-  store i32 1, i32* %tmp2, align 4
+  store i32 1, ptr %a, align 4
   ret void
 }
 
 ; va_arg has fuzzy dependence, the store shouldn't be zapped.
-define double @test10(i8* %X) {
+define double @test10(ptr %X) {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i8*, align 8
-; CHECK-NEXT:    store i8* [[X:%.*]], i8** [[X_ADDR]], align 8
-; CHECK-NEXT:    [[TMP_0:%.*]] = va_arg i8** [[X_ADDR]], double
+; CHECK-NEXT:    [[X_ADDR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    store ptr [[X:%.*]], ptr [[X_ADDR]], align 8
+; CHECK-NEXT:    [[TMP_0:%.*]] = va_arg ptr [[X_ADDR]], double
 ; CHECK-NEXT:    ret double [[TMP_0]]
 ;
-  %X_addr = alloca i8*
-  store i8* %X, i8** %X_addr
-  %tmp.0 = va_arg i8** %X_addr, double
+  %X_addr = alloca ptr
+  store ptr %X, ptr %X_addr
+  %tmp.0 = va_arg ptr %X_addr, double
   ret double %tmp.0
 }
 
@@ -188,9 +184,9 @@ define void @test11() {
 ; CHECK-LABEL: @test11(
 ; CHECK-NEXT:    ret void
 ;
-  %storage = alloca [10 x i8], align 16		; <[10 x i8]*> [#uses=1]
-  %cast = getelementptr [10 x i8], [10 x i8]* %storage, i32 0, i32 0		; <i8*> [#uses=1]
-  call void @llvm.init.trampoline( i8* %cast, i8* bitcast (void ()* @test11f to i8*), i8* null )		; <i8*> [#uses=1]
+  %storage = alloca [10 x i8], align 16		; <ptr> [#uses=1]
+  %cast = getelementptr [10 x i8], ptr %storage, i32 0, i32 0		; <ptr> [#uses=1]
+  call void @llvm.init.trampoline( ptr %cast, ptr @test11f, ptr null )		; <ptr> [#uses=1]
   ret void
 }
 
@@ -200,100 +196,91 @@ define void @test_matrix_store(i64 %stride) {
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca [6 x float]
-  %cast = bitcast [6 x float]* %a to float*
-  call void @llvm.matrix.column.major.store(<6 x float> zeroinitializer, float* %cast, i64 %stride, i1 false, i32 3, i32 2)
+  call void @llvm.matrix.column.major.store(<6 x float> zeroinitializer, ptr %a, i64 %stride, i1 false, i32 3, i32 2)
   ret void
 }
 
 ; %P doesn't escape, the DEAD instructions should be removed.
 declare void @may_unwind()
-define i32* @test_malloc_no_escape_before_return() {
+define ptr @test_malloc_no_escape_before_return() {
 ; CHECK-LABEL: @test_malloc_no_escape_before_return(
-; CHECK-NEXT:    [[PTR:%.*]] = tail call i8* @malloc(i64 4)
-; CHECK-NEXT:    [[P:%.*]] = bitcast i8* [[PTR]] to i32*
+; CHECK-NEXT:    [[PTR:%.*]] = tail call ptr @malloc(i64 4)
 ; CHECK-NEXT:    call void @may_unwind()
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    ret i32* [[P]]
+; CHECK-NEXT:    store i32 0, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  %ptr = tail call i8* @malloc(i64 4)
-  %P = bitcast i8* %ptr to i32*
-  %DEAD = load i32, i32* %P
+  %ptr = tail call ptr @malloc(i64 4)
+  %DEAD = load i32, ptr %ptr
   %DEAD2 = add i32 %DEAD, 1
-  store i32 %DEAD2, i32* %P
+  store i32 %DEAD2, ptr %ptr
   call void @may_unwind()
-  store i32 0, i32* %P
-  ret i32* %P
+  store i32 0, ptr %ptr
+  ret ptr %ptr
 }
 
-define i32* @test_custom_malloc_no_escape_before_return() {
+define ptr @test_custom_malloc_no_escape_before_return() {
 ; CHECK-LABEL: @test_custom_malloc_no_escape_before_return(
-; CHECK-NEXT:    [[PTR:%.*]] = tail call i8* @custom_malloc(i32 4)
-; CHECK-NEXT:    [[P:%.*]] = bitcast i8* [[PTR]] to i32*
+; CHECK-NEXT:    [[PTR:%.*]] = tail call ptr @custom_malloc(i32 4)
 ; CHECK-NEXT:    call void @may_unwind()
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    ret i32* [[P]]
+; CHECK-NEXT:    store i32 0, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  %ptr = tail call i8* @custom_malloc(i32 4)
-  %P = bitcast i8* %ptr to i32*
-  %DEAD = load i32, i32* %P
+  %ptr = tail call ptr @custom_malloc(i32 4)
+  %DEAD = load i32, ptr %ptr
   %DEAD2 = add i32 %DEAD, 1
-  store i32 %DEAD2, i32* %P
+  store i32 %DEAD2, ptr %ptr
   call void @may_unwind()
-  store i32 0, i32* %P
-  ret i32* %P
+  store i32 0, ptr %ptr
+  ret ptr %ptr
 }
 
-define i32 addrspace(1)* @test13_addrspacecast() {
+define ptr addrspace(1) @test13_addrspacecast() {
 ; CHECK-LABEL: @test13_addrspacecast(
-; CHECK-NEXT:    [[P:%.*]] = tail call i8* @malloc(i64 4)
-; CHECK-NEXT:    [[P_BC:%.*]] = bitcast i8* [[P]] to i32*
-; CHECK-NEXT:    [[P_AC:%.*]] = addrspacecast i32* [[P_BC]] to i32 addrspace(1)*
+; CHECK-NEXT:    [[P:%.*]] = tail call ptr @malloc(i64 4)
+; CHECK-NEXT:    [[P_AC:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(1)
 ; CHECK-NEXT:    call void @may_unwind()
-; CHECK-NEXT:    store i32 0, i32 addrspace(1)* [[P_AC]], align 4
-; CHECK-NEXT:    ret i32 addrspace(1)* [[P_AC]]
+; CHECK-NEXT:    store i32 0, ptr addrspace(1) [[P_AC]], align 4
+; CHECK-NEXT:    ret ptr addrspace(1) [[P_AC]]
 ;
-  %p = tail call i8* @malloc(i64 4)
-  %p.bc = bitcast i8* %p to i32*
-  %p.ac = addrspacecast i32* %p.bc to i32 addrspace(1)*
-  %DEAD = load i32, i32 addrspace(1)* %p.ac
+  %p = tail call ptr @malloc(i64 4)
+  %p.ac = addrspacecast ptr %p to ptr addrspace(1)
+  %DEAD = load i32, ptr addrspace(1) %p.ac
   %DEAD2 = add i32 %DEAD, 1
-  store i32 %DEAD2, i32 addrspace(1)* %p.ac
+  store i32 %DEAD2, ptr addrspace(1) %p.ac
   call void @may_unwind()
-  store i32 0, i32 addrspace(1)* %p.ac
-  ret i32 addrspace(1)* %p.ac
+  store i32 0, ptr addrspace(1) %p.ac
+  ret ptr addrspace(1) %p.ac
 }
 
 
-declare noalias i8* @malloc(i64) willreturn allockind("alloc,uninitialized")
-declare noalias i8* @custom_malloc(i32) willreturn
-declare noalias i8* @calloc(i64, i64) willreturn allockind("alloc,zeroed")
+declare noalias ptr @malloc(i64) willreturn allockind("alloc,uninitialized")
+declare noalias ptr @custom_malloc(i32) willreturn
+declare noalias ptr @calloc(i64, i64) willreturn allockind("alloc,zeroed")
 
-define void @test14(i32* %Q) {
+define void @test14(ptr %Q) {
 ; CHECK-LABEL: @test14(
 ; CHECK-NEXT:    ret void
 ;
   %P = alloca i32
-  %DEAD = load i32, i32* %Q
-  store i32 %DEAD, i32* %P
+  %DEAD = load i32, ptr %Q
+  store i32 %DEAD, ptr %P
   ret void
 
 }
 
 ; The store here is not dead because the byval call reads it.
-declare void @test19f({i32}* byval({i32}) align 4 %P)
+declare void @test19f(ptr byval({i32}) align 4 %P)
 
-define void @test19({i32}* nocapture byval({i32}) align 4 %arg5) nounwind ssp {
+define void @test19(ptr nocapture byval({i32}) align 4 %arg5) nounwind ssp {
 ; CHECK-LABEL: @test19(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[ARG5:%.*]], i32 0, i32 0
-; CHECK-NEXT:    store i32 912, i32* [[TMP7]], align 4
-; CHECK-NEXT:    call void @test19f({ i32 }* byval({ i32 }) align 4 [[ARG5]])
+; CHECK-NEXT:    store i32 912, ptr [[ARG5:%.*]], align 4
+; CHECK-NEXT:    call void @test19f(ptr byval({ i32 }) align 4 [[ARG5]])
 ; CHECK-NEXT:    ret void
 ;
 bb:
-  %tmp7 = getelementptr inbounds {i32}, {i32}* %arg5, i32 0, i32 0
-  store i32 912, i32* %tmp7
-  call void @test19f({i32}* byval({i32}) align 4 %arg5)
+  store i32 912, ptr %arg5
+  call void @test19f(ptr byval({i32}) align 4 %arg5)
   ret void
 
 }
@@ -302,18 +289,18 @@ define void @malloc_no_escape() {
 ; CHECK-LABEL: @malloc_no_escape(
 ; CHECK-NEXT:    ret void
 ;
-  %m = call i8* @malloc(i64 24)
-  store i8 0, i8* %m
+  %m = call ptr @malloc(i64 24)
+  store i8 0, ptr %m
   ret void
 }
 
 define void @custom_malloc_no_escape() {
 ; CHECK-LABEL: @custom_malloc_no_escape(
-; CHECK-NEXT:    [[M:%.*]] = call i8* @custom_malloc(i32 24)
+; CHECK-NEXT:    [[M:%.*]] = call ptr @custom_malloc(i32 24)
 ; CHECK-NEXT:    ret void
 ;
-  %m = call i8* @custom_malloc(i32 24)
-  store i8 0, i8* %m
+  %m = call ptr @custom_malloc(i32 24)
+  store i8 0, ptr %m
   ret void
 }
 
@@ -321,8 +308,8 @@ define void @test21() {
 ; CHECK-LABEL: @test21(
 ; CHECK-NEXT:    ret void
 ;
-  %m = call i8* @calloc(i64 9, i64 7)
-  store i8 0, i8* %m
+  %m = call ptr @calloc(i64 9, i64 7)
+  store i8 0, ptr %m
   ret void
 }
 
@@ -333,142 +320,137 @@ define void @test22(i1 %i, i32 %k, i32 %m) nounwind {
 ; CHECK-LABEL: @test22(
 ; CHECK-NEXT:    [[K_ADDR:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[M_ADDR:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[K_ADDR_M_ADDR:%.*]] = select i1 [[I:%.*]], i32* [[K_ADDR]], i32* [[M_ADDR]]
-; CHECK-NEXT:    store i32 0, i32* [[K_ADDR_M_ADDR]], align 4
+; CHECK-NEXT:    [[K_ADDR_M_ADDR:%.*]] = select i1 [[I:%.*]], ptr [[K_ADDR]], ptr [[M_ADDR]]
+; CHECK-NEXT:    store i32 0, ptr [[K_ADDR_M_ADDR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %k.addr = alloca i32
   %m.addr = alloca i32
-  %k.addr.m.addr = select i1 %i, i32* %k.addr, i32* %m.addr
-  store i32 0, i32* %k.addr.m.addr, align 4
+  %k.addr.m.addr = select i1 %i, ptr %k.addr, ptr %m.addr
+  store i32 0, ptr %k.addr.m.addr, align 4
   ret void
 }
 
 ; PR13547
-declare noalias i8* @strdup(i8* nocapture) nounwind
-define noalias i8* @test23() nounwind uwtable ssp {
+declare noalias ptr @strdup(ptr nocapture) nounwind
+define noalias ptr @test23() nounwind uwtable ssp {
 ; CHECK-LABEL: @test23(
 ; CHECK-NEXT:    [[X:%.*]] = alloca [2 x i8], align 1
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i8], [2 x i8]* [[X]], i64 0, i64 0
-; CHECK-NEXT:    store i8 97, i8* [[ARRAYIDX]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x i8], [2 x i8]* [[X]], i64 0, i64 1
-; CHECK-NEXT:    store i8 0, i8* [[ARRAYIDX1]], align 1
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @strdup(i8* [[ARRAYIDX]]) #[[ATTR5:[0-9]+]]
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    store i8 97, ptr [[X]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x i8], ptr [[X]], i64 0, i64 1
+; CHECK-NEXT:    store i8 0, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @strdup(ptr [[X]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
   %x = alloca [2 x i8], align 1
-  %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %x, i64 0, i64 0
-  store i8 97, i8* %arrayidx, align 1
-  %arrayidx1 = getelementptr inbounds [2 x i8], [2 x i8]* %x, i64 0, i64 1
-  store i8 0, i8* %arrayidx1, align 1
-  %call = call i8* @strdup(i8* %arrayidx) nounwind
-  ret i8* %call
+  store i8 97, ptr %x, align 1
+  %arrayidx1 = getelementptr inbounds [2 x i8], ptr %x, i64 0, i64 1
+  store i8 0, ptr %arrayidx1, align 1
+  %call = call ptr @strdup(ptr %x) nounwind
+  ret ptr %call
 }
 
 ; Make sure same sized store to later element is deleted
-define void @test24([2 x i32]* %a, i32 %b, i32 %c) nounwind {
+define void @test24(ptr %a, i32 %b, i32 %c) nounwind {
 ; CHECK-LABEL: @test24(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A:%.*]], i64 0, i64 0
-; CHECK-NEXT:    store i32 [[B:%.*]], i32* [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A]], i64 0, i64 1
-; CHECK-NEXT:    store i32 [[C:%.*]], i32* [[TMP2]], align 4
+; CHECK-NEXT:    store i32 [[B:%.*]], ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr [[A]], i64 0, i64 1
+; CHECK-NEXT:    store i32 [[C:%.*]], ptr [[TMP2]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %1 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 0
-  store i32 0, i32* %1, align 4
-  %2 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 1
-  store i32 0, i32* %2, align 4
-  %3 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 0
-  store i32 %b, i32* %3, align 4
-  %4 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 1
-  store i32 %c, i32* %4, align 4
+  store i32 0, ptr %a, align 4
+  %1 = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 1
+  store i32 0, ptr %1, align 4
+  store i32 %b, ptr %a, align 4
+  %2 = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 1
+  store i32 %c, ptr %2, align 4
   ret void
 }
 
 ; Check another case like PR13547 where strdup is not like malloc.
-define i8* @test25(i8* %p) nounwind {
+define ptr @test25(ptr %p) nounwind {
 ; CHECK-LABEL: @test25(
-; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 4
-; CHECK-NEXT:    [[TMP:%.*]] = load i8, i8* [[P_4]], align 1
-; CHECK-NEXT:    store i8 0, i8* [[P_4]], align 1
-; CHECK-NEXT:    [[Q:%.*]] = call i8* @strdup(i8* [[P]]) #[[ATTR13:[0-9]+]]
-; CHECK-NEXT:    store i8 [[TMP]], i8* [[P_4]], align 1
-; CHECK-NEXT:    ret i8* [[Q]]
+; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 4
+; CHECK-NEXT:    [[TMP:%.*]] = load i8, ptr [[P_4]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[P_4]], align 1
+; CHECK-NEXT:    [[Q:%.*]] = call ptr @strdup(ptr [[P]]) #[[ATTR13:[0-9]+]]
+; CHECK-NEXT:    store i8 [[TMP]], ptr [[P_4]], align 1
+; CHECK-NEXT:    ret ptr [[Q]]
 ;
-  %p.4 = getelementptr i8, i8* %p, i64 4
-  %tmp = load i8, i8* %p.4, align 1
-  store i8 0, i8* %p.4, align 1
-  %q = call i8* @strdup(i8* %p) nounwind optsize
-  store i8 %tmp, i8* %p.4, align 1
-  ret i8* %q
+  %p.4 = getelementptr i8, ptr %p, i64 4
+  %tmp = load i8, ptr %p.4, align 1
+  store i8 0, ptr %p.4, align 1
+  %q = call ptr @strdup(ptr %p) nounwind optsize
+  store i8 %tmp, ptr %p.4, align 1
+  ret ptr %q
 }
 
 ; Don't remove redundant store because of may-aliased store.
-define i32 @test28(i1 %c, i32* %p, i32* %p2, i32 %i) {
+define i32 @test28(i1 %c, ptr %p, ptr %p2, i32 %i) {
 ; CHECK-LABEL: @test28(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I:%.*]], i32* [[P2:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[P2:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
 
   ; Might overwrite value at %p
-  store i32 %i, i32* %p2, align 4
+  store i32 %i, ptr %p2, align 4
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   br label %bb3
 bb3:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ret i32 0
 }
 
 ; Don't remove redundant store because of may-aliased store.
-define i32 @test29(i1 %c, i32* %p, i32* %p2, i32 %i) {
+define i32 @test29(i1 %c, ptr %p, ptr %p2, i32 %i) {
 ; CHECK-LABEL: @test29(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 [[I:%.*]], i32* [[P2:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[P2:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   ; Might overwrite value at %p
-  store i32 %i, i32* %p2, align 4
+  store i32 %i, ptr %p2, align 4
   br label %bb3
 bb3:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ret i32 0
 }
 
 declare void @unknown_func()
 
 ; Don't remove redundant store because of unknown call.
-define i32 @test30(i1 %c, i32* %p, i32 %i) {
+define i32 @test30(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test30(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -476,11 +458,11 @@ define i32 @test30(i1 %c, i32* %p, i32 %i) {
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br i1 %c, label %bb1, label %bb2
 bb1:
   br label %bb3
@@ -489,28 +471,28 @@ bb2:
   call void @unknown_func()
   br label %bb3
 bb3:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ret i32 0
 }
 
 ; Don't remove redundant store in a loop with a may-alias store.
-define i32 @test32(i1 %c, i32* %p, i32 %i) {
+define i32 @test32(i1 %c, ptr %p, i32 %i) {
 ; CHECK-LABEL: @test32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 [[V]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
 ; CHECK-NEXT:    call void @unknown_func()
 ; CHECK-NEXT:    br i1 undef, label [[BB1]], label [[BB2:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %v = load i32, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
   br label %bb1
 bb1:
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ; Might read and overwrite value at %p
   call void @unknown_func()
   br i1 undef, label %bb1, label %bb2
@@ -520,241 +502,228 @@ bb2:
 
 ; We cannot remove any stores, because @unknown_func may unwind and the caller
 ; may read %p while unwinding.
-define void @test34(i32* noalias %p) {
+define void @test34(ptr noalias %p) {
 ; CHECK-LABEL: @test34(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    call void @unknown_func()
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   call void @unknown_func()
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   ret void
 }
 ; Same as previous case, but with an sret argument.
 ; TODO: The first store could be eliminated if sret is not visible on unwind.
-define void @test34_sret(i32* noalias sret(i32) %p) {
+define void @test34_sret(ptr noalias sret(i32) %p) {
 ; CHECK-LABEL: @test34_sret(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    call void @unknown_func()
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   call void @unknown_func()
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   ret void
 }
 
 ; Remove redundant store even with an unwinding function in the same block
-define void @test35(i32* noalias %p) {
+define void @test35(ptr noalias %p) {
 ; CHECK-LABEL: @test35(
 ; CHECK-NEXT:    call void @unknown_func()
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   call void @unknown_func()
-  store i32 1, i32* %p
-  store i32 0, i32* %p
+  store i32 1, ptr %p
+  store i32 0, ptr %p
   ret void
 }
 
 ; We cannot optimize away the first memmove since %P could overlap with %Q.
-define void @test36(i8* %P, i8* %Q) {
+define void @test36(ptr %P, ptr %Q) {
 ; CHECK-LABEL: @test36(
-; CHECK-NEXT:    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* [[P]], i8* [[Q]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[Q]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
+  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
   ret void
 }
 
-define void @test36_atomic(i8* %P, i8* %Q) {
+define void @test36_atomic(ptr %P, ptr %Q) {
 ; CHECK-LABEL: @test36_atomic(
-; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
-; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[Q]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[Q]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
   ret void
 }
 
-define void @test37(i8* %P, i8* %Q, i8* %R) {
+define void @test37(ptr %P, ptr %Q, ptr %R) {
 ; CHECK-LABEL: @test37(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %R, i64 12, i1 false)
   ret void
 }
 
-define void @test37_atomic(i8* %P, i8* %Q, i8* %R) {
+define void @test37_atomic(ptr %P, ptr %Q, ptr %R) {
 ; CHECK-LABEL: @test37_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
-; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 12, i32 1)
   ret void
 }
 
 ; See PR11763 - LLVM allows memcpy's source and destination to be equal (but not
 ; inequal and overlapping).
-define void @test38(i8* %P, i8* %Q, i8* %R) {
+define void @test38(ptr %P, ptr %Q, ptr %R) {
 ; CHECK-LABEL: @test38(
-; CHECK-NEXT:    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i1 false)
+  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %R, i64 12, i1 false)
   ret void
 }
 
 ; See PR11763 - LLVM allows memcpy's source and destination to be equal (but not
 ; inequal and overlapping).
-define void @test38_atomic(i8* %P, i8* %Q, i8* %R) {
+define void @test38_atomic(ptr %P, ptr %Q, ptr %R) {
 ; CHECK-LABEL: @test38_atomic(
-; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 12, i32 1)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
+  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 12, i32 1)
   ret void
 }
 
-define void @test39(i8* %P, i8* %Q, i8* %R) {
+define void @test39(ptr %P, ptr %Q, ptr %R) {
 ; CHECK-LABEL: @test39(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 8, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 8, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 8, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %R, i64 8, i1 false)
   ret void
 }
 
-define void @test39_atomic(i8* %P, i8* %Q, i8* %R) {
+define void @test39_atomic(ptr %P, ptr %Q, ptr %R) {
 ; CHECK-LABEL: @test39_atomic(
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
-; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 8, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
+; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 8, i32 1)
 ; CHECK-NEXT:    ret void
 ;
 
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
-  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 8, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 8, i32 1)
   ret void
 }
 
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
-declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
-define void @test40(i32** noalias %Pp, i32* noalias %Q)  {
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+define void @test40(ptr noalias %Pp, ptr noalias %Q)  {
 ; CHECK-LABEL: @test40(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[AC:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[AC]])
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32** [[PP:%.*]] to i8**
-; CHECK-NEXT:    [[PC:%.*]] = load i8*, i8** [[TMP0]], align 8
-; CHECK-NEXT:    [[QC:%.*]] = bitcast i32* [[Q:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 [[AC]], i8* align 4 [[QC]], i64 4, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[PC]], i8* nonnull align 4 [[AC]], i64 4, i1 true)
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[AC]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[A]])
+; CHECK-NEXT:    [[PC:%.*]] = load ptr, ptr [[PP:%.*]], align 8
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 [[A]], ptr align 4 [[Q:%.*]], i64 4, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[PC]], ptr nonnull align 4 [[A]], i64 4, i1 true)
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[A]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %A = alloca i32, align 4
-  %Ac = bitcast i32* %A to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %Ac)
-  %0 = bitcast i32** %Pp to i8**
-  %Pc = load i8*, i8** %0, align 8
-  %Qc = bitcast i32* %Q to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 %Ac, i8* align 4 %Qc, i64 4, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %Pc, i8* nonnull align 4 %Ac, i64 4, i1 true)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %Ac)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %A)
+  %Pc = load ptr, ptr %Pp, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %A, ptr align 4 %Q, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %Pc, ptr nonnull align 4 %A, i64 4, i1 true)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %A)
   ret void
 }
 
-declare void @free(i8* nocapture) allockind("free")
+declare void @free(ptr nocapture) allockind("free")
 
-; We cannot remove `store i32 1, i32* %p`, because @unknown_func may unwind
+; We cannot remove `store i32 1, ptr %p`, because @unknown_func may unwind
 ; and the caller may read %p while unwinding.
-define void @test41(i32* noalias %P) {
+define void @test41(ptr noalias %P) {
 ; CHECK-LABEL: @test41(
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    store i32 1, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    call void @unknown_func()
-; CHECK-NEXT:    call void @free(i8* [[P2]])
+; CHECK-NEXT:    call void @free(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
-  %P2 = bitcast i32* %P to i8*
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   call void @unknown_func()
-  store i32 2, i32* %P
-  call void @free(i8* %P2)
+  store i32 2, ptr %P
+  call void @free(ptr %P)
   ret void
 }
 
-define void @test42(i32* %P, i32* %Q) {
+define void @test42(ptr %P, ptr %Q) {
 ; CHECK-LABEL: @test42(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT:    store i32 2, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    store i8 3, i8* [[P2]], align 1
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    store i8 3, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
-  %P2 = bitcast i32* %P to i8*
-  store i32 2, i32* %Q
-  store i8 3, i8* %P2
+  store i32 1, ptr %P
+  store i32 2, ptr %Q
+  store i8 3, ptr %P
   ret void
 }
 
-define void @test42a(i32* %P, i32* %Q) {
+define void @test42a(ptr %P, ptr %Q) {
 ; CHECK-LABEL: @test42a(
-; CHECK-NEXT:    store atomic i32 1, i32* [[P:%.*]] unordered, align 4
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT:    store atomic i32 2, i32* [[Q:%.*]] unordered, align 4
-; CHECK-NEXT:    store atomic i8 3, i8* [[P2]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 1, ptr [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 2, ptr [[Q:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i8 3, ptr [[P]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 1, i32* %P unordered, align 4
-  %P2 = bitcast i32* %P to i8*
-  store atomic i32 2, i32* %Q unordered, align 4
-  store atomic i8 3, i8* %P2 unordered, align 4
+  store atomic i32 1, ptr %P unordered, align 4
+  store atomic i32 2, ptr %Q unordered, align 4
+  store atomic i8 3, ptr %P unordered, align 4
   ret void
 }
 
-define void @test43a(i32* %P, i32* noalias %Q) {
+define void @test43a(ptr %P, ptr noalias %Q) {
 ; CHECK-LABEL: @test43a(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store atomic i32 50331649, i32* [[P:%.*]] unordered, align 4
-; CHECK-NEXT:    store atomic i32 2, i32* [[Q:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 50331649, ptr [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 2, ptr [[Q:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store atomic i32 1, i32* %P unordered, align 4
-  %P2 = bitcast i32* %P to i8*
-  store atomic i32 2, i32* %Q unordered, align 4
-  store atomic i8 3, i8* %P2 unordered, align 4
+  store atomic i32 1, ptr %P unordered, align 4
+  store atomic i32 2, ptr %Q unordered, align 4
+  store atomic i8 3, ptr %P unordered, align 4
   ret void
 }
 
@@ -762,63 +731,63 @@ entry:
 
 ; Here we can remove the first non-volatile store. We cannot remove the
 ; volatile store.
-define void @test44_volatile(i32* %P) {
+define void @test44_volatile(ptr %P) {
 ; CHECK-LABEL: @test44_volatile(
-; CHECK-NEXT:    store volatile i32 2, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P, align 4
-  store volatile i32 2, i32* %P, align 4
-  store i32 3, i32* %P, align 4
+  store i32 1, ptr %P, align 4
+  store volatile i32 2, ptr %P, align 4
+  store i32 3, ptr %P, align 4
   ret void
 }
 
-define void @test45_volatile(i32* %P) {
+define void @test45_volatile(ptr %P) {
 ; CHECK-LABEL: @test45_volatile(
-; CHECK-NEXT:    store volatile i32 2, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P, align 4
-  store volatile i32 2, i32* %P, align 4
-  store volatile i32 3, i32* %P, align 4
+  store i32 1, ptr %P, align 4
+  store volatile i32 2, ptr %P, align 4
+  store volatile i32 3, ptr %P, align 4
   ret void
 }
 
-define void @test46_volatile(i32* %P) {
+define void @test46_volatile(ptr %P) {
 ; CHECK-LABEL: @test46_volatile(
-; CHECK-NEXT:    store volatile i32 2, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store volatile i32 2, i32* %P, align 4
-  store i32 1, i32* %P, align 4
-  store volatile i32 3, i32* %P, align 4
+  store volatile i32 2, ptr %P, align 4
+  store i32 1, ptr %P, align 4
+  store volatile i32 3, ptr %P, align 4
   ret void
 }
 
-define void @test47_volatile(i32* %P) {
+define void @test47_volatile(ptr %P) {
 ; CHECK-LABEL: @test47_volatile(
-; CHECK-NEXT:    store volatile i32 2, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store volatile i32 2, i32* %P, align 4
-  store volatile i32 3, i32* %P, align 4
+  store volatile i32 2, ptr %P, align 4
+  store volatile i32 3, ptr %P, align 4
   ret void
 }
 
-define i32 @test48(i32* %P, i32* noalias %Q, i32* %R) {
+define i32 @test48(ptr %P, ptr noalias %Q, ptr %R) {
 ; CHECK-LABEL: @test48(
-; CHECK-NEXT:    store i32 2, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store i32 3, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    [[L:%.*]] = load i32, i32* [[R:%.*]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[R:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[L]]
 ;
-  store i32 1, i32* %Q
-  store i32 2, i32* %P
-  store i32 3, i32* %Q
-  %l = load i32, i32* %R
+  store i32 1, ptr %Q
+  store i32 2, ptr %P
+  store i32 3, ptr %Q
+  %l = load i32, ptr %R
   ret i32 %l
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/stats.ll b/llvm/test/Transforms/DeadStoreElimination/stats.ll
index 990f098533bfa..94b8d7615cb16 100644
--- a/llvm/test/Transforms/DeadStoreElimination/stats.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/stats.ll
@@ -6,29 +6,29 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
 
-define void @test2(i32* noalias %P, i32* noalias %C, i1 %c) {
+define void @test2(ptr noalias %P, ptr noalias %C, i1 %c) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 3, i32* [[C:%.*]]
+; CHECK-NEXT:    store i32 3, ptr [[C:%.*]]
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
-; CHECK-NEXT:    store i32 4, i32* [[C]]
+; CHECK-NEXT:    store i32 4, ptr [[C]]
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]]
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]]
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* %P
+  store i32 1, ptr %P
   br i1 %c, label %bb1, label %bb2
 bb1:
-  store i32 3, i32* %C
+  store i32 3, ptr %C
   br label %bb3
 bb2:
-  store i32 4, i32* %C
+  store i32 4, ptr %C
   br label %bb3
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/store-after-loop.ll b/llvm/test/Transforms/DeadStoreElimination/store-after-loop.ll
index 1e98fd7404a37..089fbfb47389c 100644
--- a/llvm/test/Transforms/DeadStoreElimination/store-after-loop.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/store-after-loop.ll
@@ -3,59 +3,53 @@
 
 target datalayout = "E-m:e-p:32:32-i64:32-f64:32:64-a:0:32-n32-S32"
 
-%struct.ilist = type { i32, %struct.ilist* }
+%struct.ilist = type { i32, ptr }
 
 ; There is no dead store in this test. Make sure no store is deleted by DSE.
 ; Test case related to bug report PR52774.
 
-define %struct.ilist* @test() {
+define ptr @test() {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[TMP0:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT:    [[LIST_NEXT:%.*]] = phi %struct.ilist* [ null, [[TMP0]] ], [ [[LIST_NEW_ILIST_PTR:%.*]], [[LOOP]] ]
-; CHECK-NEXT:    [[LIST_NEW_I8_PTR:%.*]] = tail call align 8 dereferenceable_or_null(8) i8* @malloc(i32 8)
-; CHECK-NEXT:    [[LIST_NEW_ILIST_PTR]] = bitcast i8* [[LIST_NEW_I8_PTR]] to %struct.ilist*
-; CHECK-NEXT:    [[GEP_NEW_VALUE:%.*]] = getelementptr inbounds [[STRUCT_ILIST:%.*]], %struct.ilist* [[LIST_NEW_ILIST_PTR]], i32 0, i32 0
-; CHECK-NEXT:    store i32 42, i32* [[GEP_NEW_VALUE]], align 8
-; CHECK-NEXT:    [[GEP_NEW_NEXT:%.*]] = getelementptr inbounds [[STRUCT_ILIST]], %struct.ilist* [[LIST_NEW_ILIST_PTR]], i32 0, i32 1
-; CHECK-NEXT:    store %struct.ilist* [[LIST_NEXT]], %struct.ilist** [[GEP_NEW_NEXT]], align 4
+; CHECK-NEXT:    [[LIST_NEXT:%.*]] = phi ptr [ null, [[TMP0]] ], [ [[LIST_NEW_I8_PTR:%.*]], [[LOOP]] ]
+; CHECK-NEXT:    [[LIST_NEW_I8_PTR]] = tail call align 8 dereferenceable_or_null(8) ptr @malloc(i32 8)
+; CHECK-NEXT:    store i32 42, ptr [[LIST_NEW_I8_PTR]], align 8
+; CHECK-NEXT:    [[GEP_NEW_NEXT:%.*]] = getelementptr inbounds [[STRUCT_ILIST:%.*]], ptr [[LIST_NEW_I8_PTR]], i32 0, i32 1
+; CHECK-NEXT:    store ptr [[LIST_NEXT]], ptr [[GEP_NEW_NEXT]], align 4
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq i32 [[IV_NEXT]], 10
 ; CHECK-NEXT:    br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[LIST_LAST_ILIST_PTR:%.*]] = bitcast i8* [[LIST_NEW_I8_PTR]] to %struct.ilist*
-; CHECK-NEXT:    [[GEP_LIST_LAST_NEXT:%.*]] = getelementptr inbounds [[STRUCT_ILIST]], %struct.ilist* [[LIST_LAST_ILIST_PTR]], i32 0, i32 1
-; CHECK-NEXT:    store %struct.ilist* null, %struct.ilist** [[GEP_LIST_LAST_NEXT]], align 4
-; CHECK-NEXT:    [[GEP_LIST_NEXT_NEXT:%.*]] = getelementptr inbounds [[STRUCT_ILIST]], %struct.ilist* [[LIST_NEXT]], i32 0, i32 1
-; CHECK-NEXT:    [[LOADED_PTR:%.*]] = load %struct.ilist*, %struct.ilist** [[GEP_LIST_NEXT_NEXT]], align 4
-; CHECK-NEXT:    ret %struct.ilist* [[LOADED_PTR]]
+; CHECK-NEXT:    [[GEP_LIST_LAST_NEXT:%.*]] = getelementptr inbounds [[STRUCT_ILIST]], ptr [[LIST_NEW_I8_PTR]], i32 0, i32 1
+; CHECK-NEXT:    store ptr null, ptr [[GEP_LIST_LAST_NEXT]], align 4
+; CHECK-NEXT:    [[GEP_LIST_NEXT_NEXT:%.*]] = getelementptr inbounds [[STRUCT_ILIST]], ptr [[LIST_NEXT]], i32 0, i32 1
+; CHECK-NEXT:    [[LOADED_PTR:%.*]] = load ptr, ptr [[GEP_LIST_NEXT_NEXT]], align 4
+; CHECK-NEXT:    ret ptr [[LOADED_PTR]]
 ;
   br label %loop
 
 loop:
   %iv = phi i32 [ 0, %0 ], [ %iv.next, %loop ]
-  %list.next = phi %struct.ilist* [ null, %0 ], [ %list.new.ilist.ptr, %loop ]
-  %list.new.i8.ptr = tail call align 8 dereferenceable_or_null(8) i8* @malloc(i32 8)
-  %list.new.ilist.ptr = bitcast i8* %list.new.i8.ptr to %struct.ilist*
-  %gep.new.value = getelementptr inbounds %struct.ilist, %struct.ilist* %list.new.ilist.ptr, i32 0, i32 0
-  store i32 42, i32* %gep.new.value, align 8
-  %gep.new.next = getelementptr inbounds %struct.ilist, %struct.ilist* %list.new.ilist.ptr, i32 0, i32 1
-  store %struct.ilist* %list.next, %struct.ilist** %gep.new.next, align 4
+  %list.next = phi ptr [ null, %0 ], [ %list.new.i8.ptr, %loop ]
+  %list.new.i8.ptr = tail call align 8 dereferenceable_or_null(8) ptr @malloc(i32 8)
+  store i32 42, ptr %list.new.i8.ptr, align 8
+  %gep.new.next = getelementptr inbounds %struct.ilist, ptr %list.new.i8.ptr, i32 0, i32 1
+  store ptr %list.next, ptr %gep.new.next, align 4
   %iv.next = add nuw nsw i32 %iv, 1
   %cond = icmp eq i32 %iv.next, 10
   br i1 %cond, label %exit, label %loop
 
 exit:
-  %list.last.ilist.ptr = bitcast i8* %list.new.i8.ptr to %struct.ilist*
-  %gep.list.last.next = getelementptr inbounds %struct.ilist, %struct.ilist* %list.last.ilist.ptr, i32 0, i32 1
-  store %struct.ilist* null, %struct.ilist** %gep.list.last.next, align 4
-  %gep.list.next.next = getelementptr inbounds %struct.ilist, %struct.ilist* %list.next, i32 0, i32 1
-  %loaded_ptr = load %struct.ilist*, %struct.ilist** %gep.list.next.next, align 4
-  ret %struct.ilist* %loaded_ptr                                      ; use loaded pointer
+  %gep.list.last.next = getelementptr inbounds %struct.ilist, ptr %list.new.i8.ptr, i32 0, i32 1
+  store ptr null, ptr %gep.list.last.next, align 4
+  %gep.list.next.next = getelementptr inbounds %struct.ilist, ptr %list.next, i32 0, i32 1
+  %loaded_ptr = load ptr, ptr %gep.list.next.next, align 4
+  ret ptr %loaded_ptr                                      ; use loaded pointer
 }
 
 ; Function Attrs: inaccessiblememonly nounwind
-declare noalias noundef align 8 i8* @malloc(i32 noundef) local_unnamed_addr #0
+declare noalias noundef align 8 ptr @malloc(i32 noundef) local_unnamed_addr #0
 
 attributes #0 = { inaccessiblememonly nounwind}

diff  --git a/llvm/test/Transforms/DeadStoreElimination/stores-of-existing-values.ll b/llvm/test/Transforms/DeadStoreElimination/stores-of-existing-values.ll
index 06a5f8690c26a..be750d9f15dbe 100644
--- a/llvm/test/Transforms/DeadStoreElimination/stores-of-existing-values.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/stores-of-existing-values.ll
@@ -7,14 +7,14 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 
 @a = external global [32 x i8], align 16
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
 
 ; Test case for PR16520. The store in %if.then is redundant, because the same value
 ; has been stored earlier to the same location.
-define void @test1_pr16520(i1 %b, i8* nocapture %r) {
+define void @test1_pr16520(i1 %b, ptr nocapture %r) {
 ; CHECK-LABEL: @test1_pr16520(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i8 1, i8* [[R:%.*]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @fn_mayread_or_clobber()
@@ -26,11 +26,11 @@ define void @test1_pr16520(i1 %b, i8* nocapture %r) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   br i1 %b, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   tail call void @fn_mayread_or_clobber()
   br label %if.end
 
@@ -45,10 +45,10 @@ if.end:                                           ; preds = %if.else, %if.then
 declare void @fn_mayread_or_clobber()
 declare void @fn_readonly() readonly
 
-define void @test2(i1 %b, i8* nocapture %r) {
+define void @test2(i1 %b, ptr nocapture %r) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i8 1, i8* [[R:%.*]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @fn_readonly()
@@ -60,7 +60,7 @@ define void @test2(i1 %b, i8* nocapture %r) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   br i1 %b, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
@@ -72,15 +72,15 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   ret void
 }
 
 ; Make sure volatile stores are not removed.
-define void @test2_volatile(i1 %b, i8* nocapture %r) {
+define void @test2_volatile(i1 %b, ptr nocapture %r) {
 ; CHECK-LABEL: @test2_volatile(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store volatile i8 1, i8* [[R:%.*]], align 1
+; CHECK-NEXT:    store volatile i8 1, ptr [[R:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @fn_readonly()
@@ -89,11 +89,11 @@ define void @test2_volatile(i1 %b, i8* nocapture %r) {
 ; CHECK-NEXT:    tail call void @fn_readonly()
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store volatile i8 1, i8* [[R]], align 1
+; CHECK-NEXT:    store volatile i8 1, ptr [[R]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store volatile i8 1, i8* %r, align 1
+  store volatile i8 1, ptr %r, align 1
   br i1 %b, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
@@ -105,14 +105,14 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  store volatile i8 1, i8* %r, align 1
+  store volatile i8 1, ptr %r, align 1
   ret void
 }
 
-define void @test3(i1 %b, i8* nocapture %r) {
+define void @test3(i1 %b, ptr nocapture %r) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i8 1, i8* [[R:%.*]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @fn_mayread_or_clobber()
@@ -121,11 +121,11 @@ define void @test3(i1 %b, i8* nocapture %r) {
 ; CHECK-NEXT:    tail call void @fn_readonly()
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i8 1, i8* [[R]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   br i1 %b, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
@@ -137,14 +137,14 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   ret void
 }
 
-define void @test4(i1 %b, i8* nocapture %r) {
+define void @test4(i1 %b, ptr nocapture %r) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i8 1, i8* [[R:%.*]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @fn_readonly()
@@ -153,11 +153,11 @@ define void @test4(i1 %b, i8* nocapture %r) {
 ; CHECK-NEXT:    tail call void @fn_mayread_or_clobber()
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i8 1, i8* [[R]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   br i1 %b, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
@@ -169,14 +169,14 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   ret void
 }
 
-define void @test5(i1 %b, i8* nocapture %r) {
+define void @test5(i1 %b, ptr nocapture %r) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i8 1, i8* [[R:%.*]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    tail call void @fn_readonly()
@@ -185,11 +185,11 @@ define void @test5(i1 %b, i8* nocapture %r) {
 ; CHECK-NEXT:    tail call void @fn_mayread_or_clobber()
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i8 1, i8* [[R]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[R]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   br i1 %b, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
@@ -201,49 +201,49 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  store i8 1, i8* %r, align 1
+  store i8 1, ptr %r, align 1
   ret void
 }
 
 declare i1 @cond() readnone
 
-define void @test6(i32* noalias %P) {
+define void @test6(ptr noalias %P) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
 ; CHECK:       for.header:
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[C1:%.*]] = call i1 @cond()
 ; CHECK-NEXT:    br i1 [[C1]], label [[FOR_BODY:%.*]], label [[END:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br label [[FOR_HEADER]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i32 3, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   br label %for.header
 
 for.header:
-  store i32 1, i32* %P, align 4
+  store i32 1, ptr %P, align 4
   %c1 = call i1 @cond()
   br i1 %c1, label %for.body, label %end
 
 for.body:
-  store i32 1, i32* %P, align 4
-  %lv = load i32, i32* %P
+  store i32 1, ptr %P, align 4
+  %lv = load i32, ptr %P
   br label %for.header
 
 end:
-  store i32 3, i32* %P, align 4
+  store i32 3, ptr %P, align 4
   ret void
 }
 
 ; Make sure the store in %bb3 can be eliminated in the presences of early returns.
-define void @test7(i32* noalias %P) {
+define void @test7(ptr noalias %P) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -252,21 +252,21 @@ define void @test7(i32* noalias %P) {
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
 bb2:
   ret void
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
 ; Make sure the store in %bb3 won't be eliminated because it may be clobbered before.
-define void @test8(i32* %P) {
+define void @test8(ptr %P) {
 ; CHECK-LABEL: @test8(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    call void @fn_mayread_or_clobber()
@@ -274,10 +274,10 @@ define void @test8(i32* %P) {
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   call void @fn_mayread_or_clobber()
@@ -285,15 +285,15 @@ bb1:
 bb2:
   ret void
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
 ; Make sure the store in %bb3 will be eliminated because only the early exit path
 ; may be clobbered.
-define void @test9(i32* noalias %P) {
+define void @test9(ptr noalias %P) {
 ; CHECK-LABEL: @test9(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -303,7 +303,7 @@ define void @test9(i32* noalias %P) {
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 true, label %bb1, label %bb2
 bb1:
   br label %bb3
@@ -311,64 +311,63 @@ bb2:
   call void @fn_mayread_or_clobber()
   ret void
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
 ; The store in bb3 can be eliminated, because the store in bb1 cannot alias it.
-define void @test10(i32* noalias %P, i32* %Q, i1 %c) {
+define void @test10(ptr noalias %P, ptr %Q, i1 %c) {
 ; UNOPT-LABEL: @test10(
-; UNOPT-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; UNOPT-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; UNOPT-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; UNOPT:       bb1:
-; UNOPT-NEXT:    store i32 10, i32* [[Q:%.*]], align 4
+; UNOPT-NEXT:    store i32 10, ptr [[Q:%.*]], align 4
 ; UNOPT-NEXT:    br label [[BB3:%.*]]
 ; UNOPT:       bb2:
 ; UNOPT-NEXT:    ret void
 ; UNOPT:       bb3:
-; UNOPT-NEXT:    store i32 0, i32* [[P]], align 4
+; UNOPT-NEXT:    store i32 0, ptr [[P]], align 4
 ; UNOPT-NEXT:    ret void
 ;
 ; OPT-LABEL: @test10(
-; OPT-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; OPT-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; OPT-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; OPT:       bb1:
-; OPT-NEXT:    store i32 10, i32* [[Q:%.*]], align 4
+; OPT-NEXT:    store i32 10, ptr [[Q:%.*]], align 4
 ; OPT-NEXT:    br label [[BB3:%.*]]
 ; OPT:       bb2:
 ; OPT-NEXT:    ret void
 ; OPT:       bb3:
 ; OPT-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c, label %bb1, label %bb2
 
 bb1:
-  store i32 10, i32* %Q
+  store i32 10, ptr %Q
   br label %bb3
 
 bb2:
   ret void
 
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
-define void @test11_smaller_later_store(i32* noalias %P, i32* %Q, i1 %c) {
+define void @test11_smaller_later_store(ptr noalias %P, ptr %Q, i1 %c) {
 ; CHECK-LABEL: @test11_smaller_later_store(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT:    store i8 0, i8* [[BC]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   br i1 %c, label %bb1, label %bb2
 
 bb1:
@@ -378,26 +377,23 @@ bb2:
   ret void
 
 bb3:
-  %bc = bitcast i32* %P to i8*
-  store i8 0, i8* %bc
+  store i8 0, ptr %P
   ret void
 }
 
-define void @test11_smaller_earlier_store(i32* noalias %P, i32* %Q, i1 %c) {
+define void @test11_smaller_earlier_store(ptr noalias %P, ptr %Q, i1 %c) {
 ; CHECK-LABEL: @test11_smaller_earlier_store(
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i32* [[P:%.*]] to i8*
-; CHECK-NEXT:    store i8 0, i8* [[BC]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[P:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    store i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %bc = bitcast i32* %P to i8*
-  store i8 0, i8* %bc
+  store i8 0, ptr %P
   br i1 %c, label %bb1, label %bb2
 
 bb1:
@@ -407,268 +403,254 @@ bb2:
   ret void
 
 bb3:
-  store i32 0, i32* %P
+  store i32 0, ptr %P
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
 
-define void @test12_memset_simple(i8* %ptr) {
+define void @test12_memset_simple(ptr %ptr) {
 ; CHECK-LABEL: @test12_memset_simple(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[PTR:%.*]], i8 0, i64 10, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[PTR:%.*]], i8 0, i64 10, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 10, i1 false)
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 4
-  store i8 0, i8* %ptr.5
+  call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 10, i1 false)
+  %ptr.5 = getelementptr i8, ptr %ptr, i64 4
+  store i8 0, ptr %ptr.5
   ret void
 }
 
-define void @test12_memset_other_store_in_between(i8* %ptr) {
+define void @test12_memset_other_store_in_between(ptr %ptr) {
 ; UNOPT-LABEL: @test12_memset_other_store_in_between(
-; UNOPT-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[PTR:%.*]], i8 0, i64 10, i1 false)
-; UNOPT-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; UNOPT-NEXT:    store i8 8, i8* [[PTR_4]], align 1
-; UNOPT-NEXT:    [[PTR_5:%.*]] = getelementptr i8, i8* [[PTR]], i64 5
-; UNOPT-NEXT:    store i8 0, i8* [[PTR_5]], align 1
+; UNOPT-NEXT:    call void @llvm.memset.p0.i64(ptr [[PTR:%.*]], i8 0, i64 10, i1 false)
+; UNOPT-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[PTR]], i64 4
+; UNOPT-NEXT:    store i8 8, ptr [[PTR_4]], align 1
+; UNOPT-NEXT:    [[PTR_5:%.*]] = getelementptr i8, ptr [[PTR]], i64 5
+; UNOPT-NEXT:    store i8 0, ptr [[PTR_5]], align 1
 ; UNOPT-NEXT:    ret void
 ;
 ; OPT-LABEL: @test12_memset_other_store_in_between(
-; OPT-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[PTR:%.*]], i8 0, i64 10, i1 false)
-; OPT-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; OPT-NEXT:    store i8 8, i8* [[PTR_4]], align 1
+; OPT-NEXT:    call void @llvm.memset.p0.i64(ptr [[PTR:%.*]], i8 0, i64 10, i1 false)
+; OPT-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[PTR]], i64 4
+; OPT-NEXT:    store i8 8, ptr [[PTR_4]], align 1
 ; OPT-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 10, i1 false)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  store i8 8, i8* %ptr.4
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 5
-  store i8 0, i8* %ptr.5
+  call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 10, i1 false)
+  %ptr.4 = getelementptr i8, ptr %ptr, i64 4
+  store i8 8, ptr %ptr.4
+  %ptr.5 = getelementptr i8, ptr %ptr, i64 5
+  store i8 0, ptr %ptr.5
   ret void
 }
 
-declare i8* @__memset_chk(i8* writeonly, i32, i64, i64) argmemonly writeonly nofree nounwind
+declare ptr @__memset_chk(ptr writeonly, i32, i64, i64) argmemonly writeonly nofree nounwind
 
-define void @test12_memset_chk_other_store_in_between(i8* %ptr) {
+define void @test12_memset_chk_other_store_in_between(ptr %ptr) {
 ; CHECK-LABEL: @test12_memset_chk_other_store_in_between(
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @__memset_chk(i8* [[PTR:%.*]], i32 0, i64 10, i64 -1)
-; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; CHECK-NEXT:    store i8 8, i8* [[PTR_4]], align 1
-; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, i8* [[PTR]], i64 5
-; CHECK-NEXT:    store i8 0, i8* [[PTR_5]], align 1
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @__memset_chk(ptr [[PTR:%.*]], i32 0, i64 10, i64 -1)
+; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[PTR]], i64 4
+; CHECK-NEXT:    store i8 8, ptr [[PTR_4]], align 1
+; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, ptr [[PTR]], i64 5
+; CHECK-NEXT:    store i8 0, ptr [[PTR_5]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 10, i64 -1)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  store i8 8, i8* %ptr.4
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 5
-  store i8 0, i8* %ptr.5
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 0, i64 10, i64 -1)
+  %ptr.4 = getelementptr i8, ptr %ptr, i64 4
+  store i8 8, ptr %ptr.4
+  %ptr.5 = getelementptr i8, ptr %ptr, i64 5
+  store i8 0, ptr %ptr.5
   ret void
 }
 
-declare void @use(i8*)
+declare void @use(ptr)
 
 define void @test12_memset_chk_other_store_in_between_stack_obj_escape_after(i64 %n) {
 ; CHECK-LABEL: @test12_memset_chk_other_store_in_between_stack_obj_escape_after(
 ; CHECK-NEXT:    [[OBJ:%.*]] = alloca [200 x i8], align 1
-; CHECK-NEXT:    [[PTR:%.*]] = bitcast [200 x i8]* [[OBJ]] to i8*
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @__memset_chk(i8* [[PTR]], i32 0, i64 10, i64 [[N:%.*]])
-; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; CHECK-NEXT:    store i8 8, i8* [[PTR_4]], align 1
-; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, i8* [[PTR]], i64 5
-; CHECK-NEXT:    store i8 0, i8* [[PTR_5]], align 1
-; CHECK-NEXT:    call void @use(i8* [[PTR]])
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @__memset_chk(ptr [[OBJ]], i32 0, i64 10, i64 [[N:%.*]])
+; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[OBJ]], i64 4
+; CHECK-NEXT:    store i8 8, ptr [[PTR_4]], align 1
+; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, ptr [[OBJ]], i64 5
+; CHECK-NEXT:    store i8 0, ptr [[PTR_5]], align 1
+; CHECK-NEXT:    call void @use(ptr [[OBJ]])
 ; CHECK-NEXT:    ret void
 ;
   %obj = alloca [200 x i8]
-  %ptr = bitcast [200 x i8]* %obj to i8*
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 10, i64 %n)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  store i8 8, i8* %ptr.4
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 5
-  store i8 0, i8* %ptr.5
-  call void @use(i8* %ptr)
+  %call = tail call ptr @__memset_chk(ptr %obj, i32 0, i64 10, i64 %n)
+  %ptr.4 = getelementptr i8, ptr %obj, i64 4
+  store i8 8, ptr %ptr.4
+  %ptr.5 = getelementptr i8, ptr %obj, i64 5
+  store i8 0, ptr %ptr.5
+  call void @use(ptr %obj)
   ret void
 }
 
 define void @test12_memset_chk_other_store_in_between_stack_obj_escape_before(i64 %n) {
 ; CHECK-LABEL: @test12_memset_chk_other_store_in_between_stack_obj_escape_before(
 ; CHECK-NEXT:    [[OBJ:%.*]] = alloca [200 x i8], align 1
-; CHECK-NEXT:    [[PTR:%.*]] = bitcast [200 x i8]* [[OBJ]] to i8*
-; CHECK-NEXT:    call void @use(i8* [[PTR]])
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i8* @__memset_chk(i8* [[PTR]], i32 0, i64 10, i64 [[N:%.*]])
-; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; CHECK-NEXT:    store i8 8, i8* [[PTR_4]], align 1
-; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, i8* [[PTR]], i64 5
-; CHECK-NEXT:    store i8 0, i8* [[PTR_5]], align 1
-; CHECK-NEXT:    [[PTR_10:%.*]] = getelementptr i8, i8* [[PTR]], i64 10
-; CHECK-NEXT:    store i8 0, i8* [[PTR_10]], align 1
-; CHECK-NEXT:    call void @use(i8* [[PTR]])
+; CHECK-NEXT:    call void @use(ptr [[OBJ]])
+; CHECK-NEXT:    [[CALL:%.*]] = tail call ptr @__memset_chk(ptr [[OBJ]], i32 0, i64 10, i64 [[N:%.*]])
+; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[OBJ]], i64 4
+; CHECK-NEXT:    store i8 8, ptr [[PTR_4]], align 1
+; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, ptr [[OBJ]], i64 5
+; CHECK-NEXT:    store i8 0, ptr [[PTR_5]], align 1
+; CHECK-NEXT:    [[PTR_10:%.*]] = getelementptr i8, ptr [[OBJ]], i64 10
+; CHECK-NEXT:    store i8 0, ptr [[PTR_10]], align 1
+; CHECK-NEXT:    call void @use(ptr [[OBJ]])
 ; CHECK-NEXT:    ret void
 ;
   %obj = alloca [200 x i8]
-  %ptr = bitcast [200 x i8]* %obj to i8*
-  call void @use(i8* %ptr)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 10, i64 %n)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  store i8 8, i8* %ptr.4
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 5
-  store i8 0, i8* %ptr.5
-  %ptr.10 = getelementptr i8, i8* %ptr, i64 10
-  store i8 0, i8* %ptr.10
-  call void @use(i8* %ptr)
+  call void @use(ptr %obj)
+  %call = tail call ptr @__memset_chk(ptr %obj, i32 0, i64 10, i64 %n)
+  %ptr.4 = getelementptr i8, ptr %obj, i64 4
+  store i8 8, ptr %ptr.4
+  %ptr.5 = getelementptr i8, ptr %obj, i64 5
+  store i8 0, ptr %ptr.5
+  %ptr.10 = getelementptr i8, ptr %obj, i64 10
+  store i8 0, ptr %ptr.10
+  call void @use(ptr %obj)
   ret void
 }
 
-define void @test12_memset_other_store_in_between_partial_overlap(i8* %ptr) {
+define void @test12_memset_other_store_in_between_partial_overlap(ptr %ptr) {
 ; CHECK-LABEL: @test12_memset_other_store_in_between_partial_overlap(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[PTR:%.*]], i8 0, i64 10, i1 false)
-; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; CHECK-NEXT:    [[BC_4:%.*]] = bitcast i8* [[PTR_4]] to i16*
-; CHECK-NEXT:    store i16 8, i16* [[BC_4]], align 2
-; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, i8* [[PTR]], i64 5
-; CHECK-NEXT:    [[BC_5:%.*]] = bitcast i8* [[PTR_5]] to i16*
-; CHECK-NEXT:    store i16 0, i16* [[BC_5]], align 2
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[PTR:%.*]], i8 0, i64 10, i1 false)
+; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[PTR]], i64 4
+; CHECK-NEXT:    store i16 8, ptr [[PTR_4]], align 2
+; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, ptr [[PTR]], i64 5
+; CHECK-NEXT:    store i16 0, ptr [[PTR_5]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 10, i1 false)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  %bc.4 = bitcast i8* %ptr.4 to i16*
-  store i16 8, i16* %bc.4
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 5
-  %bc.5 = bitcast i8* %ptr.5 to i16*
-  store i16 0, i16* %bc.5
+  call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 10, i1 false)
+  %ptr.4 = getelementptr i8, ptr %ptr, i64 4
+  store i16 8, ptr %ptr.4
+  %ptr.5 = getelementptr i8, ptr %ptr, i64 5
+  store i16 0, ptr %ptr.5
   ret void
 }
 
-define void @test12_memset_later_store_exceeds_memset(i8* %ptr) {
+define void @test12_memset_later_store_exceeds_memset(ptr %ptr) {
 ; CHECK-LABEL: @test12_memset_later_store_exceeds_memset(
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[PTR:%.*]], i8 0, i64 8, i1 false)
-; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, i8* [[PTR]], i64 4
-; CHECK-NEXT:    store i8 8, i8* [[PTR_4]], align 1
-; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, i8* [[PTR]], i64 8
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[PTR_5]] to i64*
-; CHECK-NEXT:    store i64 0, i64* [[BC]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[PTR:%.*]], i8 0, i64 8, i1 false)
+; CHECK-NEXT:    [[PTR_4:%.*]] = getelementptr i8, ptr [[PTR]], i64 4
+; CHECK-NEXT:    store i8 8, ptr [[PTR_4]], align 1
+; CHECK-NEXT:    [[PTR_5:%.*]] = getelementptr i8, ptr [[PTR]], i64 8
+; CHECK-NEXT:    store i64 0, ptr [[PTR_5]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 10, i1 false)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  store i8 8, i8* %ptr.4
-  %ptr.5 = getelementptr i8, i8* %ptr, i64 8
-  %bc = bitcast i8* %ptr.5 to i64*
-  store i64 0, i64* %bc
+  call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 10, i1 false)
+  %ptr.4 = getelementptr i8, ptr %ptr, i64 4
+  store i8 8, ptr %ptr.4
+  %ptr.5 = getelementptr i8, ptr %ptr, i64 8
+  store i64 0, ptr %ptr.5
   ret void
 }
 
-define void @test12_memset_later_store_before_memset(i8* %ptr) {
+define void @test12_memset_later_store_before_memset(ptr %ptr) {
 ; CHECK-LABEL: @test12_memset_later_store_before_memset(
-; CHECK-NEXT:    [[PTR_1:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 1
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[PTR_1]], i64 7
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP1]], i8 0, i64 3, i1 false)
-; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[PTR]] to i64*
-; CHECK-NEXT:    store i64 0, i64* [[BC]], align 8
+; CHECK-NEXT:    [[PTR_1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[PTR_1]], i64 7
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[TMP1]], i8 0, i64 3, i1 false)
+; CHECK-NEXT:    store i64 0, ptr [[PTR]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %ptr.1 = getelementptr i8, i8* %ptr, i64 1
-  call void @llvm.memset.p0i8.i64(i8* %ptr.1, i8 0, i64 10, i1 false)
-  %ptr.4 = getelementptr i8, i8* %ptr, i64 4
-  store i8 8, i8* %ptr.4
-  %bc = bitcast i8* %ptr to i64*
-  store i64 0, i64* %bc
+  %ptr.1 = getelementptr i8, ptr %ptr, i64 1
+  call void @llvm.memset.p0.i64(ptr %ptr.1, i8 0, i64 10, i1 false)
+  %ptr.4 = getelementptr i8, ptr %ptr, i64 4
+  store i8 8, ptr %ptr.4
+  store i64 0, ptr %ptr
   ret void
 }
 
 ; The memset will be shortened and the store will not be redundant afterwards.
 ; It cannot be eliminated.
-define void @test13_memset_shortened(i64* %ptr) {
+define void @test13_memset_shortened(ptr %ptr) {
 ; CHECK-LABEL: @test13_memset_shortened(
-; CHECK-NEXT:    [[PTR_I8:%.*]] = bitcast i64* [[PTR:%.*]] to i8*
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[PTR_I8]], i64 8
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP1]], i8 0, i64 16, i1 false)
-; CHECK-NEXT:    store i64 0, i64* [[PTR]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[TMP1]], i8 0, i64 16, i1 false)
+; CHECK-NEXT:    store i64 0, ptr [[PTR]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %ptr.i8 = bitcast i64* %ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* %ptr.i8, i8 0, i64 24, i1 false)
-  store i64 0, i64* %ptr
+  call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 24, i1 false)
+  store i64 0, ptr %ptr
   ret void
 }
 
-declare i8* @strcat(i8*, i8*) nounwind argmemonly
+declare ptr @strcat(ptr, ptr) nounwind argmemonly
 
-define void @test14_strcat(i8* noalias %P, i8* noalias %Q) {
+define void @test14_strcat(ptr noalias %P, ptr noalias %Q) {
 ; CHECK-LABEL: @test14_strcat(
-; CHECK-NEXT:    [[CALL1:%.*]] = call i8* @strcat(i8* [[P:%.*]], i8* [[Q:%.*]])
-; CHECK-NEXT:    [[CALL2:%.*]] = call i8* @strcat(i8* [[P]], i8* [[Q]])
+; CHECK-NEXT:    [[CALL1:%.*]] = call ptr @strcat(ptr [[P:%.*]], ptr [[Q:%.*]])
+; CHECK-NEXT:    [[CALL2:%.*]] = call ptr @strcat(ptr [[P]], ptr [[Q]])
 ; CHECK-NEXT:    ret void
 ;
-  %call1 = call i8* @strcat(i8* %P, i8* %Q)
+  %call1 = call ptr @strcat(ptr %P, ptr %Q)
   ; FIXME: Eliminate the second strcat as a "store of existing value" for this particular case, where both strcat's are identical (same source, not just same dest).
-  %call2 = call i8* @strcat(i8* %P, i8* %Q)
+  %call2 = call ptr @strcat(ptr %P, ptr %Q)
   ret void
 }
 
-define void @pr49927(i32* %q, i32* %p) {
+define void @pr49927(ptr %q, ptr %p) {
 ; CHECK-LABEL: @pr49927(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store i32 [[V]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 [[V]], ptr [[Q:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %p, align 4
-  store i32 %v, i32* %q, align 4
-  store i32 %v, i32* %p, align 4
+  %v = load i32, ptr %p, align 4
+  store i32 %v, ptr %q, align 4
+  store i32 %v, ptr %p, align 4
   ret void
 }
 
-define void @pr50339(i8* nocapture readonly %0) {
+define void @pr50339(ptr nocapture readonly %0) {
 ; CHECK-LABEL: @pr50339(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(16) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i64 0, i64 0), i8* noundef nonnull align 1 dereferenceable(16) [[TMP0:%.*]], i64 16, i1 false)
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(16) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i64 0, i64 0), i8* noundef nonnull align 1 dereferenceable(16) [[TMP0]], i64 16, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) @a, ptr noundef nonnull align 1 dereferenceable(16) [[TMP0:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) @a, ptr noundef nonnull align 1 dereferenceable(16) [[TMP0]], i64 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(16) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i64 0, i64 0), i8* noundef nonnull align 1 dereferenceable(16) %0, i64 16, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) @a, ptr noundef nonnull align 1 dereferenceable(16) %0, i64 16, i1 false)
   ; FIXME: Eliminate the second memcpy as a "store of existing value" for this particular case, where both memcpy's are identical (same source, not just same dest).
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(16) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i64 0, i64 0), i8* noundef nonnull align 1 dereferenceable(16) %0, i64 16, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) @a, ptr noundef nonnull align 1 dereferenceable(16) %0, i64 16, i1 false)
   ret void
 }
 
 ; Cannot remove the second memcpy as redundant store, because %src is modified
 ; in between.
-define i8 @memset_optimized_access(i8* noalias %dst, i8* noalias %src) {
+define i8 @memset_optimized_access(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @memset_optimized_access(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST:%.*]], i8* [[SRC:%.*]], i64 16, i1 false)
-; CHECK-NEXT:    store i8 99, i8* [[SRC]], align 1
-; CHECK-NEXT:    [[L:%.*]] = load i8, i8* [[DST]], align 1
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DST]], i8* [[SRC]], i64 16, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    store i8 99, ptr [[SRC]], align 1
+; CHECK-NEXT:    [[L:%.*]] = load i8, ptr [[DST]], align 1
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 16, i1 false)
 ; CHECK-NEXT:    ret i8 [[L]]
 ;
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)
-  store i8 99, i8* %src
-  %l = load i8, i8* %dst
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 16, i1 false)
+  store i8 99, ptr %src
+  %l = load i8, ptr %dst
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 16, i1 false)
   ret i8 %l
 }
 
 ; The @use() call is a later non-removable store, but should not affect the
 ; removal of the store in the if block.
-define void @later_non_removable_store(i1 %c, i8* %p) {
+define void @later_non_removable_store(i1 %c, ptr %p) {
 ; CHECK-LABEL: @later_non_removable_store(
-; CHECK-NEXT:    store i8 1, i8* [[P:%.*]], align 1
+; CHECK-NEXT:    store i8 1, ptr [[P:%.*]], align 1
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[EXIT:%.*]]
 ; CHECK:       if:
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    call void @use(i8* [[P]]) #[[ATTR6:[0-9]+]]
+; CHECK-NEXT:    call void @use(ptr [[P]]) #[[ATTR6:[0-9]+]]
 ; CHECK-NEXT:    ret void
 ;
-  store i8 1, i8* %p
+  store i8 1, ptr %p
   br i1 %c, label %if, label %exit
 
 if:
-  store i8 1, i8* %p
+  store i8 1, ptr %p
   br label %exit
 
 exit:
-  call void @use(i8* %p) argmemonly
+  call void @use(ptr %p) argmemonly
   ret void
 }

diff  --git a/llvm/test/Transforms/DeadStoreElimination/tail-byval.ll b/llvm/test/Transforms/DeadStoreElimination/tail-byval.ll
index c11315a188bd4..05ed46906c172 100644
--- a/llvm/test/Transforms/DeadStoreElimination/tail-byval.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/tail-byval.ll
@@ -8,16 +8,16 @@
 target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
 target triple = "i386-unknown-linux-gnu"
 
-declare void @g(i32* byval(i32) %p)
+declare void @g(ptr byval(i32) %p)
 
-define void @f(i32* byval(i32) %x) {
+define void @f(ptr byval(i32) %x) {
 entry:
   %p = alloca i32
-  %v = load i32, i32* %x
-  store i32 %v, i32* %p
-  tail call void @g(i32* byval(i32) %p)
+  %v = load i32, ptr %x
+  store i32 %v, ptr %p
+  tail call void @g(ptr byval(i32) %p)
   ret void
 }
-; CHECK-LABEL: define void @f(i32* byval(i32) %x)
-; CHECK:   store i32 %v, i32* %p
-; CHECK:   tail call void @g(i32* byval(i32) %p)
+; CHECK-LABEL: define void @f(ptr byval(i32) %x)
+; CHECK:   store i32 %v, ptr %p
+; CHECK:   tail call void @g(ptr byval(i32) %p)

diff  --git a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
index d581782884442..5d120e2b32289 100644
--- a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -dse -S < %s | FileCheck %s
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 declare void @unknown()
-declare void @f(i8*)
-declare void @f2(i8*, i8*)
-declare i8* @f3(i8*, i8*)
+declare void @f(ptr)
+declare void @f2(ptr, ptr)
+declare ptr @f3(ptr, ptr)
 
 ; Basic case for DSEing a trivially dead writing call
 define void @test_dead() {
@@ -15,8 +15,7 @@ define void @test_dead() {
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
   ret void
 }
 
@@ -24,16 +23,14 @@ define void @test_dead() {
 define void @test_lifetime() {
 ; CHECK-LABEL: @test_lifetime(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[BITCAST]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* [[BITCAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+  call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+  call void @llvm.lifetime.end.p0(i64 4, ptr %a)
   ret void
 }
 
@@ -42,20 +39,18 @@ define void @test_lifetime() {
 define void @test_lifetime2() {
 ; CHECK-LABEL: @test_lifetime2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[BITCAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
 ; CHECK-NEXT:    call void @unknown()
 ; CHECK-NEXT:    call void @unknown()
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* [[BITCAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
+  call void @llvm.lifetime.start.p0(i64 4, ptr %a)
   call void @unknown()
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
   call void @unknown()
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+  call void @llvm.lifetime.end.p0(i64 4, ptr %a)
   ret void
 }
 
@@ -66,23 +61,20 @@ define void @test_dead_readwrite() {
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* nocapture %bitcast) argmemonly nounwind willreturn
+  call void @f(ptr nocapture %a) argmemonly nounwind willreturn
   ret void
 }
 
 define i32 @test_neg_read_after() {
 ; CHECK-LABEL: @test_neg_read_after(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR1:[0-9]+]]
-; CHECK-NEXT:    [[RES:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
-  %res = load i32, i32* %a
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+  %res = load i32, ptr %a
   ret i32 %res
 }
 
@@ -90,39 +82,33 @@ define i32 @test_neg_read_after() {
 define void @test_neg_infinite_loop() {
 ; CHECK-LABEL: @test_neg_infinite_loop(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR2:[0-9]+]]
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind
   ret void
 }
 
 define void @test_neg_throw() {
 ; CHECK-LABEL: @test_neg_throw(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR3:[0-9]+]]
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly willreturn
+  call void @f(ptr writeonly nocapture %a) argmemonly willreturn
   ret void
 }
 
 define void @test_neg_extra_write() {
 ; CHECK-LABEL: @test_neg_extra_write(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) nounwind willreturn
+  call void @f(ptr writeonly nocapture %a) nounwind willreturn
   ret void
 }
 
@@ -132,64 +118,48 @@ define void @test_neg_unmodeled_write() {
 ; CHECK-LABEL: @test_neg_unmodeled_write(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[A2:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast i32* [[A2]] to i8*
-; CHECK-NEXT:    call void @f2(i8* nocapture writeonly [[BITCAST]], i8* [[BITCAST2]]) #[[ATTR1]]
+; CHECK-NEXT:    call void @f2(ptr nocapture writeonly [[A]], ptr [[A2]]) #[[ATTR1]]
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
   %a2 = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  %bitcast2 = bitcast i32* %a2 to i8*
-  call void @f2(i8* nocapture writeonly %bitcast, i8* %bitcast2) argmemonly nounwind willreturn
+  call void @f2(ptr nocapture writeonly %a, ptr %a2) argmemonly nounwind willreturn
   ret void
 }
 
 define i32 @test_neg_captured_by_call() {
 ; CHECK-LABEL: @test_neg_captured_by_call(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A2:%.*]] = alloca i8*, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast i8** [[A2]] to i8*
-; CHECK-NEXT:    call void @f2(i8* writeonly [[BITCAST]], i8* [[BITCAST2]]) #[[ATTR1]]
-; CHECK-NEXT:    [[A_COPY_CAST:%.*]] = load i8*, i8** [[A2]], align 8
-; CHECK-NEXT:    [[A_COPY:%.*]] = bitcast i8* [[A_COPY_CAST]] to i32*
-; CHECK-NEXT:    [[RES:%.*]] = load i32, i32* [[A_COPY]], align 4
+; CHECK-NEXT:    [[A2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT:    call void @f2(ptr writeonly [[A]], ptr [[A2]]) #[[ATTR1]]
+; CHECK-NEXT:    [[A_COPY_CAST:%.*]] = load ptr, ptr [[A2]], align 8
+; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[A_COPY_CAST]], align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %a = alloca i32, align 4
-  %a2 = alloca i8*, align 4
-  %bitcast = bitcast i32* %a to i8*
-  %bitcast2 = bitcast i8** %a2 to i8*
-  call void @f2(i8* writeonly %bitcast, i8* %bitcast2) argmemonly nounwind willreturn
-  %a_copy_cast = load i8*, i8** %a2
-  %a_copy = bitcast i8* %a_copy_cast to i32*
-  %res = load i32, i32* %a_copy
+  %a2 = alloca ptr, align 4
+  call void @f2(ptr writeonly %a, ptr %a2) argmemonly nounwind willreturn
+  %a_copy_cast = load ptr, ptr %a2
+  %res = load i32, ptr %a_copy_cast
   ret i32 %res
 }
 
 define i32 @test_neg_captured_before() {
 ; CHECK-LABEL: @test_neg_captured_before(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A2:%.*]] = alloca i8*, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast i8** [[A2]] to i8*
-; CHECK-NEXT:    store i8* [[BITCAST]], i8** [[A2]], align 8
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR1]]
-; CHECK-NEXT:    [[A_COPY_CAST:%.*]] = load i8*, i8** [[A2]], align 8
-; CHECK-NEXT:    [[A_COPY:%.*]] = bitcast i8* [[A_COPY_CAST]] to i32*
-; CHECK-NEXT:    [[RES:%.*]] = load i32, i32* [[A_COPY]], align 4
+; CHECK-NEXT:    [[A2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT:    store ptr [[A]], ptr [[A2]], align 8
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR1]]
+; CHECK-NEXT:    [[A_COPY_CAST:%.*]] = load ptr, ptr [[A2]], align 8
+; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[A_COPY_CAST]], align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %a = alloca i32, align 4
-  %a2 = alloca i8*, align 4
-  %bitcast = bitcast i32* %a to i8*
-  %bitcast2 = bitcast i8** %a2 to i8*
-  store i8* %bitcast, i8** %a2
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
-  %a_copy_cast = load i8*, i8** %a2
-  %a_copy = bitcast i8* %a_copy_cast to i32*
-  %res = load i32, i32* %a_copy
+  %a2 = alloca ptr, align 4
+  store ptr %a, ptr %a2
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+  %a_copy_cast = load ptr, ptr %a2
+  %res = load i32, ptr %a_copy_cast
   ret i32 %res
 }
 
@@ -197,13 +167,11 @@ define i32 @test_neg_captured_before() {
 define void @test_new_op_bundle() {
 ; CHECK-LABEL: @test_new_op_bundle(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR1]] [ "unknown"(i8* [[BITCAST]]) ]
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR1]] [ "unknown"(ptr [[A]]) ]
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn ["unknown" (i8* %bitcast)]
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn ["unknown" (ptr %a)]
   ret void
 }
 
@@ -214,9 +182,7 @@ define void @test_unreleated_read() {
 ;
   %a = alloca i32, align 4
   %a2 = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  %bitcast2 = bitcast i32* %a2 to i8*
-  call void @f2(i8* nocapture writeonly %bitcast, i8* nocapture readonly %bitcast2) argmemonly nounwind willreturn
+  call void @f2(ptr nocapture writeonly %a, ptr nocapture readonly %a2) argmemonly nounwind willreturn
   ret void
 }
 
@@ -228,29 +194,23 @@ define void @test_unrelated_capture() {
 ;
   %a = alloca i32, align 4
   %a2 = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  %bitcast2 = bitcast i32* %a2 to i8*
-  call i8* @f3(i8* nocapture writeonly %bitcast, i8* readonly %bitcast2) argmemonly nounwind willreturn
+  call ptr @f3(ptr nocapture writeonly %a, ptr readonly %a2) argmemonly nounwind willreturn
   ret void
 }
 
-; Cannot remove call, as %bitcast2 is captured via the return value.
+; Cannot remove call, as %a2 is captured via the return value.
 define i8 @test_neg_unrelated_capture_used_via_return() {
 ; CHECK-LABEL: @test_neg_unrelated_capture_used_via_return(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[A2:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast i32* [[A2]] to i8*
-; CHECK-NEXT:    [[CAPTURE:%.*]] = call i8* @f3(i8* nocapture writeonly [[BITCAST]], i8* readonly [[BITCAST2]]) #[[ATTR1]]
-; CHECK-NEXT:    [[V:%.*]] = load i8, i8* [[CAPTURE]], align 1
+; CHECK-NEXT:    [[CAPTURE:%.*]] = call ptr @f3(ptr nocapture writeonly [[A]], ptr readonly [[A2]]) #[[ATTR1]]
+; CHECK-NEXT:    [[V:%.*]] = load i8, ptr [[CAPTURE]], align 1
 ; CHECK-NEXT:    ret i8 [[V]]
 ;
   %a = alloca i32, align 4
   %a2 = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  %bitcast2 = bitcast i32* %a2 to i8*
-  %capture = call i8* @f3(i8* nocapture writeonly %bitcast, i8* readonly %bitcast2) argmemonly nounwind willreturn
-  %v = load i8, i8* %capture
+  %capture = call ptr @f3(ptr nocapture writeonly %a, ptr readonly %a2) argmemonly nounwind willreturn
+  %v = load i8, ptr %capture
   ret i8 %v
 }
 
@@ -261,8 +221,7 @@ define void @test_self_read() {
 ; CHECK-NEXT:    ret void
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f2(i8* nocapture writeonly %bitcast, i8* nocapture readonly %bitcast) argmemonly nounwind willreturn
+  call void @f2(ptr nocapture writeonly %a, ptr nocapture readonly %a) argmemonly nounwind willreturn
   ret void
 }
 
@@ -272,15 +231,14 @@ define void @test_self_read() {
 define i32 @test_dse_overwrite() {
 ; CHECK-LABEL: @test_dse_overwrite(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 0, i32* [[A]], align 4
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[A]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[V]]
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
-  store i32 0, i32* %a
-  %v = load i32, i32* %a
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+  store i32 0, ptr %a
+  %v = load i32, ptr %a
   ret i32 %v
 }
 
@@ -288,34 +246,30 @@ define i32 @test_dse_overwrite() {
 define i32 @test_neg_dse_partial_overwrite() {
 ; CHECK-LABEL: @test_neg_dse_partial_overwrite(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR1]]
-; CHECK-NEXT:    store i8 0, i8* [[BITCAST]], align 1
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A]]) #[[ATTR1]]
+; CHECK-NEXT:    store i8 0, ptr [[A]], align 1
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[V]]
 ;
   %a = alloca i32, align 4
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
-  store i8 0, i8* %bitcast
-  %v = load i32, i32* %a
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+  store i8 0, ptr %a
+  %v = load i32, ptr %a
   ret i32 %v
 }
 
 ; Negative case where we don't know the size of a, and thus can't use the
 ; full overwrite reasoning
-define i32 @test_neg_dse_unsized(i32* %a) {
+define i32 @test_neg_dse_unsized(ptr %a) {
 ; CHECK-LABEL: @test_neg_dse_unsized(
-; CHECK-NEXT:    [[BITCAST:%.*]] = bitcast i32* [[A:%.*]] to i8*
-; CHECK-NEXT:    call void @f(i8* nocapture writeonly [[BITCAST]]) #[[ATTR1]]
-; CHECK-NEXT:    store i32 0, i32* [[A]], align 4
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT:    call void @f(ptr nocapture writeonly [[A:%.*]]) #[[ATTR1]]
+; CHECK-NEXT:    store i32 0, ptr [[A]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[V]]
 ;
-  %bitcast = bitcast i32* %a to i8*
-  call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
-  store i32 0, i32* %a
-  %v = load i32, i32* %a
+  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+  store i32 0, ptr %a
+  %v = load i32, ptr %a
   ret i32 %v
 }
 
@@ -324,11 +278,11 @@ define i32 @test_neg_dse_unsized(i32* %a) {
 ; Same as test_dse_overwrite, but with a non-alloca object.
 define void @test_dse_non_alloca() {
 ; CHECK-LABEL: @test_dse_non_alloca(
-; CHECK-NEXT:    store i8 0, i8* @G, align 1
+; CHECK-NEXT:    store i8 0, ptr @G, align 1
 ; CHECK-NEXT:    ret void
 ;
-  call void @f(i8* writeonly nocapture @G) argmemonly nounwind willreturn
-  store i8 0, i8* @G
+  call void @f(ptr writeonly nocapture @G) argmemonly nounwind willreturn
+  store i8 0, ptr @G
   ret void
 }
 

diff  --git a/llvm/test/Transforms/DeadStoreElimination/wrong-malloc-size.ll b/llvm/test/Transforms/DeadStoreElimination/wrong-malloc-size.ll
index 0ddbd7a62697b..8b39f07d0dccf 100644
--- a/llvm/test/Transforms/DeadStoreElimination/wrong-malloc-size.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/wrong-malloc-size.ll
@@ -2,17 +2,17 @@
 ; RUN: opt -S -dse < %s | FileCheck %s
 
 ; malloc should have i64 argument under default data layout
-declare noalias i8* @malloc(i32)
+declare noalias ptr @malloc(i32)
 
-define i8* @malloc_and_memset_intrinsic(i32 %n) {
+define ptr @malloc_and_memset_intrinsic(i32 %n) {
 ; CHECK-LABEL: @malloc_and_memset_intrinsic(
-; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i32 [[N:%.*]])
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[CALL]], i8 0, i32 [[N]], i1 false)
-; CHECK-NEXT:    ret i8* [[CALL]]
+; CHECK-NEXT:    [[CALL:%.*]] = call ptr @malloc(i32 [[N:%.*]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr align 1 [[CALL]], i8 0, i32 [[N]], i1 false)
+; CHECK-NEXT:    ret ptr [[CALL]]
 ;
-  %call = call i8* @malloc(i32 %n)
-  call void @llvm.memset.p0i8.i32(i8* align 1 %call, i8 0, i32 %n, i1 false)
-  ret i8* %call
+  %call = call ptr @malloc(i32 %n)
+  call void @llvm.memset.p0.i32(ptr align 1 %call, i8 0, i32 %n, i1 false)
+  ret ptr %call
 }
 
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg) #2
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) #2


        


More information about the llvm-commits mailing list